1 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com 3 * 4 * This program is free software; you can redistribute it and/or 5 * modify it under the terms of version 2 of the GNU General Public 6 * License as published by the Free Software Foundation. 7 */ 8 #ifndef _UAPI__LINUX_BPF_H__ 9 #define _UAPI__LINUX_BPF_H__ 10 11 #include <linux/types.h> 12 #include <linux/bpf_common.h> 13 14 /* Extended instruction set based on top of classic BPF */ 15 16 /* instruction classes */ 17 #define BPF_JMP32 0x06 /* jmp mode in word width */ 18 #define BPF_ALU64 0x07 /* alu mode in double word width */ 19 20 /* ld/ldx fields */ 21 #define BPF_DW 0x18 /* double word (64-bit) */ 22 #define BPF_XADD 0xc0 /* exclusive add */ 23 24 /* alu/jmp fields */ 25 #define BPF_MOV 0xb0 /* mov reg to reg */ 26 #define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */ 27 28 /* change endianness of a register */ 29 #define BPF_END 0xd0 /* flags for endianness conversion: */ 30 #define BPF_TO_LE 0x00 /* convert to little-endian */ 31 #define BPF_TO_BE 0x08 /* convert to big-endian */ 32 #define BPF_FROM_LE BPF_TO_LE 33 #define BPF_FROM_BE BPF_TO_BE 34 35 /* jmp encodings */ 36 #define BPF_JNE 0x50 /* jump != */ 37 #define BPF_JLT 0xa0 /* LT is unsigned, '<' */ 38 #define BPF_JLE 0xb0 /* LE is unsigned, '<=' */ 39 #define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */ 40 #define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */ 41 #define BPF_JSLT 0xc0 /* SLT is signed, '<' */ 42 #define BPF_JSLE 0xd0 /* SLE is signed, '<=' */ 43 #define BPF_CALL 0x80 /* function call */ 44 #define BPF_EXIT 0x90 /* function return */ 45 46 /* Register numbers */ 47 enum { 48 BPF_REG_0 = 0, 49 BPF_REG_1, 50 BPF_REG_2, 51 BPF_REG_3, 52 BPF_REG_4, 53 BPF_REG_5, 54 BPF_REG_6, 55 BPF_REG_7, 56 BPF_REG_8, 57 BPF_REG_9, 58 BPF_REG_10, 59 __MAX_BPF_REG, 60 }; 61 62 /* BPF has 10 general purpose 64-bit registers and stack frame. */ 63 #define MAX_BPF_REG __MAX_BPF_REG 64 65 struct bpf_insn { 66 __u8 code; /* opcode */ 67 __u8 dst_reg:4; /* dest register */ 68 __u8 src_reg:4; /* source register */ 69 __s16 off; /* signed offset */ 70 __s32 imm; /* signed immediate constant */ 71 }; 72 73 /* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */ 74 struct bpf_lpm_trie_key { 75 __u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */ 76 __u8 data[0]; /* Arbitrary size */ 77 }; 78 79 struct bpf_cgroup_storage_key { 80 __u64 cgroup_inode_id; /* cgroup inode id */ 81 __u32 attach_type; /* program attach type */ 82 }; 83 84 union bpf_iter_link_info { 85 struct { 86 __u32 map_fd; 87 } map; 88 }; 89 90 /* BPF syscall commands, see bpf(2) man-page for details. */ 91 enum bpf_cmd { 92 BPF_MAP_CREATE, 93 BPF_MAP_LOOKUP_ELEM, 94 BPF_MAP_UPDATE_ELEM, 95 BPF_MAP_DELETE_ELEM, 96 BPF_MAP_GET_NEXT_KEY, 97 BPF_PROG_LOAD, 98 BPF_OBJ_PIN, 99 BPF_OBJ_GET, 100 BPF_PROG_ATTACH, 101 BPF_PROG_DETACH, 102 BPF_PROG_TEST_RUN, 103 BPF_PROG_GET_NEXT_ID, 104 BPF_MAP_GET_NEXT_ID, 105 BPF_PROG_GET_FD_BY_ID, 106 BPF_MAP_GET_FD_BY_ID, 107 BPF_OBJ_GET_INFO_BY_FD, 108 BPF_PROG_QUERY, 109 BPF_RAW_TRACEPOINT_OPEN, 110 BPF_BTF_LOAD, 111 BPF_BTF_GET_FD_BY_ID, 112 BPF_TASK_FD_QUERY, 113 BPF_MAP_LOOKUP_AND_DELETE_ELEM, 114 BPF_MAP_FREEZE, 115 BPF_BTF_GET_NEXT_ID, 116 BPF_MAP_LOOKUP_BATCH, 117 BPF_MAP_LOOKUP_AND_DELETE_BATCH, 118 BPF_MAP_UPDATE_BATCH, 119 BPF_MAP_DELETE_BATCH, 120 BPF_LINK_CREATE, 121 BPF_LINK_UPDATE, 122 BPF_LINK_GET_FD_BY_ID, 123 BPF_LINK_GET_NEXT_ID, 124 BPF_ENABLE_STATS, 125 BPF_ITER_CREATE, 126 BPF_LINK_DETACH, 127 BPF_PROG_BIND_MAP, 128 }; 129 130 enum bpf_map_type { 131 BPF_MAP_TYPE_UNSPEC, 132 BPF_MAP_TYPE_HASH, 133 BPF_MAP_TYPE_ARRAY, 134 BPF_MAP_TYPE_PROG_ARRAY, 135 BPF_MAP_TYPE_PERF_EVENT_ARRAY, 136 BPF_MAP_TYPE_PERCPU_HASH, 137 BPF_MAP_TYPE_PERCPU_ARRAY, 138 BPF_MAP_TYPE_STACK_TRACE, 139 BPF_MAP_TYPE_CGROUP_ARRAY, 140 BPF_MAP_TYPE_LRU_HASH, 141 BPF_MAP_TYPE_LRU_PERCPU_HASH, 142 BPF_MAP_TYPE_LPM_TRIE, 143 BPF_MAP_TYPE_ARRAY_OF_MAPS, 144 BPF_MAP_TYPE_HASH_OF_MAPS, 145 BPF_MAP_TYPE_DEVMAP, 146 BPF_MAP_TYPE_SOCKMAP, 147 BPF_MAP_TYPE_CPUMAP, 148 BPF_MAP_TYPE_XSKMAP, 149 BPF_MAP_TYPE_SOCKHASH, 150 BPF_MAP_TYPE_CGROUP_STORAGE, 151 BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, 152 BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, 153 BPF_MAP_TYPE_QUEUE, 154 BPF_MAP_TYPE_STACK, 155 BPF_MAP_TYPE_SK_STORAGE, 156 BPF_MAP_TYPE_DEVMAP_HASH, 157 BPF_MAP_TYPE_STRUCT_OPS, 158 BPF_MAP_TYPE_RINGBUF, 159 BPF_MAP_TYPE_INODE_STORAGE, 160 }; 161 162 /* Note that tracing related programs such as 163 * BPF_PROG_TYPE_{KPROBE,TRACEPOINT,PERF_EVENT,RAW_TRACEPOINT} 164 * are not subject to a stable API since kernel internal data 165 * structures can change from release to release and may 166 * therefore break existing tracing BPF programs. Tracing BPF 167 * programs correspond to /a/ specific kernel which is to be 168 * analyzed, and not /a/ specific kernel /and/ all future ones. 169 */ 170 enum bpf_prog_type { 171 BPF_PROG_TYPE_UNSPEC, 172 BPF_PROG_TYPE_SOCKET_FILTER, 173 BPF_PROG_TYPE_KPROBE, 174 BPF_PROG_TYPE_SCHED_CLS, 175 BPF_PROG_TYPE_SCHED_ACT, 176 BPF_PROG_TYPE_TRACEPOINT, 177 BPF_PROG_TYPE_XDP, 178 BPF_PROG_TYPE_PERF_EVENT, 179 BPF_PROG_TYPE_CGROUP_SKB, 180 BPF_PROG_TYPE_CGROUP_SOCK, 181 BPF_PROG_TYPE_LWT_IN, 182 BPF_PROG_TYPE_LWT_OUT, 183 BPF_PROG_TYPE_LWT_XMIT, 184 BPF_PROG_TYPE_SOCK_OPS, 185 BPF_PROG_TYPE_SK_SKB, 186 BPF_PROG_TYPE_CGROUP_DEVICE, 187 BPF_PROG_TYPE_SK_MSG, 188 BPF_PROG_TYPE_RAW_TRACEPOINT, 189 BPF_PROG_TYPE_CGROUP_SOCK_ADDR, 190 BPF_PROG_TYPE_LWT_SEG6LOCAL, 191 BPF_PROG_TYPE_LIRC_MODE2, 192 BPF_PROG_TYPE_SK_REUSEPORT, 193 BPF_PROG_TYPE_FLOW_DISSECTOR, 194 BPF_PROG_TYPE_CGROUP_SYSCTL, 195 BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, 196 BPF_PROG_TYPE_CGROUP_SOCKOPT, 197 BPF_PROG_TYPE_TRACING, 198 BPF_PROG_TYPE_STRUCT_OPS, 199 BPF_PROG_TYPE_EXT, 200 BPF_PROG_TYPE_LSM, 201 BPF_PROG_TYPE_SK_LOOKUP, 202 }; 203 204 enum bpf_attach_type { 205 BPF_CGROUP_INET_INGRESS, 206 BPF_CGROUP_INET_EGRESS, 207 BPF_CGROUP_INET_SOCK_CREATE, 208 BPF_CGROUP_SOCK_OPS, 209 BPF_SK_SKB_STREAM_PARSER, 210 BPF_SK_SKB_STREAM_VERDICT, 211 BPF_CGROUP_DEVICE, 212 BPF_SK_MSG_VERDICT, 213 BPF_CGROUP_INET4_BIND, 214 BPF_CGROUP_INET6_BIND, 215 BPF_CGROUP_INET4_CONNECT, 216 BPF_CGROUP_INET6_CONNECT, 217 BPF_CGROUP_INET4_POST_BIND, 218 BPF_CGROUP_INET6_POST_BIND, 219 BPF_CGROUP_UDP4_SENDMSG, 220 BPF_CGROUP_UDP6_SENDMSG, 221 BPF_LIRC_MODE2, 222 BPF_FLOW_DISSECTOR, 223 BPF_CGROUP_SYSCTL, 224 BPF_CGROUP_UDP4_RECVMSG, 225 BPF_CGROUP_UDP6_RECVMSG, 226 BPF_CGROUP_GETSOCKOPT, 227 BPF_CGROUP_SETSOCKOPT, 228 BPF_TRACE_RAW_TP, 229 BPF_TRACE_FENTRY, 230 BPF_TRACE_FEXIT, 231 BPF_MODIFY_RETURN, 232 BPF_LSM_MAC, 233 BPF_TRACE_ITER, 234 BPF_CGROUP_INET4_GETPEERNAME, 235 BPF_CGROUP_INET6_GETPEERNAME, 236 BPF_CGROUP_INET4_GETSOCKNAME, 237 BPF_CGROUP_INET6_GETSOCKNAME, 238 BPF_XDP_DEVMAP, 239 BPF_CGROUP_INET_SOCK_RELEASE, 240 BPF_XDP_CPUMAP, 241 BPF_SK_LOOKUP, 242 BPF_XDP, 243 __MAX_BPF_ATTACH_TYPE 244 }; 245 246 #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE 247 248 enum bpf_link_type { 249 BPF_LINK_TYPE_UNSPEC = 0, 250 BPF_LINK_TYPE_RAW_TRACEPOINT = 1, 251 BPF_LINK_TYPE_TRACING = 2, 252 BPF_LINK_TYPE_CGROUP = 3, 253 BPF_LINK_TYPE_ITER = 4, 254 BPF_LINK_TYPE_NETNS = 5, 255 BPF_LINK_TYPE_XDP = 6, 256 257 MAX_BPF_LINK_TYPE, 258 }; 259 260 /* cgroup-bpf attach flags used in BPF_PROG_ATTACH command 261 * 262 * NONE(default): No further bpf programs allowed in the subtree. 263 * 264 * BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program, 265 * the program in this cgroup yields to sub-cgroup program. 266 * 267 * BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program, 268 * that cgroup program gets run in addition to the program in this cgroup. 269 * 270 * Only one program is allowed to be attached to a cgroup with 271 * NONE or BPF_F_ALLOW_OVERRIDE flag. 272 * Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will 273 * release old program and attach the new one. Attach flags has to match. 274 * 275 * Multiple programs are allowed to be attached to a cgroup with 276 * BPF_F_ALLOW_MULTI flag. They are executed in FIFO order 277 * (those that were attached first, run first) 278 * The programs of sub-cgroup are executed first, then programs of 279 * this cgroup and then programs of parent cgroup. 280 * When children program makes decision (like picking TCP CA or sock bind) 281 * parent program has a chance to override it. 282 * 283 * With BPF_F_ALLOW_MULTI a new program is added to the end of the list of 284 * programs for a cgroup. Though it's possible to replace an old program at 285 * any position by also specifying BPF_F_REPLACE flag and position itself in 286 * replace_bpf_fd attribute. Old program at this position will be released. 287 * 288 * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups. 289 * A cgroup with NONE doesn't allow any programs in sub-cgroups. 290 * Ex1: 291 * cgrp1 (MULTI progs A, B) -> 292 * cgrp2 (OVERRIDE prog C) -> 293 * cgrp3 (MULTI prog D) -> 294 * cgrp4 (OVERRIDE prog E) -> 295 * cgrp5 (NONE prog F) 296 * the event in cgrp5 triggers execution of F,D,A,B in that order. 297 * if prog F is detached, the execution is E,D,A,B 298 * if prog F and D are detached, the execution is E,A,B 299 * if prog F, E and D are detached, the execution is C,A,B 300 * 301 * All eligible programs are executed regardless of return code from 302 * earlier programs. 303 */ 304 #define BPF_F_ALLOW_OVERRIDE (1U << 0) 305 #define BPF_F_ALLOW_MULTI (1U << 1) 306 #define BPF_F_REPLACE (1U << 2) 307 308 /* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the 309 * verifier will perform strict alignment checking as if the kernel 310 * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set, 311 * and NET_IP_ALIGN defined to 2. 312 */ 313 #define BPF_F_STRICT_ALIGNMENT (1U << 0) 314 315 /* If BPF_F_ANY_ALIGNMENT is used in BPF_PROF_LOAD command, the 316 * verifier will allow any alignment whatsoever. On platforms 317 * with strict alignment requirements for loads ands stores (such 318 * as sparc and mips) the verifier validates that all loads and 319 * stores provably follow this requirement. This flag turns that 320 * checking and enforcement off. 321 * 322 * It is mostly used for testing when we want to validate the 323 * context and memory access aspects of the verifier, but because 324 * of an unaligned access the alignment check would trigger before 325 * the one we are interested in. 326 */ 327 #define BPF_F_ANY_ALIGNMENT (1U << 1) 328 329 /* BPF_F_TEST_RND_HI32 is used in BPF_PROG_LOAD command for testing purpose. 330 * Verifier does sub-register def/use analysis and identifies instructions whose 331 * def only matters for low 32-bit, high 32-bit is never referenced later 332 * through implicit zero extension. Therefore verifier notifies JIT back-ends 333 * that it is safe to ignore clearing high 32-bit for these instructions. This 334 * saves some back-ends a lot of code-gen. However such optimization is not 335 * necessary on some arches, for example x86_64, arm64 etc, whose JIT back-ends 336 * hence hasn't used verifier's analysis result. But, we really want to have a 337 * way to be able to verify the correctness of the described optimization on 338 * x86_64 on which testsuites are frequently exercised. 339 * 340 * So, this flag is introduced. Once it is set, verifier will randomize high 341 * 32-bit for those instructions who has been identified as safe to ignore them. 342 * Then, if verifier is not doing correct analysis, such randomization will 343 * regress tests to expose bugs. 344 */ 345 #define BPF_F_TEST_RND_HI32 (1U << 2) 346 347 /* The verifier internal test flag. Behavior is undefined */ 348 #define BPF_F_TEST_STATE_FREQ (1U << 3) 349 350 /* If BPF_F_SLEEPABLE is used in BPF_PROG_LOAD command, the verifier will 351 * restrict map and helper usage for such programs. Sleepable BPF programs can 352 * only be attached to hooks where kernel execution context allows sleeping. 353 * Such programs are allowed to use helpers that may sleep like 354 * bpf_copy_from_user(). 355 */ 356 #define BPF_F_SLEEPABLE (1U << 4) 357 358 /* When BPF ldimm64's insn[0].src_reg != 0 then this can have 359 * two extensions: 360 * 361 * insn[0].src_reg: BPF_PSEUDO_MAP_FD BPF_PSEUDO_MAP_VALUE 362 * insn[0].imm: map fd map fd 363 * insn[1].imm: 0 offset into value 364 * insn[0].off: 0 0 365 * insn[1].off: 0 0 366 * ldimm64 rewrite: address of map address of map[0]+offset 367 * verifier type: CONST_PTR_TO_MAP PTR_TO_MAP_VALUE 368 */ 369 #define BPF_PSEUDO_MAP_FD 1 370 #define BPF_PSEUDO_MAP_VALUE 2 371 372 /* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative 373 * offset to another bpf function 374 */ 375 #define BPF_PSEUDO_CALL 1 376 377 /* flags for BPF_MAP_UPDATE_ELEM command */ 378 enum { 379 BPF_ANY = 0, /* create new element or update existing */ 380 BPF_NOEXIST = 1, /* create new element if it didn't exist */ 381 BPF_EXIST = 2, /* update existing element */ 382 BPF_F_LOCK = 4, /* spin_lock-ed map_lookup/map_update */ 383 }; 384 385 /* flags for BPF_MAP_CREATE command */ 386 enum { 387 BPF_F_NO_PREALLOC = (1U << 0), 388 /* Instead of having one common LRU list in the 389 * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list 390 * which can scale and perform better. 391 * Note, the LRU nodes (including free nodes) cannot be moved 392 * across different LRU lists. 393 */ 394 BPF_F_NO_COMMON_LRU = (1U << 1), 395 /* Specify numa node during map creation */ 396 BPF_F_NUMA_NODE = (1U << 2), 397 398 /* Flags for accessing BPF object from syscall side. */ 399 BPF_F_RDONLY = (1U << 3), 400 BPF_F_WRONLY = (1U << 4), 401 402 /* Flag for stack_map, store build_id+offset instead of pointer */ 403 BPF_F_STACK_BUILD_ID = (1U << 5), 404 405 /* Zero-initialize hash function seed. This should only be used for testing. */ 406 BPF_F_ZERO_SEED = (1U << 6), 407 408 /* Flags for accessing BPF object from program side. */ 409 BPF_F_RDONLY_PROG = (1U << 7), 410 BPF_F_WRONLY_PROG = (1U << 8), 411 412 /* Clone map from listener for newly accepted socket */ 413 BPF_F_CLONE = (1U << 9), 414 415 /* Enable memory-mapping BPF map */ 416 BPF_F_MMAPABLE = (1U << 10), 417 }; 418 419 /* Flags for BPF_PROG_QUERY. */ 420 421 /* Query effective (directly attached + inherited from ancestor cgroups) 422 * programs that will be executed for events within a cgroup. 423 * attach_flags with this flag are returned only for directly attached programs. 424 */ 425 #define BPF_F_QUERY_EFFECTIVE (1U << 0) 426 427 /* Flags for BPF_PROG_TEST_RUN */ 428 429 /* If set, run the test on the cpu specified by bpf_attr.test.cpu */ 430 #define BPF_F_TEST_RUN_ON_CPU (1U << 0) 431 432 /* type for BPF_ENABLE_STATS */ 433 enum bpf_stats_type { 434 /* enabled run_time_ns and run_cnt */ 435 BPF_STATS_RUN_TIME = 0, 436 }; 437 438 enum bpf_stack_build_id_status { 439 /* user space need an empty entry to identify end of a trace */ 440 BPF_STACK_BUILD_ID_EMPTY = 0, 441 /* with valid build_id and offset */ 442 BPF_STACK_BUILD_ID_VALID = 1, 443 /* couldn't get build_id, fallback to ip */ 444 BPF_STACK_BUILD_ID_IP = 2, 445 }; 446 447 #define BPF_BUILD_ID_SIZE 20 448 struct bpf_stack_build_id { 449 __s32 status; 450 unsigned char build_id[BPF_BUILD_ID_SIZE]; 451 union { 452 __u64 offset; 453 __u64 ip; 454 }; 455 }; 456 457 #define BPF_OBJ_NAME_LEN 16U 458 459 union bpf_attr { 460 struct { /* anonymous struct used by BPF_MAP_CREATE command */ 461 __u32 map_type; /* one of enum bpf_map_type */ 462 __u32 key_size; /* size of key in bytes */ 463 __u32 value_size; /* size of value in bytes */ 464 __u32 max_entries; /* max number of entries in a map */ 465 __u32 map_flags; /* BPF_MAP_CREATE related 466 * flags defined above. 467 */ 468 __u32 inner_map_fd; /* fd pointing to the inner map */ 469 __u32 numa_node; /* numa node (effective only if 470 * BPF_F_NUMA_NODE is set). 471 */ 472 char map_name[BPF_OBJ_NAME_LEN]; 473 __u32 map_ifindex; /* ifindex of netdev to create on */ 474 __u32 btf_fd; /* fd pointing to a BTF type data */ 475 __u32 btf_key_type_id; /* BTF type_id of the key */ 476 __u32 btf_value_type_id; /* BTF type_id of the value */ 477 __u32 btf_vmlinux_value_type_id;/* BTF type_id of a kernel- 478 * struct stored as the 479 * map value 480 */ 481 }; 482 483 struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ 484 __u32 map_fd; 485 __aligned_u64 key; 486 union { 487 __aligned_u64 value; 488 __aligned_u64 next_key; 489 }; 490 __u64 flags; 491 }; 492 493 struct { /* struct used by BPF_MAP_*_BATCH commands */ 494 __aligned_u64 in_batch; /* start batch, 495 * NULL to start from beginning 496 */ 497 __aligned_u64 out_batch; /* output: next start batch */ 498 __aligned_u64 keys; 499 __aligned_u64 values; 500 __u32 count; /* input/output: 501 * input: # of key/value 502 * elements 503 * output: # of filled elements 504 */ 505 __u32 map_fd; 506 __u64 elem_flags; 507 __u64 flags; 508 } batch; 509 510 struct { /* anonymous struct used by BPF_PROG_LOAD command */ 511 __u32 prog_type; /* one of enum bpf_prog_type */ 512 __u32 insn_cnt; 513 __aligned_u64 insns; 514 __aligned_u64 license; 515 __u32 log_level; /* verbosity level of verifier */ 516 __u32 log_size; /* size of user buffer */ 517 __aligned_u64 log_buf; /* user supplied buffer */ 518 __u32 kern_version; /* not used */ 519 __u32 prog_flags; 520 char prog_name[BPF_OBJ_NAME_LEN]; 521 __u32 prog_ifindex; /* ifindex of netdev to prep for */ 522 /* For some prog types expected attach type must be known at 523 * load time to verify attach type specific parts of prog 524 * (context accesses, allowed helpers, etc). 525 */ 526 __u32 expected_attach_type; 527 __u32 prog_btf_fd; /* fd pointing to BTF type data */ 528 __u32 func_info_rec_size; /* userspace bpf_func_info size */ 529 __aligned_u64 func_info; /* func info */ 530 __u32 func_info_cnt; /* number of bpf_func_info records */ 531 __u32 line_info_rec_size; /* userspace bpf_line_info size */ 532 __aligned_u64 line_info; /* line info */ 533 __u32 line_info_cnt; /* number of bpf_line_info records */ 534 __u32 attach_btf_id; /* in-kernel BTF type id to attach to */ 535 __u32 attach_prog_fd; /* 0 to attach to vmlinux */ 536 }; 537 538 struct { /* anonymous struct used by BPF_OBJ_* commands */ 539 __aligned_u64 pathname; 540 __u32 bpf_fd; 541 __u32 file_flags; 542 }; 543 544 struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */ 545 __u32 target_fd; /* container object to attach to */ 546 __u32 attach_bpf_fd; /* eBPF program to attach */ 547 __u32 attach_type; 548 __u32 attach_flags; 549 __u32 replace_bpf_fd; /* previously attached eBPF 550 * program to replace if 551 * BPF_F_REPLACE is used 552 */ 553 }; 554 555 struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */ 556 __u32 prog_fd; 557 __u32 retval; 558 __u32 data_size_in; /* input: len of data_in */ 559 __u32 data_size_out; /* input/output: len of data_out 560 * returns ENOSPC if data_out 561 * is too small. 562 */ 563 __aligned_u64 data_in; 564 __aligned_u64 data_out; 565 __u32 repeat; 566 __u32 duration; 567 __u32 ctx_size_in; /* input: len of ctx_in */ 568 __u32 ctx_size_out; /* input/output: len of ctx_out 569 * returns ENOSPC if ctx_out 570 * is too small. 571 */ 572 __aligned_u64 ctx_in; 573 __aligned_u64 ctx_out; 574 __u32 flags; 575 __u32 cpu; 576 } test; 577 578 struct { /* anonymous struct used by BPF_*_GET_*_ID */ 579 union { 580 __u32 start_id; 581 __u32 prog_id; 582 __u32 map_id; 583 __u32 btf_id; 584 __u32 link_id; 585 }; 586 __u32 next_id; 587 __u32 open_flags; 588 }; 589 590 struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */ 591 __u32 bpf_fd; 592 __u32 info_len; 593 __aligned_u64 info; 594 } info; 595 596 struct { /* anonymous struct used by BPF_PROG_QUERY command */ 597 __u32 target_fd; /* container object to query */ 598 __u32 attach_type; 599 __u32 query_flags; 600 __u32 attach_flags; 601 __aligned_u64 prog_ids; 602 __u32 prog_cnt; 603 } query; 604 605 struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */ 606 __u64 name; 607 __u32 prog_fd; 608 } raw_tracepoint; 609 610 struct { /* anonymous struct for BPF_BTF_LOAD */ 611 __aligned_u64 btf; 612 __aligned_u64 btf_log_buf; 613 __u32 btf_size; 614 __u32 btf_log_size; 615 __u32 btf_log_level; 616 }; 617 618 struct { 619 __u32 pid; /* input: pid */ 620 __u32 fd; /* input: fd */ 621 __u32 flags; /* input: flags */ 622 __u32 buf_len; /* input/output: buf len */ 623 __aligned_u64 buf; /* input/output: 624 * tp_name for tracepoint 625 * symbol for kprobe 626 * filename for uprobe 627 */ 628 __u32 prog_id; /* output: prod_id */ 629 __u32 fd_type; /* output: BPF_FD_TYPE_* */ 630 __u64 probe_offset; /* output: probe_offset */ 631 __u64 probe_addr; /* output: probe_addr */ 632 } task_fd_query; 633 634 struct { /* struct used by BPF_LINK_CREATE command */ 635 __u32 prog_fd; /* eBPF program to attach */ 636 union { 637 __u32 target_fd; /* object to attach to */ 638 __u32 target_ifindex; /* target ifindex */ 639 }; 640 __u32 attach_type; /* attach type */ 641 __u32 flags; /* extra flags */ 642 __aligned_u64 iter_info; /* extra bpf_iter_link_info */ 643 __u32 iter_info_len; /* iter_info length */ 644 } link_create; 645 646 struct { /* struct used by BPF_LINK_UPDATE command */ 647 __u32 link_fd; /* link fd */ 648 /* new program fd to update link with */ 649 __u32 new_prog_fd; 650 __u32 flags; /* extra flags */ 651 /* expected link's program fd; is specified only if 652 * BPF_F_REPLACE flag is set in flags */ 653 __u32 old_prog_fd; 654 } link_update; 655 656 struct { 657 __u32 link_fd; 658 } link_detach; 659 660 struct { /* struct used by BPF_ENABLE_STATS command */ 661 __u32 type; 662 } enable_stats; 663 664 struct { /* struct used by BPF_ITER_CREATE command */ 665 __u32 link_fd; 666 __u32 flags; 667 } iter_create; 668 669 struct { /* struct used by BPF_PROG_BIND_MAP command */ 670 __u32 prog_fd; 671 __u32 map_fd; 672 __u32 flags; /* extra flags */ 673 } prog_bind_map; 674 675 } __attribute__((aligned(8))); 676 677 /* The description below is an attempt at providing documentation to eBPF 678 * developers about the multiple available eBPF helper functions. It can be 679 * parsed and used to produce a manual page. The workflow is the following, 680 * and requires the rst2man utility: 681 * 682 * $ ./scripts/bpf_helpers_doc.py \ 683 * --filename include/uapi/linux/bpf.h > /tmp/bpf-helpers.rst 684 * $ rst2man /tmp/bpf-helpers.rst > /tmp/bpf-helpers.7 685 * $ man /tmp/bpf-helpers.7 686 * 687 * Note that in order to produce this external documentation, some RST 688 * formatting is used in the descriptions to get "bold" and "italics" in 689 * manual pages. Also note that the few trailing white spaces are 690 * intentional, removing them would break paragraphs for rst2man. 691 * 692 * Start of BPF helper function descriptions: 693 * 694 * void *bpf_map_lookup_elem(struct bpf_map *map, const void *key) 695 * Description 696 * Perform a lookup in *map* for an entry associated to *key*. 697 * Return 698 * Map value associated to *key*, or **NULL** if no entry was 699 * found. 700 * 701 * long bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags) 702 * Description 703 * Add or update the value of the entry associated to *key* in 704 * *map* with *value*. *flags* is one of: 705 * 706 * **BPF_NOEXIST** 707 * The entry for *key* must not exist in the map. 708 * **BPF_EXIST** 709 * The entry for *key* must already exist in the map. 710 * **BPF_ANY** 711 * No condition on the existence of the entry for *key*. 712 * 713 * Flag value **BPF_NOEXIST** cannot be used for maps of types 714 * **BPF_MAP_TYPE_ARRAY** or **BPF_MAP_TYPE_PERCPU_ARRAY** (all 715 * elements always exist), the helper would return an error. 716 * Return 717 * 0 on success, or a negative error in case of failure. 718 * 719 * long bpf_map_delete_elem(struct bpf_map *map, const void *key) 720 * Description 721 * Delete entry with *key* from *map*. 722 * Return 723 * 0 on success, or a negative error in case of failure. 724 * 725 * long bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr) 726 * Description 727 * For tracing programs, safely attempt to read *size* bytes from 728 * kernel space address *unsafe_ptr* and store the data in *dst*. 729 * 730 * Generally, use **bpf_probe_read_user**\ () or 731 * **bpf_probe_read_kernel**\ () instead. 732 * Return 733 * 0 on success, or a negative error in case of failure. 734 * 735 * u64 bpf_ktime_get_ns(void) 736 * Description 737 * Return the time elapsed since system boot, in nanoseconds. 738 * Does not include time the system was suspended. 739 * See: **clock_gettime**\ (**CLOCK_MONOTONIC**) 740 * Return 741 * Current *ktime*. 742 * 743 * long bpf_trace_printk(const char *fmt, u32 fmt_size, ...) 744 * Description 745 * This helper is a "printk()-like" facility for debugging. It 746 * prints a message defined by format *fmt* (of size *fmt_size*) 747 * to file *\/sys/kernel/debug/tracing/trace* from DebugFS, if 748 * available. It can take up to three additional **u64** 749 * arguments (as an eBPF helpers, the total number of arguments is 750 * limited to five). 751 * 752 * Each time the helper is called, it appends a line to the trace. 753 * Lines are discarded while *\/sys/kernel/debug/tracing/trace* is 754 * open, use *\/sys/kernel/debug/tracing/trace_pipe* to avoid this. 755 * The format of the trace is customizable, and the exact output 756 * one will get depends on the options set in 757 * *\/sys/kernel/debug/tracing/trace_options* (see also the 758 * *README* file under the same directory). However, it usually 759 * defaults to something like: 760 * 761 * :: 762 * 763 * telnet-470 [001] .N.. 419421.045894: 0x00000001: <formatted msg> 764 * 765 * In the above: 766 * 767 * * ``telnet`` is the name of the current task. 768 * * ``470`` is the PID of the current task. 769 * * ``001`` is the CPU number on which the task is 770 * running. 771 * * In ``.N..``, each character refers to a set of 772 * options (whether irqs are enabled, scheduling 773 * options, whether hard/softirqs are running, level of 774 * preempt_disabled respectively). **N** means that 775 * **TIF_NEED_RESCHED** and **PREEMPT_NEED_RESCHED** 776 * are set. 777 * * ``419421.045894`` is a timestamp. 778 * * ``0x00000001`` is a fake value used by BPF for the 779 * instruction pointer register. 780 * * ``<formatted msg>`` is the message formatted with 781 * *fmt*. 782 * 783 * The conversion specifiers supported by *fmt* are similar, but 784 * more limited than for printk(). They are **%d**, **%i**, 785 * **%u**, **%x**, **%ld**, **%li**, **%lu**, **%lx**, **%lld**, 786 * **%lli**, **%llu**, **%llx**, **%p**, **%s**. No modifier (size 787 * of field, padding with zeroes, etc.) is available, and the 788 * helper will return **-EINVAL** (but print nothing) if it 789 * encounters an unknown specifier. 790 * 791 * Also, note that **bpf_trace_printk**\ () is slow, and should 792 * only be used for debugging purposes. For this reason, a notice 793 * block (spanning several lines) is printed to kernel logs and 794 * states that the helper should not be used "for production use" 795 * the first time this helper is used (or more precisely, when 796 * **trace_printk**\ () buffers are allocated). For passing values 797 * to user space, perf events should be preferred. 798 * Return 799 * The number of bytes written to the buffer, or a negative error 800 * in case of failure. 801 * 802 * u32 bpf_get_prandom_u32(void) 803 * Description 804 * Get a pseudo-random number. 805 * 806 * From a security point of view, this helper uses its own 807 * pseudo-random internal state, and cannot be used to infer the 808 * seed of other random functions in the kernel. However, it is 809 * essential to note that the generator used by the helper is not 810 * cryptographically secure. 811 * Return 812 * A random 32-bit unsigned value. 813 * 814 * u32 bpf_get_smp_processor_id(void) 815 * Description 816 * Get the SMP (symmetric multiprocessing) processor id. Note that 817 * all programs run with preemption disabled, which means that the 818 * SMP processor id is stable during all the execution of the 819 * program. 820 * Return 821 * The SMP id of the processor running the program. 822 * 823 * long bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags) 824 * Description 825 * Store *len* bytes from address *from* into the packet 826 * associated to *skb*, at *offset*. *flags* are a combination of 827 * **BPF_F_RECOMPUTE_CSUM** (automatically recompute the 828 * checksum for the packet after storing the bytes) and 829 * **BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\ 830 * **->swhash** and *skb*\ **->l4hash** to 0). 831 * 832 * A call to this helper is susceptible to change the underlying 833 * packet buffer. Therefore, at load time, all checks on pointers 834 * previously done by the verifier are invalidated and must be 835 * performed again, if the helper is used in combination with 836 * direct packet access. 837 * Return 838 * 0 on success, or a negative error in case of failure. 839 * 840 * long bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size) 841 * Description 842 * Recompute the layer 3 (e.g. IP) checksum for the packet 843 * associated to *skb*. Computation is incremental, so the helper 844 * must know the former value of the header field that was 845 * modified (*from*), the new value of this field (*to*), and the 846 * number of bytes (2 or 4) for this field, stored in *size*. 847 * Alternatively, it is possible to store the difference between 848 * the previous and the new values of the header field in *to*, by 849 * setting *from* and *size* to 0. For both methods, *offset* 850 * indicates the location of the IP checksum within the packet. 851 * 852 * This helper works in combination with **bpf_csum_diff**\ (), 853 * which does not update the checksum in-place, but offers more 854 * flexibility and can handle sizes larger than 2 or 4 for the 855 * checksum to update. 856 * 857 * A call to this helper is susceptible to change the underlying 858 * packet buffer. Therefore, at load time, all checks on pointers 859 * previously done by the verifier are invalidated and must be 860 * performed again, if the helper is used in combination with 861 * direct packet access. 862 * Return 863 * 0 on success, or a negative error in case of failure. 864 * 865 * long bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags) 866 * Description 867 * Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the 868 * packet associated to *skb*. Computation is incremental, so the 869 * helper must know the former value of the header field that was 870 * modified (*from*), the new value of this field (*to*), and the 871 * number of bytes (2 or 4) for this field, stored on the lowest 872 * four bits of *flags*. Alternatively, it is possible to store 873 * the difference between the previous and the new values of the 874 * header field in *to*, by setting *from* and the four lowest 875 * bits of *flags* to 0. For both methods, *offset* indicates the 876 * location of the IP checksum within the packet. In addition to 877 * the size of the field, *flags* can be added (bitwise OR) actual 878 * flags. With **BPF_F_MARK_MANGLED_0**, a null checksum is left 879 * untouched (unless **BPF_F_MARK_ENFORCE** is added as well), and 880 * for updates resulting in a null checksum the value is set to 881 * **CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates 882 * the checksum is to be computed against a pseudo-header. 883 * 884 * This helper works in combination with **bpf_csum_diff**\ (), 885 * which does not update the checksum in-place, but offers more 886 * flexibility and can handle sizes larger than 2 or 4 for the 887 * checksum to update. 888 * 889 * A call to this helper is susceptible to change the underlying 890 * packet buffer. Therefore, at load time, all checks on pointers 891 * previously done by the verifier are invalidated and must be 892 * performed again, if the helper is used in combination with 893 * direct packet access. 894 * Return 895 * 0 on success, or a negative error in case of failure. 896 * 897 * long bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index) 898 * Description 899 * This special helper is used to trigger a "tail call", or in 900 * other words, to jump into another eBPF program. The same stack 901 * frame is used (but values on stack and in registers for the 902 * caller are not accessible to the callee). This mechanism allows 903 * for program chaining, either for raising the maximum number of 904 * available eBPF instructions, or to execute given programs in 905 * conditional blocks. For security reasons, there is an upper 906 * limit to the number of successive tail calls that can be 907 * performed. 908 * 909 * Upon call of this helper, the program attempts to jump into a 910 * program referenced at index *index* in *prog_array_map*, a 911 * special map of type **BPF_MAP_TYPE_PROG_ARRAY**, and passes 912 * *ctx*, a pointer to the context. 913 * 914 * If the call succeeds, the kernel immediately runs the first 915 * instruction of the new program. This is not a function call, 916 * and it never returns to the previous program. If the call 917 * fails, then the helper has no effect, and the caller continues 918 * to run its subsequent instructions. A call can fail if the 919 * destination program for the jump does not exist (i.e. *index* 920 * is superior to the number of entries in *prog_array_map*), or 921 * if the maximum number of tail calls has been reached for this 922 * chain of programs. This limit is defined in the kernel by the 923 * macro **MAX_TAIL_CALL_CNT** (not accessible to user space), 924 * which is currently set to 32. 925 * Return 926 * 0 on success, or a negative error in case of failure. 927 * 928 * long bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags) 929 * Description 930 * Clone and redirect the packet associated to *skb* to another 931 * net device of index *ifindex*. Both ingress and egress 932 * interfaces can be used for redirection. The **BPF_F_INGRESS** 933 * value in *flags* is used to make the distinction (ingress path 934 * is selected if the flag is present, egress path otherwise). 935 * This is the only flag supported for now. 936 * 937 * In comparison with **bpf_redirect**\ () helper, 938 * **bpf_clone_redirect**\ () has the associated cost of 939 * duplicating the packet buffer, but this can be executed out of 940 * the eBPF program. Conversely, **bpf_redirect**\ () is more 941 * efficient, but it is handled through an action code where the 942 * redirection happens only after the eBPF program has returned. 943 * 944 * A call to this helper is susceptible to change the underlying 945 * packet buffer. Therefore, at load time, all checks on pointers 946 * previously done by the verifier are invalidated and must be 947 * performed again, if the helper is used in combination with 948 * direct packet access. 949 * Return 950 * 0 on success, or a negative error in case of failure. 951 * 952 * u64 bpf_get_current_pid_tgid(void) 953 * Return 954 * A 64-bit integer containing the current tgid and pid, and 955 * created as such: 956 * *current_task*\ **->tgid << 32 \|** 957 * *current_task*\ **->pid**. 958 * 959 * u64 bpf_get_current_uid_gid(void) 960 * Return 961 * A 64-bit integer containing the current GID and UID, and 962 * created as such: *current_gid* **<< 32 \|** *current_uid*. 963 * 964 * long bpf_get_current_comm(void *buf, u32 size_of_buf) 965 * Description 966 * Copy the **comm** attribute of the current task into *buf* of 967 * *size_of_buf*. The **comm** attribute contains the name of 968 * the executable (excluding the path) for the current task. The 969 * *size_of_buf* must be strictly positive. On success, the 970 * helper makes sure that the *buf* is NUL-terminated. On failure, 971 * it is filled with zeroes. 972 * Return 973 * 0 on success, or a negative error in case of failure. 974 * 975 * u32 bpf_get_cgroup_classid(struct sk_buff *skb) 976 * Description 977 * Retrieve the classid for the current task, i.e. for the net_cls 978 * cgroup to which *skb* belongs. 979 * 980 * This helper can be used on TC egress path, but not on ingress. 981 * 982 * The net_cls cgroup provides an interface to tag network packets 983 * based on a user-provided identifier for all traffic coming from 984 * the tasks belonging to the related cgroup. See also the related 985 * kernel documentation, available from the Linux sources in file 986 * *Documentation/admin-guide/cgroup-v1/net_cls.rst*. 987 * 988 * The Linux kernel has two versions for cgroups: there are 989 * cgroups v1 and cgroups v2. Both are available to users, who can 990 * use a mixture of them, but note that the net_cls cgroup is for 991 * cgroup v1 only. This makes it incompatible with BPF programs 992 * run on cgroups, which is a cgroup-v2-only feature (a socket can 993 * only hold data for one version of cgroups at a time). 994 * 995 * This helper is only available is the kernel was compiled with 996 * the **CONFIG_CGROUP_NET_CLASSID** configuration option set to 997 * "**y**" or to "**m**". 998 * Return 999 * The classid, or 0 for the default unconfigured classid. 1000 * 1001 * long bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) 1002 * Description 1003 * Push a *vlan_tci* (VLAN tag control information) of protocol 1004 * *vlan_proto* to the packet associated to *skb*, then update 1005 * the checksum. Note that if *vlan_proto* is different from 1006 * **ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to 1007 * be **ETH_P_8021Q**. 1008 * 1009 * A call to this helper is susceptible to change the underlying 1010 * packet buffer. Therefore, at load time, all checks on pointers 1011 * previously done by the verifier are invalidated and must be 1012 * performed again, if the helper is used in combination with 1013 * direct packet access. 1014 * Return 1015 * 0 on success, or a negative error in case of failure. 1016 * 1017 * long bpf_skb_vlan_pop(struct sk_buff *skb) 1018 * Description 1019 * Pop a VLAN header from the packet associated to *skb*. 1020 * 1021 * A call to this helper is susceptible to change the underlying 1022 * packet buffer. Therefore, at load time, all checks on pointers 1023 * previously done by the verifier are invalidated and must be 1024 * performed again, if the helper is used in combination with 1025 * direct packet access. 1026 * Return 1027 * 0 on success, or a negative error in case of failure. 1028 * 1029 * long bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) 1030 * Description 1031 * Get tunnel metadata. This helper takes a pointer *key* to an 1032 * empty **struct bpf_tunnel_key** of **size**, that will be 1033 * filled with tunnel metadata for the packet associated to *skb*. 1034 * The *flags* can be set to **BPF_F_TUNINFO_IPV6**, which 1035 * indicates that the tunnel is based on IPv6 protocol instead of 1036 * IPv4. 1037 * 1038 * The **struct bpf_tunnel_key** is an object that generalizes the 1039 * principal parameters used by various tunneling protocols into a 1040 * single struct. This way, it can be used to easily make a 1041 * decision based on the contents of the encapsulation header, 1042 * "summarized" in this struct. In particular, it holds the IP 1043 * address of the remote end (IPv4 or IPv6, depending on the case) 1044 * in *key*\ **->remote_ipv4** or *key*\ **->remote_ipv6**. Also, 1045 * this struct exposes the *key*\ **->tunnel_id**, which is 1046 * generally mapped to a VNI (Virtual Network Identifier), making 1047 * it programmable together with the **bpf_skb_set_tunnel_key**\ 1048 * () helper. 1049 * 1050 * Let's imagine that the following code is part of a program 1051 * attached to the TC ingress interface, on one end of a GRE 1052 * tunnel, and is supposed to filter out all messages coming from 1053 * remote ends with IPv4 address other than 10.0.0.1: 1054 * 1055 * :: 1056 * 1057 * int ret; 1058 * struct bpf_tunnel_key key = {}; 1059 * 1060 * ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0); 1061 * if (ret < 0) 1062 * return TC_ACT_SHOT; // drop packet 1063 * 1064 * if (key.remote_ipv4 != 0x0a000001) 1065 * return TC_ACT_SHOT; // drop packet 1066 * 1067 * return TC_ACT_OK; // accept packet 1068 * 1069 * This interface can also be used with all encapsulation devices 1070 * that can operate in "collect metadata" mode: instead of having 1071 * one network device per specific configuration, the "collect 1072 * metadata" mode only requires a single device where the 1073 * configuration can be extracted from this helper. 1074 * 1075 * This can be used together with various tunnels such as VXLan, 1076 * Geneve, GRE or IP in IP (IPIP). 1077 * Return 1078 * 0 on success, or a negative error in case of failure. 1079 * 1080 * long bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) 1081 * Description 1082 * Populate tunnel metadata for packet associated to *skb.* The 1083 * tunnel metadata is set to the contents of *key*, of *size*. The 1084 * *flags* can be set to a combination of the following values: 1085 * 1086 * **BPF_F_TUNINFO_IPV6** 1087 * Indicate that the tunnel is based on IPv6 protocol 1088 * instead of IPv4. 1089 * **BPF_F_ZERO_CSUM_TX** 1090 * For IPv4 packets, add a flag to tunnel metadata 1091 * indicating that checksum computation should be skipped 1092 * and checksum set to zeroes. 1093 * **BPF_F_DONT_FRAGMENT** 1094 * Add a flag to tunnel metadata indicating that the 1095 * packet should not be fragmented. 1096 * **BPF_F_SEQ_NUMBER** 1097 * Add a flag to tunnel metadata indicating that a 1098 * sequence number should be added to tunnel header before 1099 * sending the packet. This flag was added for GRE 1100 * encapsulation, but might be used with other protocols 1101 * as well in the future. 1102 * 1103 * Here is a typical usage on the transmit path: 1104 * 1105 * :: 1106 * 1107 * struct bpf_tunnel_key key; 1108 * populate key ... 1109 * bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0); 1110 * bpf_clone_redirect(skb, vxlan_dev_ifindex, 0); 1111 * 1112 * See also the description of the **bpf_skb_get_tunnel_key**\ () 1113 * helper for additional information. 1114 * Return 1115 * 0 on success, or a negative error in case of failure. 1116 * 1117 * u64 bpf_perf_event_read(struct bpf_map *map, u64 flags) 1118 * Description 1119 * Read the value of a perf event counter. This helper relies on a 1120 * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of 1121 * the perf event counter is selected when *map* is updated with 1122 * perf event file descriptors. The *map* is an array whose size 1123 * is the number of available CPUs, and each cell contains a value 1124 * relative to one CPU. The value to retrieve is indicated by 1125 * *flags*, that contains the index of the CPU to look up, masked 1126 * with **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to 1127 * **BPF_F_CURRENT_CPU** to indicate that the value for the 1128 * current CPU should be retrieved. 1129 * 1130 * Note that before Linux 4.13, only hardware perf event can be 1131 * retrieved. 1132 * 1133 * Also, be aware that the newer helper 1134 * **bpf_perf_event_read_value**\ () is recommended over 1135 * **bpf_perf_event_read**\ () in general. The latter has some ABI 1136 * quirks where error and counter value are used as a return code 1137 * (which is wrong to do since ranges may overlap). This issue is 1138 * fixed with **bpf_perf_event_read_value**\ (), which at the same 1139 * time provides more features over the **bpf_perf_event_read**\ 1140 * () interface. Please refer to the description of 1141 * **bpf_perf_event_read_value**\ () for details. 1142 * Return 1143 * The value of the perf event counter read from the map, or a 1144 * negative error code in case of failure. 1145 * 1146 * long bpf_redirect(u32 ifindex, u64 flags) 1147 * Description 1148 * Redirect the packet to another net device of index *ifindex*. 1149 * This helper is somewhat similar to **bpf_clone_redirect**\ 1150 * (), except that the packet is not cloned, which provides 1151 * increased performance. 1152 * 1153 * Except for XDP, both ingress and egress interfaces can be used 1154 * for redirection. The **BPF_F_INGRESS** value in *flags* is used 1155 * to make the distinction (ingress path is selected if the flag 1156 * is present, egress path otherwise). Currently, XDP only 1157 * supports redirection to the egress interface, and accepts no 1158 * flag at all. 1159 * 1160 * The same effect can also be attained with the more generic 1161 * **bpf_redirect_map**\ (), which uses a BPF map to store the 1162 * redirect target instead of providing it directly to the helper. 1163 * Return 1164 * For XDP, the helper returns **XDP_REDIRECT** on success or 1165 * **XDP_ABORTED** on error. For other program types, the values 1166 * are **TC_ACT_REDIRECT** on success or **TC_ACT_SHOT** on 1167 * error. 1168 * 1169 * u32 bpf_get_route_realm(struct sk_buff *skb) 1170 * Description 1171 * Retrieve the realm or the route, that is to say the 1172 * **tclassid** field of the destination for the *skb*. The 1173 * identifier retrieved is a user-provided tag, similar to the 1174 * one used with the net_cls cgroup (see description for 1175 * **bpf_get_cgroup_classid**\ () helper), but here this tag is 1176 * held by a route (a destination entry), not by a task. 1177 * 1178 * Retrieving this identifier works with the clsact TC egress hook 1179 * (see also **tc-bpf(8)**), or alternatively on conventional 1180 * classful egress qdiscs, but not on TC ingress path. In case of 1181 * clsact TC egress hook, this has the advantage that, internally, 1182 * the destination entry has not been dropped yet in the transmit 1183 * path. Therefore, the destination entry does not need to be 1184 * artificially held via **netif_keep_dst**\ () for a classful 1185 * qdisc until the *skb* is freed. 1186 * 1187 * This helper is available only if the kernel was compiled with 1188 * **CONFIG_IP_ROUTE_CLASSID** configuration option. 1189 * Return 1190 * The realm of the route for the packet associated to *skb*, or 0 1191 * if none was found. 1192 * 1193 * long bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) 1194 * Description 1195 * Write raw *data* blob into a special BPF perf event held by 1196 * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf 1197 * event must have the following attributes: **PERF_SAMPLE_RAW** 1198 * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and 1199 * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. 1200 * 1201 * The *flags* are used to indicate the index in *map* for which 1202 * the value must be put, masked with **BPF_F_INDEX_MASK**. 1203 * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** 1204 * to indicate that the index of the current CPU core should be 1205 * used. 1206 * 1207 * The value to write, of *size*, is passed through eBPF stack and 1208 * pointed by *data*. 1209 * 1210 * The context of the program *ctx* needs also be passed to the 1211 * helper. 1212 * 1213 * On user space, a program willing to read the values needs to 1214 * call **perf_event_open**\ () on the perf event (either for 1215 * one or for all CPUs) and to store the file descriptor into the 1216 * *map*. This must be done before the eBPF program can send data 1217 * into it. An example is available in file 1218 * *samples/bpf/trace_output_user.c* in the Linux kernel source 1219 * tree (the eBPF program counterpart is in 1220 * *samples/bpf/trace_output_kern.c*). 1221 * 1222 * **bpf_perf_event_output**\ () achieves better performance 1223 * than **bpf_trace_printk**\ () for sharing data with user 1224 * space, and is much better suitable for streaming data from eBPF 1225 * programs. 1226 * 1227 * Note that this helper is not restricted to tracing use cases 1228 * and can be used with programs attached to TC or XDP as well, 1229 * where it allows for passing data to user space listeners. Data 1230 * can be: 1231 * 1232 * * Only custom structs, 1233 * * Only the packet payload, or 1234 * * A combination of both. 1235 * Return 1236 * 0 on success, or a negative error in case of failure. 1237 * 1238 * long bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len) 1239 * Description 1240 * This helper was provided as an easy way to load data from a 1241 * packet. It can be used to load *len* bytes from *offset* from 1242 * the packet associated to *skb*, into the buffer pointed by 1243 * *to*. 1244 * 1245 * Since Linux 4.7, usage of this helper has mostly been replaced 1246 * by "direct packet access", enabling packet data to be 1247 * manipulated with *skb*\ **->data** and *skb*\ **->data_end** 1248 * pointing respectively to the first byte of packet data and to 1249 * the byte after the last byte of packet data. However, it 1250 * remains useful if one wishes to read large quantities of data 1251 * at once from a packet into the eBPF stack. 1252 * Return 1253 * 0 on success, or a negative error in case of failure. 1254 * 1255 * long bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags) 1256 * Description 1257 * Walk a user or a kernel stack and return its id. To achieve 1258 * this, the helper needs *ctx*, which is a pointer to the context 1259 * on which the tracing program is executed, and a pointer to a 1260 * *map* of type **BPF_MAP_TYPE_STACK_TRACE**. 1261 * 1262 * The last argument, *flags*, holds the number of stack frames to 1263 * skip (from 0 to 255), masked with 1264 * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set 1265 * a combination of the following flags: 1266 * 1267 * **BPF_F_USER_STACK** 1268 * Collect a user space stack instead of a kernel stack. 1269 * **BPF_F_FAST_STACK_CMP** 1270 * Compare stacks by hash only. 1271 * **BPF_F_REUSE_STACKID** 1272 * If two different stacks hash into the same *stackid*, 1273 * discard the old one. 1274 * 1275 * The stack id retrieved is a 32 bit long integer handle which 1276 * can be further combined with other data (including other stack 1277 * ids) and used as a key into maps. This can be useful for 1278 * generating a variety of graphs (such as flame graphs or off-cpu 1279 * graphs). 1280 * 1281 * For walking a stack, this helper is an improvement over 1282 * **bpf_probe_read**\ (), which can be used with unrolled loops 1283 * but is not efficient and consumes a lot of eBPF instructions. 1284 * Instead, **bpf_get_stackid**\ () can collect up to 1285 * **PERF_MAX_STACK_DEPTH** both kernel and user frames. Note that 1286 * this limit can be controlled with the **sysctl** program, and 1287 * that it should be manually increased in order to profile long 1288 * user stacks (such as stacks for Java programs). To do so, use: 1289 * 1290 * :: 1291 * 1292 * # sysctl kernel.perf_event_max_stack=<new value> 1293 * Return 1294 * The positive or null stack id on success, or a negative error 1295 * in case of failure. 1296 * 1297 * s64 bpf_csum_diff(__be32 *from, u32 from_size, __be32 *to, u32 to_size, __wsum seed) 1298 * Description 1299 * Compute a checksum difference, from the raw buffer pointed by 1300 * *from*, of length *from_size* (that must be a multiple of 4), 1301 * towards the raw buffer pointed by *to*, of size *to_size* 1302 * (same remark). An optional *seed* can be added to the value 1303 * (this can be cascaded, the seed may come from a previous call 1304 * to the helper). 1305 * 1306 * This is flexible enough to be used in several ways: 1307 * 1308 * * With *from_size* == 0, *to_size* > 0 and *seed* set to 1309 * checksum, it can be used when pushing new data. 1310 * * With *from_size* > 0, *to_size* == 0 and *seed* set to 1311 * checksum, it can be used when removing data from a packet. 1312 * * With *from_size* > 0, *to_size* > 0 and *seed* set to 0, it 1313 * can be used to compute a diff. Note that *from_size* and 1314 * *to_size* do not need to be equal. 1315 * 1316 * This helper can be used in combination with 1317 * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ (), to 1318 * which one can feed in the difference computed with 1319 * **bpf_csum_diff**\ (). 1320 * Return 1321 * The checksum result, or a negative error code in case of 1322 * failure. 1323 * 1324 * long bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) 1325 * Description 1326 * Retrieve tunnel options metadata for the packet associated to 1327 * *skb*, and store the raw tunnel option data to the buffer *opt* 1328 * of *size*. 1329 * 1330 * This helper can be used with encapsulation devices that can 1331 * operate in "collect metadata" mode (please refer to the related 1332 * note in the description of **bpf_skb_get_tunnel_key**\ () for 1333 * more details). A particular example where this can be used is 1334 * in combination with the Geneve encapsulation protocol, where it 1335 * allows for pushing (with **bpf_skb_get_tunnel_opt**\ () helper) 1336 * and retrieving arbitrary TLVs (Type-Length-Value headers) from 1337 * the eBPF program. This allows for full customization of these 1338 * headers. 1339 * Return 1340 * The size of the option data retrieved. 1341 * 1342 * long bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) 1343 * Description 1344 * Set tunnel options metadata for the packet associated to *skb* 1345 * to the option data contained in the raw buffer *opt* of *size*. 1346 * 1347 * See also the description of the **bpf_skb_get_tunnel_opt**\ () 1348 * helper for additional information. 1349 * Return 1350 * 0 on success, or a negative error in case of failure. 1351 * 1352 * long bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags) 1353 * Description 1354 * Change the protocol of the *skb* to *proto*. Currently 1355 * supported are transition from IPv4 to IPv6, and from IPv6 to 1356 * IPv4. The helper takes care of the groundwork for the 1357 * transition, including resizing the socket buffer. The eBPF 1358 * program is expected to fill the new headers, if any, via 1359 * **skb_store_bytes**\ () and to recompute the checksums with 1360 * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ 1361 * (). The main case for this helper is to perform NAT64 1362 * operations out of an eBPF program. 1363 * 1364 * Internally, the GSO type is marked as dodgy so that headers are 1365 * checked and segments are recalculated by the GSO/GRO engine. 1366 * The size for GSO target is adapted as well. 1367 * 1368 * All values for *flags* are reserved for future usage, and must 1369 * be left at zero. 1370 * 1371 * A call to this helper is susceptible to change the underlying 1372 * packet buffer. Therefore, at load time, all checks on pointers 1373 * previously done by the verifier are invalidated and must be 1374 * performed again, if the helper is used in combination with 1375 * direct packet access. 1376 * Return 1377 * 0 on success, or a negative error in case of failure. 1378 * 1379 * long bpf_skb_change_type(struct sk_buff *skb, u32 type) 1380 * Description 1381 * Change the packet type for the packet associated to *skb*. This 1382 * comes down to setting *skb*\ **->pkt_type** to *type*, except 1383 * the eBPF program does not have a write access to *skb*\ 1384 * **->pkt_type** beside this helper. Using a helper here allows 1385 * for graceful handling of errors. 1386 * 1387 * The major use case is to change incoming *skb*s to 1388 * **PACKET_HOST** in a programmatic way instead of having to 1389 * recirculate via **redirect**\ (..., **BPF_F_INGRESS**), for 1390 * example. 1391 * 1392 * Note that *type* only allows certain values. At this time, they 1393 * are: 1394 * 1395 * **PACKET_HOST** 1396 * Packet is for us. 1397 * **PACKET_BROADCAST** 1398 * Send packet to all. 1399 * **PACKET_MULTICAST** 1400 * Send packet to group. 1401 * **PACKET_OTHERHOST** 1402 * Send packet to someone else. 1403 * Return 1404 * 0 on success, or a negative error in case of failure. 1405 * 1406 * long bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index) 1407 * Description 1408 * Check whether *skb* is a descendant of the cgroup2 held by 1409 * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*. 1410 * Return 1411 * The return value depends on the result of the test, and can be: 1412 * 1413 * * 0, if the *skb* failed the cgroup2 descendant test. 1414 * * 1, if the *skb* succeeded the cgroup2 descendant test. 1415 * * A negative error code, if an error occurred. 1416 * 1417 * u32 bpf_get_hash_recalc(struct sk_buff *skb) 1418 * Description 1419 * Retrieve the hash of the packet, *skb*\ **->hash**. If it is 1420 * not set, in particular if the hash was cleared due to mangling, 1421 * recompute this hash. Later accesses to the hash can be done 1422 * directly with *skb*\ **->hash**. 1423 * 1424 * Calling **bpf_set_hash_invalid**\ (), changing a packet 1425 * prototype with **bpf_skb_change_proto**\ (), or calling 1426 * **bpf_skb_store_bytes**\ () with the 1427 * **BPF_F_INVALIDATE_HASH** are actions susceptible to clear 1428 * the hash and to trigger a new computation for the next call to 1429 * **bpf_get_hash_recalc**\ (). 1430 * Return 1431 * The 32-bit hash. 1432 * 1433 * u64 bpf_get_current_task(void) 1434 * Return 1435 * A pointer to the current task struct. 1436 * 1437 * long bpf_probe_write_user(void *dst, const void *src, u32 len) 1438 * Description 1439 * Attempt in a safe way to write *len* bytes from the buffer 1440 * *src* to *dst* in memory. It only works for threads that are in 1441 * user context, and *dst* must be a valid user space address. 1442 * 1443 * This helper should not be used to implement any kind of 1444 * security mechanism because of TOC-TOU attacks, but rather to 1445 * debug, divert, and manipulate execution of semi-cooperative 1446 * processes. 1447 * 1448 * Keep in mind that this feature is meant for experiments, and it 1449 * has a risk of crashing the system and running programs. 1450 * Therefore, when an eBPF program using this helper is attached, 1451 * a warning including PID and process name is printed to kernel 1452 * logs. 1453 * Return 1454 * 0 on success, or a negative error in case of failure. 1455 * 1456 * long bpf_current_task_under_cgroup(struct bpf_map *map, u32 index) 1457 * Description 1458 * Check whether the probe is being run is the context of a given 1459 * subset of the cgroup2 hierarchy. The cgroup2 to test is held by 1460 * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*. 1461 * Return 1462 * The return value depends on the result of the test, and can be: 1463 * 1464 * * 0, if current task belongs to the cgroup2. 1465 * * 1, if current task does not belong to the cgroup2. 1466 * * A negative error code, if an error occurred. 1467 * 1468 * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags) 1469 * Description 1470 * Resize (trim or grow) the packet associated to *skb* to the 1471 * new *len*. The *flags* are reserved for future usage, and must 1472 * be left at zero. 1473 * 1474 * The basic idea is that the helper performs the needed work to 1475 * change the size of the packet, then the eBPF program rewrites 1476 * the rest via helpers like **bpf_skb_store_bytes**\ (), 1477 * **bpf_l3_csum_replace**\ (), **bpf_l3_csum_replace**\ () 1478 * and others. This helper is a slow path utility intended for 1479 * replies with control messages. And because it is targeted for 1480 * slow path, the helper itself can afford to be slow: it 1481 * implicitly linearizes, unclones and drops offloads from the 1482 * *skb*. 1483 * 1484 * A call to this helper is susceptible to change the underlying 1485 * packet buffer. Therefore, at load time, all checks on pointers 1486 * previously done by the verifier are invalidated and must be 1487 * performed again, if the helper is used in combination with 1488 * direct packet access. 1489 * Return 1490 * 0 on success, or a negative error in case of failure. 1491 * 1492 * long bpf_skb_pull_data(struct sk_buff *skb, u32 len) 1493 * Description 1494 * Pull in non-linear data in case the *skb* is non-linear and not 1495 * all of *len* are part of the linear section. Make *len* bytes 1496 * from *skb* readable and writable. If a zero value is passed for 1497 * *len*, then the whole length of the *skb* is pulled. 1498 * 1499 * This helper is only needed for reading and writing with direct 1500 * packet access. 1501 * 1502 * For direct packet access, testing that offsets to access 1503 * are within packet boundaries (test on *skb*\ **->data_end**) is 1504 * susceptible to fail if offsets are invalid, or if the requested 1505 * data is in non-linear parts of the *skb*. On failure the 1506 * program can just bail out, or in the case of a non-linear 1507 * buffer, use a helper to make the data available. The 1508 * **bpf_skb_load_bytes**\ () helper is a first solution to access 1509 * the data. Another one consists in using **bpf_skb_pull_data** 1510 * to pull in once the non-linear parts, then retesting and 1511 * eventually access the data. 1512 * 1513 * At the same time, this also makes sure the *skb* is uncloned, 1514 * which is a necessary condition for direct write. As this needs 1515 * to be an invariant for the write part only, the verifier 1516 * detects writes and adds a prologue that is calling 1517 * **bpf_skb_pull_data()** to effectively unclone the *skb* from 1518 * the very beginning in case it is indeed cloned. 1519 * 1520 * A call to this helper is susceptible to change the underlying 1521 * packet buffer. Therefore, at load time, all checks on pointers 1522 * previously done by the verifier are invalidated and must be 1523 * performed again, if the helper is used in combination with 1524 * direct packet access. 1525 * Return 1526 * 0 on success, or a negative error in case of failure. 1527 * 1528 * s64 bpf_csum_update(struct sk_buff *skb, __wsum csum) 1529 * Description 1530 * Add the checksum *csum* into *skb*\ **->csum** in case the 1531 * driver has supplied a checksum for the entire packet into that 1532 * field. Return an error otherwise. This helper is intended to be 1533 * used in combination with **bpf_csum_diff**\ (), in particular 1534 * when the checksum needs to be updated after data has been 1535 * written into the packet through direct packet access. 1536 * Return 1537 * The checksum on success, or a negative error code in case of 1538 * failure. 1539 * 1540 * void bpf_set_hash_invalid(struct sk_buff *skb) 1541 * Description 1542 * Invalidate the current *skb*\ **->hash**. It can be used after 1543 * mangling on headers through direct packet access, in order to 1544 * indicate that the hash is outdated and to trigger a 1545 * recalculation the next time the kernel tries to access this 1546 * hash or when the **bpf_get_hash_recalc**\ () helper is called. 1547 * 1548 * long bpf_get_numa_node_id(void) 1549 * Description 1550 * Return the id of the current NUMA node. The primary use case 1551 * for this helper is the selection of sockets for the local NUMA 1552 * node, when the program is attached to sockets using the 1553 * **SO_ATTACH_REUSEPORT_EBPF** option (see also **socket(7)**), 1554 * but the helper is also available to other eBPF program types, 1555 * similarly to **bpf_get_smp_processor_id**\ (). 1556 * Return 1557 * The id of current NUMA node. 1558 * 1559 * long bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags) 1560 * Description 1561 * Grows headroom of packet associated to *skb* and adjusts the 1562 * offset of the MAC header accordingly, adding *len* bytes of 1563 * space. It automatically extends and reallocates memory as 1564 * required. 1565 * 1566 * This helper can be used on a layer 3 *skb* to push a MAC header 1567 * for redirection into a layer 2 device. 1568 * 1569 * All values for *flags* are reserved for future usage, and must 1570 * be left at zero. 1571 * 1572 * A call to this helper is susceptible to change the underlying 1573 * packet buffer. Therefore, at load time, all checks on pointers 1574 * previously done by the verifier are invalidated and must be 1575 * performed again, if the helper is used in combination with 1576 * direct packet access. 1577 * Return 1578 * 0 on success, or a negative error in case of failure. 1579 * 1580 * long bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta) 1581 * Description 1582 * Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that 1583 * it is possible to use a negative value for *delta*. This helper 1584 * can be used to prepare the packet for pushing or popping 1585 * headers. 1586 * 1587 * A call to this helper is susceptible to change the underlying 1588 * packet buffer. Therefore, at load time, all checks on pointers 1589 * previously done by the verifier are invalidated and must be 1590 * performed again, if the helper is used in combination with 1591 * direct packet access. 1592 * Return 1593 * 0 on success, or a negative error in case of failure. 1594 * 1595 * long bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr) 1596 * Description 1597 * Copy a NUL terminated string from an unsafe kernel address 1598 * *unsafe_ptr* to *dst*. See **bpf_probe_read_kernel_str**\ () for 1599 * more details. 1600 * 1601 * Generally, use **bpf_probe_read_user_str**\ () or 1602 * **bpf_probe_read_kernel_str**\ () instead. 1603 * Return 1604 * On success, the strictly positive length of the string, 1605 * including the trailing NUL character. On error, a negative 1606 * value. 1607 * 1608 * u64 bpf_get_socket_cookie(struct sk_buff *skb) 1609 * Description 1610 * If the **struct sk_buff** pointed by *skb* has a known socket, 1611 * retrieve the cookie (generated by the kernel) of this socket. 1612 * If no cookie has been set yet, generate a new cookie. Once 1613 * generated, the socket cookie remains stable for the life of the 1614 * socket. This helper can be useful for monitoring per socket 1615 * networking traffic statistics as it provides a global socket 1616 * identifier that can be assumed unique. 1617 * Return 1618 * A 8-byte long non-decreasing number on success, or 0 if the 1619 * socket field is missing inside *skb*. 1620 * 1621 * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx) 1622 * Description 1623 * Equivalent to bpf_get_socket_cookie() helper that accepts 1624 * *skb*, but gets socket from **struct bpf_sock_addr** context. 1625 * Return 1626 * A 8-byte long non-decreasing number. 1627 * 1628 * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx) 1629 * Description 1630 * Equivalent to **bpf_get_socket_cookie**\ () helper that accepts 1631 * *skb*, but gets socket from **struct bpf_sock_ops** context. 1632 * Return 1633 * A 8-byte long non-decreasing number. 1634 * 1635 * u32 bpf_get_socket_uid(struct sk_buff *skb) 1636 * Return 1637 * The owner UID of the socket associated to *skb*. If the socket 1638 * is **NULL**, or if it is not a full socket (i.e. if it is a 1639 * time-wait or a request socket instead), **overflowuid** value 1640 * is returned (note that **overflowuid** might also be the actual 1641 * UID value for the socket). 1642 * 1643 * long bpf_set_hash(struct sk_buff *skb, u32 hash) 1644 * Description 1645 * Set the full hash for *skb* (set the field *skb*\ **->hash**) 1646 * to value *hash*. 1647 * Return 1648 * 0 1649 * 1650 * long bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) 1651 * Description 1652 * Emulate a call to **setsockopt()** on the socket associated to 1653 * *bpf_socket*, which must be a full socket. The *level* at 1654 * which the option resides and the name *optname* of the option 1655 * must be specified, see **setsockopt(2)** for more information. 1656 * The option value of length *optlen* is pointed by *optval*. 1657 * 1658 * *bpf_socket* should be one of the following: 1659 * 1660 * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**. 1661 * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT** 1662 * and **BPF_CGROUP_INET6_CONNECT**. 1663 * 1664 * This helper actually implements a subset of **setsockopt()**. 1665 * It supports the following *level*\ s: 1666 * 1667 * * **SOL_SOCKET**, which supports the following *optname*\ s: 1668 * **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**, 1669 * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**, 1670 * **SO_BINDTODEVICE**, **SO_KEEPALIVE**. 1671 * * **IPPROTO_TCP**, which supports the following *optname*\ s: 1672 * **TCP_CONGESTION**, **TCP_BPF_IW**, 1673 * **TCP_BPF_SNDCWND_CLAMP**, **TCP_SAVE_SYN**, 1674 * **TCP_KEEPIDLE**, **TCP_KEEPINTVL**, **TCP_KEEPCNT**, 1675 * **TCP_SYNCNT**, **TCP_USER_TIMEOUT**. 1676 * * **IPPROTO_IP**, which supports *optname* **IP_TOS**. 1677 * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**. 1678 * Return 1679 * 0 on success, or a negative error in case of failure. 1680 * 1681 * long bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags) 1682 * Description 1683 * Grow or shrink the room for data in the packet associated to 1684 * *skb* by *len_diff*, and according to the selected *mode*. 1685 * 1686 * By default, the helper will reset any offloaded checksum 1687 * indicator of the skb to CHECKSUM_NONE. This can be avoided 1688 * by the following flag: 1689 * 1690 * * **BPF_F_ADJ_ROOM_NO_CSUM_RESET**: Do not reset offloaded 1691 * checksum data of the skb to CHECKSUM_NONE. 1692 * 1693 * There are two supported modes at this time: 1694 * 1695 * * **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer 1696 * (room space is added or removed below the layer 2 header). 1697 * 1698 * * **BPF_ADJ_ROOM_NET**: Adjust room at the network layer 1699 * (room space is added or removed below the layer 3 header). 1700 * 1701 * The following flags are supported at this time: 1702 * 1703 * * **BPF_F_ADJ_ROOM_FIXED_GSO**: Do not adjust gso_size. 1704 * Adjusting mss in this way is not allowed for datagrams. 1705 * 1706 * * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4**, 1707 * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6**: 1708 * Any new space is reserved to hold a tunnel header. 1709 * Configure skb offsets and other fields accordingly. 1710 * 1711 * * **BPF_F_ADJ_ROOM_ENCAP_L4_GRE**, 1712 * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP**: 1713 * Use with ENCAP_L3 flags to further specify the tunnel type. 1714 * 1715 * * **BPF_F_ADJ_ROOM_ENCAP_L2**\ (*len*): 1716 * Use with ENCAP_L3/L4 flags to further specify the tunnel 1717 * type; *len* is the length of the inner MAC header. 1718 * 1719 * A call to this helper is susceptible to change the underlying 1720 * packet buffer. Therefore, at load time, all checks on pointers 1721 * previously done by the verifier are invalidated and must be 1722 * performed again, if the helper is used in combination with 1723 * direct packet access. 1724 * Return 1725 * 0 on success, or a negative error in case of failure. 1726 * 1727 * long bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags) 1728 * Description 1729 * Redirect the packet to the endpoint referenced by *map* at 1730 * index *key*. Depending on its type, this *map* can contain 1731 * references to net devices (for forwarding packets through other 1732 * ports), or to CPUs (for redirecting XDP frames to another CPU; 1733 * but this is only implemented for native XDP (with driver 1734 * support) as of this writing). 1735 * 1736 * The lower two bits of *flags* are used as the return code if 1737 * the map lookup fails. This is so that the return value can be 1738 * one of the XDP program return codes up to **XDP_TX**, as chosen 1739 * by the caller. Any higher bits in the *flags* argument must be 1740 * unset. 1741 * 1742 * See also **bpf_redirect**\ (), which only supports redirecting 1743 * to an ifindex, but doesn't require a map to do so. 1744 * Return 1745 * **XDP_REDIRECT** on success, or the value of the two lower bits 1746 * of the *flags* argument on error. 1747 * 1748 * long bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags) 1749 * Description 1750 * Redirect the packet to the socket referenced by *map* (of type 1751 * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and 1752 * egress interfaces can be used for redirection. The 1753 * **BPF_F_INGRESS** value in *flags* is used to make the 1754 * distinction (ingress path is selected if the flag is present, 1755 * egress path otherwise). This is the only flag supported for now. 1756 * Return 1757 * **SK_PASS** on success, or **SK_DROP** on error. 1758 * 1759 * long bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) 1760 * Description 1761 * Add an entry to, or update a *map* referencing sockets. The 1762 * *skops* is used as a new value for the entry associated to 1763 * *key*. *flags* is one of: 1764 * 1765 * **BPF_NOEXIST** 1766 * The entry for *key* must not exist in the map. 1767 * **BPF_EXIST** 1768 * The entry for *key* must already exist in the map. 1769 * **BPF_ANY** 1770 * No condition on the existence of the entry for *key*. 1771 * 1772 * If the *map* has eBPF programs (parser and verdict), those will 1773 * be inherited by the socket being added. If the socket is 1774 * already attached to eBPF programs, this results in an error. 1775 * Return 1776 * 0 on success, or a negative error in case of failure. 1777 * 1778 * long bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta) 1779 * Description 1780 * Adjust the address pointed by *xdp_md*\ **->data_meta** by 1781 * *delta* (which can be positive or negative). Note that this 1782 * operation modifies the address stored in *xdp_md*\ **->data**, 1783 * so the latter must be loaded only after the helper has been 1784 * called. 1785 * 1786 * The use of *xdp_md*\ **->data_meta** is optional and programs 1787 * are not required to use it. The rationale is that when the 1788 * packet is processed with XDP (e.g. as DoS filter), it is 1789 * possible to push further meta data along with it before passing 1790 * to the stack, and to give the guarantee that an ingress eBPF 1791 * program attached as a TC classifier on the same device can pick 1792 * this up for further post-processing. Since TC works with socket 1793 * buffers, it remains possible to set from XDP the **mark** or 1794 * **priority** pointers, or other pointers for the socket buffer. 1795 * Having this scratch space generic and programmable allows for 1796 * more flexibility as the user is free to store whatever meta 1797 * data they need. 1798 * 1799 * A call to this helper is susceptible to change the underlying 1800 * packet buffer. Therefore, at load time, all checks on pointers 1801 * previously done by the verifier are invalidated and must be 1802 * performed again, if the helper is used in combination with 1803 * direct packet access. 1804 * Return 1805 * 0 on success, or a negative error in case of failure. 1806 * 1807 * long bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size) 1808 * Description 1809 * Read the value of a perf event counter, and store it into *buf* 1810 * of size *buf_size*. This helper relies on a *map* of type 1811 * **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of the perf event 1812 * counter is selected when *map* is updated with perf event file 1813 * descriptors. The *map* is an array whose size is the number of 1814 * available CPUs, and each cell contains a value relative to one 1815 * CPU. The value to retrieve is indicated by *flags*, that 1816 * contains the index of the CPU to look up, masked with 1817 * **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to 1818 * **BPF_F_CURRENT_CPU** to indicate that the value for the 1819 * current CPU should be retrieved. 1820 * 1821 * This helper behaves in a way close to 1822 * **bpf_perf_event_read**\ () helper, save that instead of 1823 * just returning the value observed, it fills the *buf* 1824 * structure. This allows for additional data to be retrieved: in 1825 * particular, the enabled and running times (in *buf*\ 1826 * **->enabled** and *buf*\ **->running**, respectively) are 1827 * copied. In general, **bpf_perf_event_read_value**\ () is 1828 * recommended over **bpf_perf_event_read**\ (), which has some 1829 * ABI issues and provides fewer functionalities. 1830 * 1831 * These values are interesting, because hardware PMU (Performance 1832 * Monitoring Unit) counters are limited resources. When there are 1833 * more PMU based perf events opened than available counters, 1834 * kernel will multiplex these events so each event gets certain 1835 * percentage (but not all) of the PMU time. In case that 1836 * multiplexing happens, the number of samples or counter value 1837 * will not reflect the case compared to when no multiplexing 1838 * occurs. This makes comparison between different runs difficult. 1839 * Typically, the counter value should be normalized before 1840 * comparing to other experiments. The usual normalization is done 1841 * as follows. 1842 * 1843 * :: 1844 * 1845 * normalized_counter = counter * t_enabled / t_running 1846 * 1847 * Where t_enabled is the time enabled for event and t_running is 1848 * the time running for event since last normalization. The 1849 * enabled and running times are accumulated since the perf event 1850 * open. To achieve scaling factor between two invocations of an 1851 * eBPF program, users can use CPU id as the key (which is 1852 * typical for perf array usage model) to remember the previous 1853 * value and do the calculation inside the eBPF program. 1854 * Return 1855 * 0 on success, or a negative error in case of failure. 1856 * 1857 * long bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size) 1858 * Description 1859 * For en eBPF program attached to a perf event, retrieve the 1860 * value of the event counter associated to *ctx* and store it in 1861 * the structure pointed by *buf* and of size *buf_size*. Enabled 1862 * and running times are also stored in the structure (see 1863 * description of helper **bpf_perf_event_read_value**\ () for 1864 * more details). 1865 * Return 1866 * 0 on success, or a negative error in case of failure. 1867 * 1868 * long bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) 1869 * Description 1870 * Emulate a call to **getsockopt()** on the socket associated to 1871 * *bpf_socket*, which must be a full socket. The *level* at 1872 * which the option resides and the name *optname* of the option 1873 * must be specified, see **getsockopt(2)** for more information. 1874 * The retrieved value is stored in the structure pointed by 1875 * *opval* and of length *optlen*. 1876 * 1877 * *bpf_socket* should be one of the following: 1878 * 1879 * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**. 1880 * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT** 1881 * and **BPF_CGROUP_INET6_CONNECT**. 1882 * 1883 * This helper actually implements a subset of **getsockopt()**. 1884 * It supports the following *level*\ s: 1885 * 1886 * * **IPPROTO_TCP**, which supports *optname* 1887 * **TCP_CONGESTION**. 1888 * * **IPPROTO_IP**, which supports *optname* **IP_TOS**. 1889 * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**. 1890 * Return 1891 * 0 on success, or a negative error in case of failure. 1892 * 1893 * long bpf_override_return(struct pt_regs *regs, u64 rc) 1894 * Description 1895 * Used for error injection, this helper uses kprobes to override 1896 * the return value of the probed function, and to set it to *rc*. 1897 * The first argument is the context *regs* on which the kprobe 1898 * works. 1899 * 1900 * This helper works by setting the PC (program counter) 1901 * to an override function which is run in place of the original 1902 * probed function. This means the probed function is not run at 1903 * all. The replacement function just returns with the required 1904 * value. 1905 * 1906 * This helper has security implications, and thus is subject to 1907 * restrictions. It is only available if the kernel was compiled 1908 * with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration 1909 * option, and in this case it only works on functions tagged with 1910 * **ALLOW_ERROR_INJECTION** in the kernel code. 1911 * 1912 * Also, the helper is only available for the architectures having 1913 * the CONFIG_FUNCTION_ERROR_INJECTION option. As of this writing, 1914 * x86 architecture is the only one to support this feature. 1915 * Return 1916 * 0 1917 * 1918 * long bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval) 1919 * Description 1920 * Attempt to set the value of the **bpf_sock_ops_cb_flags** field 1921 * for the full TCP socket associated to *bpf_sock_ops* to 1922 * *argval*. 1923 * 1924 * The primary use of this field is to determine if there should 1925 * be calls to eBPF programs of type 1926 * **BPF_PROG_TYPE_SOCK_OPS** at various points in the TCP 1927 * code. A program of the same type can change its value, per 1928 * connection and as necessary, when the connection is 1929 * established. This field is directly accessible for reading, but 1930 * this helper must be used for updates in order to return an 1931 * error if an eBPF program tries to set a callback that is not 1932 * supported in the current kernel. 1933 * 1934 * *argval* is a flag array which can combine these flags: 1935 * 1936 * * **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out) 1937 * * **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission) 1938 * * **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change) 1939 * * **BPF_SOCK_OPS_RTT_CB_FLAG** (every RTT) 1940 * 1941 * Therefore, this function can be used to clear a callback flag by 1942 * setting the appropriate bit to zero. e.g. to disable the RTO 1943 * callback: 1944 * 1945 * **bpf_sock_ops_cb_flags_set(bpf_sock,** 1946 * **bpf_sock->bpf_sock_ops_cb_flags & ~BPF_SOCK_OPS_RTO_CB_FLAG)** 1947 * 1948 * Here are some examples of where one could call such eBPF 1949 * program: 1950 * 1951 * * When RTO fires. 1952 * * When a packet is retransmitted. 1953 * * When the connection terminates. 1954 * * When a packet is sent. 1955 * * When a packet is received. 1956 * Return 1957 * Code **-EINVAL** if the socket is not a full TCP socket; 1958 * otherwise, a positive number containing the bits that could not 1959 * be set is returned (which comes down to 0 if all bits were set 1960 * as required). 1961 * 1962 * long bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags) 1963 * Description 1964 * This helper is used in programs implementing policies at the 1965 * socket level. If the message *msg* is allowed to pass (i.e. if 1966 * the verdict eBPF program returns **SK_PASS**), redirect it to 1967 * the socket referenced by *map* (of type 1968 * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and 1969 * egress interfaces can be used for redirection. The 1970 * **BPF_F_INGRESS** value in *flags* is used to make the 1971 * distinction (ingress path is selected if the flag is present, 1972 * egress path otherwise). This is the only flag supported for now. 1973 * Return 1974 * **SK_PASS** on success, or **SK_DROP** on error. 1975 * 1976 * long bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes) 1977 * Description 1978 * For socket policies, apply the verdict of the eBPF program to 1979 * the next *bytes* (number of bytes) of message *msg*. 1980 * 1981 * For example, this helper can be used in the following cases: 1982 * 1983 * * A single **sendmsg**\ () or **sendfile**\ () system call 1984 * contains multiple logical messages that the eBPF program is 1985 * supposed to read and for which it should apply a verdict. 1986 * * An eBPF program only cares to read the first *bytes* of a 1987 * *msg*. If the message has a large payload, then setting up 1988 * and calling the eBPF program repeatedly for all bytes, even 1989 * though the verdict is already known, would create unnecessary 1990 * overhead. 1991 * 1992 * When called from within an eBPF program, the helper sets a 1993 * counter internal to the BPF infrastructure, that is used to 1994 * apply the last verdict to the next *bytes*. If *bytes* is 1995 * smaller than the current data being processed from a 1996 * **sendmsg**\ () or **sendfile**\ () system call, the first 1997 * *bytes* will be sent and the eBPF program will be re-run with 1998 * the pointer for start of data pointing to byte number *bytes* 1999 * **+ 1**. If *bytes* is larger than the current data being 2000 * processed, then the eBPF verdict will be applied to multiple 2001 * **sendmsg**\ () or **sendfile**\ () calls until *bytes* are 2002 * consumed. 2003 * 2004 * Note that if a socket closes with the internal counter holding 2005 * a non-zero value, this is not a problem because data is not 2006 * being buffered for *bytes* and is sent as it is received. 2007 * Return 2008 * 0 2009 * 2010 * long bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes) 2011 * Description 2012 * For socket policies, prevent the execution of the verdict eBPF 2013 * program for message *msg* until *bytes* (byte number) have been 2014 * accumulated. 2015 * 2016 * This can be used when one needs a specific number of bytes 2017 * before a verdict can be assigned, even if the data spans 2018 * multiple **sendmsg**\ () or **sendfile**\ () calls. The extreme 2019 * case would be a user calling **sendmsg**\ () repeatedly with 2020 * 1-byte long message segments. Obviously, this is bad for 2021 * performance, but it is still valid. If the eBPF program needs 2022 * *bytes* bytes to validate a header, this helper can be used to 2023 * prevent the eBPF program to be called again until *bytes* have 2024 * been accumulated. 2025 * Return 2026 * 0 2027 * 2028 * long bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags) 2029 * Description 2030 * For socket policies, pull in non-linear data from user space 2031 * for *msg* and set pointers *msg*\ **->data** and *msg*\ 2032 * **->data_end** to *start* and *end* bytes offsets into *msg*, 2033 * respectively. 2034 * 2035 * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a 2036 * *msg* it can only parse data that the (**data**, **data_end**) 2037 * pointers have already consumed. For **sendmsg**\ () hooks this 2038 * is likely the first scatterlist element. But for calls relying 2039 * on the **sendpage** handler (e.g. **sendfile**\ ()) this will 2040 * be the range (**0**, **0**) because the data is shared with 2041 * user space and by default the objective is to avoid allowing 2042 * user space to modify data while (or after) eBPF verdict is 2043 * being decided. This helper can be used to pull in data and to 2044 * set the start and end pointer to given values. Data will be 2045 * copied if necessary (i.e. if data was not linear and if start 2046 * and end pointers do not point to the same chunk). 2047 * 2048 * A call to this helper is susceptible to change the underlying 2049 * packet buffer. Therefore, at load time, all checks on pointers 2050 * previously done by the verifier are invalidated and must be 2051 * performed again, if the helper is used in combination with 2052 * direct packet access. 2053 * 2054 * All values for *flags* are reserved for future usage, and must 2055 * be left at zero. 2056 * Return 2057 * 0 on success, or a negative error in case of failure. 2058 * 2059 * long bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len) 2060 * Description 2061 * Bind the socket associated to *ctx* to the address pointed by 2062 * *addr*, of length *addr_len*. This allows for making outgoing 2063 * connection from the desired IP address, which can be useful for 2064 * example when all processes inside a cgroup should use one 2065 * single IP address on a host that has multiple IP configured. 2066 * 2067 * This helper works for IPv4 and IPv6, TCP and UDP sockets. The 2068 * domain (*addr*\ **->sa_family**) must be **AF_INET** (or 2069 * **AF_INET6**). It's advised to pass zero port (**sin_port** 2070 * or **sin6_port**) which triggers IP_BIND_ADDRESS_NO_PORT-like 2071 * behavior and lets the kernel efficiently pick up an unused 2072 * port as long as 4-tuple is unique. Passing non-zero port might 2073 * lead to degraded performance. 2074 * Return 2075 * 0 on success, or a negative error in case of failure. 2076 * 2077 * long bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta) 2078 * Description 2079 * Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is 2080 * possible to both shrink and grow the packet tail. 2081 * Shrink done via *delta* being a negative integer. 2082 * 2083 * A call to this helper is susceptible to change the underlying 2084 * packet buffer. Therefore, at load time, all checks on pointers 2085 * previously done by the verifier are invalidated and must be 2086 * performed again, if the helper is used in combination with 2087 * direct packet access. 2088 * Return 2089 * 0 on success, or a negative error in case of failure. 2090 * 2091 * long bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags) 2092 * Description 2093 * Retrieve the XFRM state (IP transform framework, see also 2094 * **ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*. 2095 * 2096 * The retrieved value is stored in the **struct bpf_xfrm_state** 2097 * pointed by *xfrm_state* and of length *size*. 2098 * 2099 * All values for *flags* are reserved for future usage, and must 2100 * be left at zero. 2101 * 2102 * This helper is available only if the kernel was compiled with 2103 * **CONFIG_XFRM** configuration option. 2104 * Return 2105 * 0 on success, or a negative error in case of failure. 2106 * 2107 * long bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags) 2108 * Description 2109 * Return a user or a kernel stack in bpf program provided buffer. 2110 * To achieve this, the helper needs *ctx*, which is a pointer 2111 * to the context on which the tracing program is executed. 2112 * To store the stacktrace, the bpf program provides *buf* with 2113 * a nonnegative *size*. 2114 * 2115 * The last argument, *flags*, holds the number of stack frames to 2116 * skip (from 0 to 255), masked with 2117 * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set 2118 * the following flags: 2119 * 2120 * **BPF_F_USER_STACK** 2121 * Collect a user space stack instead of a kernel stack. 2122 * **BPF_F_USER_BUILD_ID** 2123 * Collect buildid+offset instead of ips for user stack, 2124 * only valid if **BPF_F_USER_STACK** is also specified. 2125 * 2126 * **bpf_get_stack**\ () can collect up to 2127 * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject 2128 * to sufficient large buffer size. Note that 2129 * this limit can be controlled with the **sysctl** program, and 2130 * that it should be manually increased in order to profile long 2131 * user stacks (such as stacks for Java programs). To do so, use: 2132 * 2133 * :: 2134 * 2135 * # sysctl kernel.perf_event_max_stack=<new value> 2136 * Return 2137 * A non-negative value equal to or less than *size* on success, 2138 * or a negative error in case of failure. 2139 * 2140 * long bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header) 2141 * Description 2142 * This helper is similar to **bpf_skb_load_bytes**\ () in that 2143 * it provides an easy way to load *len* bytes from *offset* 2144 * from the packet associated to *skb*, into the buffer pointed 2145 * by *to*. The difference to **bpf_skb_load_bytes**\ () is that 2146 * a fifth argument *start_header* exists in order to select a 2147 * base offset to start from. *start_header* can be one of: 2148 * 2149 * **BPF_HDR_START_MAC** 2150 * Base offset to load data from is *skb*'s mac header. 2151 * **BPF_HDR_START_NET** 2152 * Base offset to load data from is *skb*'s network header. 2153 * 2154 * In general, "direct packet access" is the preferred method to 2155 * access packet data, however, this helper is in particular useful 2156 * in socket filters where *skb*\ **->data** does not always point 2157 * to the start of the mac header and where "direct packet access" 2158 * is not available. 2159 * Return 2160 * 0 on success, or a negative error in case of failure. 2161 * 2162 * long bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags) 2163 * Description 2164 * Do FIB lookup in kernel tables using parameters in *params*. 2165 * If lookup is successful and result shows packet is to be 2166 * forwarded, the neighbor tables are searched for the nexthop. 2167 * If successful (ie., FIB lookup shows forwarding and nexthop 2168 * is resolved), the nexthop address is returned in ipv4_dst 2169 * or ipv6_dst based on family, smac is set to mac address of 2170 * egress device, dmac is set to nexthop mac address, rt_metric 2171 * is set to metric from route (IPv4/IPv6 only), and ifindex 2172 * is set to the device index of the nexthop from the FIB lookup. 2173 * 2174 * *plen* argument is the size of the passed in struct. 2175 * *flags* argument can be a combination of one or more of the 2176 * following values: 2177 * 2178 * **BPF_FIB_LOOKUP_DIRECT** 2179 * Do a direct table lookup vs full lookup using FIB 2180 * rules. 2181 * **BPF_FIB_LOOKUP_OUTPUT** 2182 * Perform lookup from an egress perspective (default is 2183 * ingress). 2184 * 2185 * *ctx* is either **struct xdp_md** for XDP programs or 2186 * **struct sk_buff** tc cls_act programs. 2187 * Return 2188 * * < 0 if any input argument is invalid 2189 * * 0 on success (packet is forwarded, nexthop neighbor exists) 2190 * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the 2191 * packet is not forwarded or needs assist from full stack 2192 * 2193 * long bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) 2194 * Description 2195 * Add an entry to, or update a sockhash *map* referencing sockets. 2196 * The *skops* is used as a new value for the entry associated to 2197 * *key*. *flags* is one of: 2198 * 2199 * **BPF_NOEXIST** 2200 * The entry for *key* must not exist in the map. 2201 * **BPF_EXIST** 2202 * The entry for *key* must already exist in the map. 2203 * **BPF_ANY** 2204 * No condition on the existence of the entry for *key*. 2205 * 2206 * If the *map* has eBPF programs (parser and verdict), those will 2207 * be inherited by the socket being added. If the socket is 2208 * already attached to eBPF programs, this results in an error. 2209 * Return 2210 * 0 on success, or a negative error in case of failure. 2211 * 2212 * long bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags) 2213 * Description 2214 * This helper is used in programs implementing policies at the 2215 * socket level. If the message *msg* is allowed to pass (i.e. if 2216 * the verdict eBPF program returns **SK_PASS**), redirect it to 2217 * the socket referenced by *map* (of type 2218 * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and 2219 * egress interfaces can be used for redirection. The 2220 * **BPF_F_INGRESS** value in *flags* is used to make the 2221 * distinction (ingress path is selected if the flag is present, 2222 * egress path otherwise). This is the only flag supported for now. 2223 * Return 2224 * **SK_PASS** on success, or **SK_DROP** on error. 2225 * 2226 * long bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags) 2227 * Description 2228 * This helper is used in programs implementing policies at the 2229 * skb socket level. If the sk_buff *skb* is allowed to pass (i.e. 2230 * if the verdeict eBPF program returns **SK_PASS**), redirect it 2231 * to the socket referenced by *map* (of type 2232 * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and 2233 * egress interfaces can be used for redirection. The 2234 * **BPF_F_INGRESS** value in *flags* is used to make the 2235 * distinction (ingress path is selected if the flag is present, 2236 * egress otherwise). This is the only flag supported for now. 2237 * Return 2238 * **SK_PASS** on success, or **SK_DROP** on error. 2239 * 2240 * long bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len) 2241 * Description 2242 * Encapsulate the packet associated to *skb* within a Layer 3 2243 * protocol header. This header is provided in the buffer at 2244 * address *hdr*, with *len* its size in bytes. *type* indicates 2245 * the protocol of the header and can be one of: 2246 * 2247 * **BPF_LWT_ENCAP_SEG6** 2248 * IPv6 encapsulation with Segment Routing Header 2249 * (**struct ipv6_sr_hdr**). *hdr* only contains the SRH, 2250 * the IPv6 header is computed by the kernel. 2251 * **BPF_LWT_ENCAP_SEG6_INLINE** 2252 * Only works if *skb* contains an IPv6 packet. Insert a 2253 * Segment Routing Header (**struct ipv6_sr_hdr**) inside 2254 * the IPv6 header. 2255 * **BPF_LWT_ENCAP_IP** 2256 * IP encapsulation (GRE/GUE/IPIP/etc). The outer header 2257 * must be IPv4 or IPv6, followed by zero or more 2258 * additional headers, up to **LWT_BPF_MAX_HEADROOM** 2259 * total bytes in all prepended headers. Please note that 2260 * if **skb_is_gso**\ (*skb*) is true, no more than two 2261 * headers can be prepended, and the inner header, if 2262 * present, should be either GRE or UDP/GUE. 2263 * 2264 * **BPF_LWT_ENCAP_SEG6**\ \* types can be called by BPF programs 2265 * of type **BPF_PROG_TYPE_LWT_IN**; **BPF_LWT_ENCAP_IP** type can 2266 * be called by bpf programs of types **BPF_PROG_TYPE_LWT_IN** and 2267 * **BPF_PROG_TYPE_LWT_XMIT**. 2268 * 2269 * A call to this helper is susceptible to change the underlying 2270 * packet buffer. Therefore, at load time, all checks on pointers 2271 * previously done by the verifier are invalidated and must be 2272 * performed again, if the helper is used in combination with 2273 * direct packet access. 2274 * Return 2275 * 0 on success, or a negative error in case of failure. 2276 * 2277 * long bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len) 2278 * Description 2279 * Store *len* bytes from address *from* into the packet 2280 * associated to *skb*, at *offset*. Only the flags, tag and TLVs 2281 * inside the outermost IPv6 Segment Routing Header can be 2282 * modified through this helper. 2283 * 2284 * A call to this helper is susceptible to change the underlying 2285 * packet buffer. Therefore, at load time, all checks on pointers 2286 * previously done by the verifier are invalidated and must be 2287 * performed again, if the helper is used in combination with 2288 * direct packet access. 2289 * Return 2290 * 0 on success, or a negative error in case of failure. 2291 * 2292 * long bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta) 2293 * Description 2294 * Adjust the size allocated to TLVs in the outermost IPv6 2295 * Segment Routing Header contained in the packet associated to 2296 * *skb*, at position *offset* by *delta* bytes. Only offsets 2297 * after the segments are accepted. *delta* can be as well 2298 * positive (growing) as negative (shrinking). 2299 * 2300 * A call to this helper is susceptible to change the underlying 2301 * packet buffer. Therefore, at load time, all checks on pointers 2302 * previously done by the verifier are invalidated and must be 2303 * performed again, if the helper is used in combination with 2304 * direct packet access. 2305 * Return 2306 * 0 on success, or a negative error in case of failure. 2307 * 2308 * long bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len) 2309 * Description 2310 * Apply an IPv6 Segment Routing action of type *action* to the 2311 * packet associated to *skb*. Each action takes a parameter 2312 * contained at address *param*, and of length *param_len* bytes. 2313 * *action* can be one of: 2314 * 2315 * **SEG6_LOCAL_ACTION_END_X** 2316 * End.X action: Endpoint with Layer-3 cross-connect. 2317 * Type of *param*: **struct in6_addr**. 2318 * **SEG6_LOCAL_ACTION_END_T** 2319 * End.T action: Endpoint with specific IPv6 table lookup. 2320 * Type of *param*: **int**. 2321 * **SEG6_LOCAL_ACTION_END_B6** 2322 * End.B6 action: Endpoint bound to an SRv6 policy. 2323 * Type of *param*: **struct ipv6_sr_hdr**. 2324 * **SEG6_LOCAL_ACTION_END_B6_ENCAP** 2325 * End.B6.Encap action: Endpoint bound to an SRv6 2326 * encapsulation policy. 2327 * Type of *param*: **struct ipv6_sr_hdr**. 2328 * 2329 * A call to this helper is susceptible to change the underlying 2330 * packet buffer. Therefore, at load time, all checks on pointers 2331 * previously done by the verifier are invalidated and must be 2332 * performed again, if the helper is used in combination with 2333 * direct packet access. 2334 * Return 2335 * 0 on success, or a negative error in case of failure. 2336 * 2337 * long bpf_rc_repeat(void *ctx) 2338 * Description 2339 * This helper is used in programs implementing IR decoding, to 2340 * report a successfully decoded repeat key message. This delays 2341 * the generation of a key up event for previously generated 2342 * key down event. 2343 * 2344 * Some IR protocols like NEC have a special IR message for 2345 * repeating last button, for when a button is held down. 2346 * 2347 * The *ctx* should point to the lirc sample as passed into 2348 * the program. 2349 * 2350 * This helper is only available is the kernel was compiled with 2351 * the **CONFIG_BPF_LIRC_MODE2** configuration option set to 2352 * "**y**". 2353 * Return 2354 * 0 2355 * 2356 * long bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle) 2357 * Description 2358 * This helper is used in programs implementing IR decoding, to 2359 * report a successfully decoded key press with *scancode*, 2360 * *toggle* value in the given *protocol*. The scancode will be 2361 * translated to a keycode using the rc keymap, and reported as 2362 * an input key down event. After a period a key up event is 2363 * generated. This period can be extended by calling either 2364 * **bpf_rc_keydown**\ () again with the same values, or calling 2365 * **bpf_rc_repeat**\ (). 2366 * 2367 * Some protocols include a toggle bit, in case the button was 2368 * released and pressed again between consecutive scancodes. 2369 * 2370 * The *ctx* should point to the lirc sample as passed into 2371 * the program. 2372 * 2373 * The *protocol* is the decoded protocol number (see 2374 * **enum rc_proto** for some predefined values). 2375 * 2376 * This helper is only available is the kernel was compiled with 2377 * the **CONFIG_BPF_LIRC_MODE2** configuration option set to 2378 * "**y**". 2379 * Return 2380 * 0 2381 * 2382 * u64 bpf_skb_cgroup_id(struct sk_buff *skb) 2383 * Description 2384 * Return the cgroup v2 id of the socket associated with the *skb*. 2385 * This is roughly similar to the **bpf_get_cgroup_classid**\ () 2386 * helper for cgroup v1 by providing a tag resp. identifier that 2387 * can be matched on or used for map lookups e.g. to implement 2388 * policy. The cgroup v2 id of a given path in the hierarchy is 2389 * exposed in user space through the f_handle API in order to get 2390 * to the same 64-bit id. 2391 * 2392 * This helper can be used on TC egress path, but not on ingress, 2393 * and is available only if the kernel was compiled with the 2394 * **CONFIG_SOCK_CGROUP_DATA** configuration option. 2395 * Return 2396 * The id is returned or 0 in case the id could not be retrieved. 2397 * 2398 * u64 bpf_get_current_cgroup_id(void) 2399 * Return 2400 * A 64-bit integer containing the current cgroup id based 2401 * on the cgroup within which the current task is running. 2402 * 2403 * void *bpf_get_local_storage(void *map, u64 flags) 2404 * Description 2405 * Get the pointer to the local storage area. 2406 * The type and the size of the local storage is defined 2407 * by the *map* argument. 2408 * The *flags* meaning is specific for each map type, 2409 * and has to be 0 for cgroup local storage. 2410 * 2411 * Depending on the BPF program type, a local storage area 2412 * can be shared between multiple instances of the BPF program, 2413 * running simultaneously. 2414 * 2415 * A user should care about the synchronization by himself. 2416 * For example, by using the **BPF_STX_XADD** instruction to alter 2417 * the shared data. 2418 * Return 2419 * A pointer to the local storage area. 2420 * 2421 * long bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags) 2422 * Description 2423 * Select a **SO_REUSEPORT** socket from a 2424 * **BPF_MAP_TYPE_REUSEPORT_ARRAY** *map*. 2425 * It checks the selected socket is matching the incoming 2426 * request in the socket buffer. 2427 * Return 2428 * 0 on success, or a negative error in case of failure. 2429 * 2430 * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level) 2431 * Description 2432 * Return id of cgroup v2 that is ancestor of cgroup associated 2433 * with the *skb* at the *ancestor_level*. The root cgroup is at 2434 * *ancestor_level* zero and each step down the hierarchy 2435 * increments the level. If *ancestor_level* == level of cgroup 2436 * associated with *skb*, then return value will be same as that 2437 * of **bpf_skb_cgroup_id**\ (). 2438 * 2439 * The helper is useful to implement policies based on cgroups 2440 * that are upper in hierarchy than immediate cgroup associated 2441 * with *skb*. 2442 * 2443 * The format of returned id and helper limitations are same as in 2444 * **bpf_skb_cgroup_id**\ (). 2445 * Return 2446 * The id is returned or 0 in case the id could not be retrieved. 2447 * 2448 * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) 2449 * Description 2450 * Look for TCP socket matching *tuple*, optionally in a child 2451 * network namespace *netns*. The return value must be checked, 2452 * and if non-**NULL**, released via **bpf_sk_release**\ (). 2453 * 2454 * The *ctx* should point to the context of the program, such as 2455 * the skb or socket (depending on the hook in use). This is used 2456 * to determine the base network namespace for the lookup. 2457 * 2458 * *tuple_size* must be one of: 2459 * 2460 * **sizeof**\ (*tuple*\ **->ipv4**) 2461 * Look for an IPv4 socket. 2462 * **sizeof**\ (*tuple*\ **->ipv6**) 2463 * Look for an IPv6 socket. 2464 * 2465 * If the *netns* is a negative signed 32-bit integer, then the 2466 * socket lookup table in the netns associated with the *ctx* 2467 * will be used. For the TC hooks, this is the netns of the device 2468 * in the skb. For socket hooks, this is the netns of the socket. 2469 * If *netns* is any other signed 32-bit value greater than or 2470 * equal to zero then it specifies the ID of the netns relative to 2471 * the netns associated with the *ctx*. *netns* values beyond the 2472 * range of 32-bit integers are reserved for future use. 2473 * 2474 * All values for *flags* are reserved for future usage, and must 2475 * be left at zero. 2476 * 2477 * This helper is available only if the kernel was compiled with 2478 * **CONFIG_NET** configuration option. 2479 * Return 2480 * Pointer to **struct bpf_sock**, or **NULL** in case of failure. 2481 * For sockets with reuseport option, the **struct bpf_sock** 2482 * result is from *reuse*\ **->socks**\ [] using the hash of the 2483 * tuple. 2484 * 2485 * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) 2486 * Description 2487 * Look for UDP socket matching *tuple*, optionally in a child 2488 * network namespace *netns*. The return value must be checked, 2489 * and if non-**NULL**, released via **bpf_sk_release**\ (). 2490 * 2491 * The *ctx* should point to the context of the program, such as 2492 * the skb or socket (depending on the hook in use). This is used 2493 * to determine the base network namespace for the lookup. 2494 * 2495 * *tuple_size* must be one of: 2496 * 2497 * **sizeof**\ (*tuple*\ **->ipv4**) 2498 * Look for an IPv4 socket. 2499 * **sizeof**\ (*tuple*\ **->ipv6**) 2500 * Look for an IPv6 socket. 2501 * 2502 * If the *netns* is a negative signed 32-bit integer, then the 2503 * socket lookup table in the netns associated with the *ctx* 2504 * will be used. For the TC hooks, this is the netns of the device 2505 * in the skb. For socket hooks, this is the netns of the socket. 2506 * If *netns* is any other signed 32-bit value greater than or 2507 * equal to zero then it specifies the ID of the netns relative to 2508 * the netns associated with the *ctx*. *netns* values beyond the 2509 * range of 32-bit integers are reserved for future use. 2510 * 2511 * All values for *flags* are reserved for future usage, and must 2512 * be left at zero. 2513 * 2514 * This helper is available only if the kernel was compiled with 2515 * **CONFIG_NET** configuration option. 2516 * Return 2517 * Pointer to **struct bpf_sock**, or **NULL** in case of failure. 2518 * For sockets with reuseport option, the **struct bpf_sock** 2519 * result is from *reuse*\ **->socks**\ [] using the hash of the 2520 * tuple. 2521 * 2522 * long bpf_sk_release(void *sock) 2523 * Description 2524 * Release the reference held by *sock*. *sock* must be a 2525 * non-**NULL** pointer that was returned from 2526 * **bpf_sk_lookup_xxx**\ (). 2527 * Return 2528 * 0 on success, or a negative error in case of failure. 2529 * 2530 * long bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags) 2531 * Description 2532 * Push an element *value* in *map*. *flags* is one of: 2533 * 2534 * **BPF_EXIST** 2535 * If the queue/stack is full, the oldest element is 2536 * removed to make room for this. 2537 * Return 2538 * 0 on success, or a negative error in case of failure. 2539 * 2540 * long bpf_map_pop_elem(struct bpf_map *map, void *value) 2541 * Description 2542 * Pop an element from *map*. 2543 * Return 2544 * 0 on success, or a negative error in case of failure. 2545 * 2546 * long bpf_map_peek_elem(struct bpf_map *map, void *value) 2547 * Description 2548 * Get an element from *map* without removing it. 2549 * Return 2550 * 0 on success, or a negative error in case of failure. 2551 * 2552 * long bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) 2553 * Description 2554 * For socket policies, insert *len* bytes into *msg* at offset 2555 * *start*. 2556 * 2557 * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a 2558 * *msg* it may want to insert metadata or options into the *msg*. 2559 * This can later be read and used by any of the lower layer BPF 2560 * hooks. 2561 * 2562 * This helper may fail if under memory pressure (a malloc 2563 * fails) in these cases BPF programs will get an appropriate 2564 * error and BPF programs will need to handle them. 2565 * Return 2566 * 0 on success, or a negative error in case of failure. 2567 * 2568 * long bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) 2569 * Description 2570 * Will remove *len* bytes from a *msg* starting at byte *start*. 2571 * This may result in **ENOMEM** errors under certain situations if 2572 * an allocation and copy are required due to a full ring buffer. 2573 * However, the helper will try to avoid doing the allocation 2574 * if possible. Other errors can occur if input parameters are 2575 * invalid either due to *start* byte not being valid part of *msg* 2576 * payload and/or *pop* value being to large. 2577 * Return 2578 * 0 on success, or a negative error in case of failure. 2579 * 2580 * long bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y) 2581 * Description 2582 * This helper is used in programs implementing IR decoding, to 2583 * report a successfully decoded pointer movement. 2584 * 2585 * The *ctx* should point to the lirc sample as passed into 2586 * the program. 2587 * 2588 * This helper is only available is the kernel was compiled with 2589 * the **CONFIG_BPF_LIRC_MODE2** configuration option set to 2590 * "**y**". 2591 * Return 2592 * 0 2593 * 2594 * long bpf_spin_lock(struct bpf_spin_lock *lock) 2595 * Description 2596 * Acquire a spinlock represented by the pointer *lock*, which is 2597 * stored as part of a value of a map. Taking the lock allows to 2598 * safely update the rest of the fields in that value. The 2599 * spinlock can (and must) later be released with a call to 2600 * **bpf_spin_unlock**\ (\ *lock*\ ). 2601 * 2602 * Spinlocks in BPF programs come with a number of restrictions 2603 * and constraints: 2604 * 2605 * * **bpf_spin_lock** objects are only allowed inside maps of 2606 * types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this 2607 * list could be extended in the future). 2608 * * BTF description of the map is mandatory. 2609 * * The BPF program can take ONE lock at a time, since taking two 2610 * or more could cause dead locks. 2611 * * Only one **struct bpf_spin_lock** is allowed per map element. 2612 * * When the lock is taken, calls (either BPF to BPF or helpers) 2613 * are not allowed. 2614 * * The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not 2615 * allowed inside a spinlock-ed region. 2616 * * The BPF program MUST call **bpf_spin_unlock**\ () to release 2617 * the lock, on all execution paths, before it returns. 2618 * * The BPF program can access **struct bpf_spin_lock** only via 2619 * the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ () 2620 * helpers. Loading or storing data into the **struct 2621 * bpf_spin_lock** *lock*\ **;** field of a map is not allowed. 2622 * * To use the **bpf_spin_lock**\ () helper, the BTF description 2623 * of the map value must be a struct and have **struct 2624 * bpf_spin_lock** *anyname*\ **;** field at the top level. 2625 * Nested lock inside another struct is not allowed. 2626 * * The **struct bpf_spin_lock** *lock* field in a map value must 2627 * be aligned on a multiple of 4 bytes in that value. 2628 * * Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy 2629 * the **bpf_spin_lock** field to user space. 2630 * * Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from 2631 * a BPF program, do not update the **bpf_spin_lock** field. 2632 * * **bpf_spin_lock** cannot be on the stack or inside a 2633 * networking packet (it can only be inside of a map values). 2634 * * **bpf_spin_lock** is available to root only. 2635 * * Tracing programs and socket filter programs cannot use 2636 * **bpf_spin_lock**\ () due to insufficient preemption checks 2637 * (but this may change in the future). 2638 * * **bpf_spin_lock** is not allowed in inner maps of map-in-map. 2639 * Return 2640 * 0 2641 * 2642 * long bpf_spin_unlock(struct bpf_spin_lock *lock) 2643 * Description 2644 * Release the *lock* previously locked by a call to 2645 * **bpf_spin_lock**\ (\ *lock*\ ). 2646 * Return 2647 * 0 2648 * 2649 * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk) 2650 * Description 2651 * This helper gets a **struct bpf_sock** pointer such 2652 * that all the fields in this **bpf_sock** can be accessed. 2653 * Return 2654 * A **struct bpf_sock** pointer on success, or **NULL** in 2655 * case of failure. 2656 * 2657 * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk) 2658 * Description 2659 * This helper gets a **struct bpf_tcp_sock** pointer from a 2660 * **struct bpf_sock** pointer. 2661 * Return 2662 * A **struct bpf_tcp_sock** pointer on success, or **NULL** in 2663 * case of failure. 2664 * 2665 * long bpf_skb_ecn_set_ce(struct sk_buff *skb) 2666 * Description 2667 * Set ECN (Explicit Congestion Notification) field of IP header 2668 * to **CE** (Congestion Encountered) if current value is **ECT** 2669 * (ECN Capable Transport). Otherwise, do nothing. Works with IPv6 2670 * and IPv4. 2671 * Return 2672 * 1 if the **CE** flag is set (either by the current helper call 2673 * or because it was already present), 0 if it is not set. 2674 * 2675 * struct bpf_sock *bpf_get_listener_sock(struct bpf_sock *sk) 2676 * Description 2677 * Return a **struct bpf_sock** pointer in **TCP_LISTEN** state. 2678 * **bpf_sk_release**\ () is unnecessary and not allowed. 2679 * Return 2680 * A **struct bpf_sock** pointer on success, or **NULL** in 2681 * case of failure. 2682 * 2683 * struct bpf_sock *bpf_skc_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) 2684 * Description 2685 * Look for TCP socket matching *tuple*, optionally in a child 2686 * network namespace *netns*. The return value must be checked, 2687 * and if non-**NULL**, released via **bpf_sk_release**\ (). 2688 * 2689 * This function is identical to **bpf_sk_lookup_tcp**\ (), except 2690 * that it also returns timewait or request sockets. Use 2691 * **bpf_sk_fullsock**\ () or **bpf_tcp_sock**\ () to access the 2692 * full structure. 2693 * 2694 * This helper is available only if the kernel was compiled with 2695 * **CONFIG_NET** configuration option. 2696 * Return 2697 * Pointer to **struct bpf_sock**, or **NULL** in case of failure. 2698 * For sockets with reuseport option, the **struct bpf_sock** 2699 * result is from *reuse*\ **->socks**\ [] using the hash of the 2700 * tuple. 2701 * 2702 * long bpf_tcp_check_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) 2703 * Description 2704 * Check whether *iph* and *th* contain a valid SYN cookie ACK for 2705 * the listening socket in *sk*. 2706 * 2707 * *iph* points to the start of the IPv4 or IPv6 header, while 2708 * *iph_len* contains **sizeof**\ (**struct iphdr**) or 2709 * **sizeof**\ (**struct ip6hdr**). 2710 * 2711 * *th* points to the start of the TCP header, while *th_len* 2712 * contains **sizeof**\ (**struct tcphdr**). 2713 * Return 2714 * 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative 2715 * error otherwise. 2716 * 2717 * long bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags) 2718 * Description 2719 * Get name of sysctl in /proc/sys/ and copy it into provided by 2720 * program buffer *buf* of size *buf_len*. 2721 * 2722 * The buffer is always NUL terminated, unless it's zero-sized. 2723 * 2724 * If *flags* is zero, full name (e.g. "net/ipv4/tcp_mem") is 2725 * copied. Use **BPF_F_SYSCTL_BASE_NAME** flag to copy base name 2726 * only (e.g. "tcp_mem"). 2727 * Return 2728 * Number of character copied (not including the trailing NUL). 2729 * 2730 * **-E2BIG** if the buffer wasn't big enough (*buf* will contain 2731 * truncated name in this case). 2732 * 2733 * long bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) 2734 * Description 2735 * Get current value of sysctl as it is presented in /proc/sys 2736 * (incl. newline, etc), and copy it as a string into provided 2737 * by program buffer *buf* of size *buf_len*. 2738 * 2739 * The whole value is copied, no matter what file position user 2740 * space issued e.g. sys_read at. 2741 * 2742 * The buffer is always NUL terminated, unless it's zero-sized. 2743 * Return 2744 * Number of character copied (not including the trailing NUL). 2745 * 2746 * **-E2BIG** if the buffer wasn't big enough (*buf* will contain 2747 * truncated name in this case). 2748 * 2749 * **-EINVAL** if current value was unavailable, e.g. because 2750 * sysctl is uninitialized and read returns -EIO for it. 2751 * 2752 * long bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) 2753 * Description 2754 * Get new value being written by user space to sysctl (before 2755 * the actual write happens) and copy it as a string into 2756 * provided by program buffer *buf* of size *buf_len*. 2757 * 2758 * User space may write new value at file position > 0. 2759 * 2760 * The buffer is always NUL terminated, unless it's zero-sized. 2761 * Return 2762 * Number of character copied (not including the trailing NUL). 2763 * 2764 * **-E2BIG** if the buffer wasn't big enough (*buf* will contain 2765 * truncated name in this case). 2766 * 2767 * **-EINVAL** if sysctl is being read. 2768 * 2769 * long bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len) 2770 * Description 2771 * Override new value being written by user space to sysctl with 2772 * value provided by program in buffer *buf* of size *buf_len*. 2773 * 2774 * *buf* should contain a string in same form as provided by user 2775 * space on sysctl write. 2776 * 2777 * User space may write new value at file position > 0. To override 2778 * the whole sysctl value file position should be set to zero. 2779 * Return 2780 * 0 on success. 2781 * 2782 * **-E2BIG** if the *buf_len* is too big. 2783 * 2784 * **-EINVAL** if sysctl is being read. 2785 * 2786 * long bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res) 2787 * Description 2788 * Convert the initial part of the string from buffer *buf* of 2789 * size *buf_len* to a long integer according to the given base 2790 * and save the result in *res*. 2791 * 2792 * The string may begin with an arbitrary amount of white space 2793 * (as determined by **isspace**\ (3)) followed by a single 2794 * optional '**-**' sign. 2795 * 2796 * Five least significant bits of *flags* encode base, other bits 2797 * are currently unused. 2798 * 2799 * Base must be either 8, 10, 16 or 0 to detect it automatically 2800 * similar to user space **strtol**\ (3). 2801 * Return 2802 * Number of characters consumed on success. Must be positive but 2803 * no more than *buf_len*. 2804 * 2805 * **-EINVAL** if no valid digits were found or unsupported base 2806 * was provided. 2807 * 2808 * **-ERANGE** if resulting value was out of range. 2809 * 2810 * long bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res) 2811 * Description 2812 * Convert the initial part of the string from buffer *buf* of 2813 * size *buf_len* to an unsigned long integer according to the 2814 * given base and save the result in *res*. 2815 * 2816 * The string may begin with an arbitrary amount of white space 2817 * (as determined by **isspace**\ (3)). 2818 * 2819 * Five least significant bits of *flags* encode base, other bits 2820 * are currently unused. 2821 * 2822 * Base must be either 8, 10, 16 or 0 to detect it automatically 2823 * similar to user space **strtoul**\ (3). 2824 * Return 2825 * Number of characters consumed on success. Must be positive but 2826 * no more than *buf_len*. 2827 * 2828 * **-EINVAL** if no valid digits were found or unsupported base 2829 * was provided. 2830 * 2831 * **-ERANGE** if resulting value was out of range. 2832 * 2833 * void *bpf_sk_storage_get(struct bpf_map *map, void *sk, void *value, u64 flags) 2834 * Description 2835 * Get a bpf-local-storage from a *sk*. 2836 * 2837 * Logically, it could be thought of getting the value from 2838 * a *map* with *sk* as the **key**. From this 2839 * perspective, the usage is not much different from 2840 * **bpf_map_lookup_elem**\ (*map*, **&**\ *sk*) except this 2841 * helper enforces the key must be a full socket and the map must 2842 * be a **BPF_MAP_TYPE_SK_STORAGE** also. 2843 * 2844 * Underneath, the value is stored locally at *sk* instead of 2845 * the *map*. The *map* is used as the bpf-local-storage 2846 * "type". The bpf-local-storage "type" (i.e. the *map*) is 2847 * searched against all bpf-local-storages residing at *sk*. 2848 * 2849 * *sk* is a kernel **struct sock** pointer for LSM program. 2850 * *sk* is a **struct bpf_sock** pointer for other program types. 2851 * 2852 * An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be 2853 * used such that a new bpf-local-storage will be 2854 * created if one does not exist. *value* can be used 2855 * together with **BPF_SK_STORAGE_GET_F_CREATE** to specify 2856 * the initial value of a bpf-local-storage. If *value* is 2857 * **NULL**, the new bpf-local-storage will be zero initialized. 2858 * Return 2859 * A bpf-local-storage pointer is returned on success. 2860 * 2861 * **NULL** if not found or there was an error in adding 2862 * a new bpf-local-storage. 2863 * 2864 * long bpf_sk_storage_delete(struct bpf_map *map, void *sk) 2865 * Description 2866 * Delete a bpf-local-storage from a *sk*. 2867 * Return 2868 * 0 on success. 2869 * 2870 * **-ENOENT** if the bpf-local-storage cannot be found. 2871 * **-EINVAL** if sk is not a fullsock (e.g. a request_sock). 2872 * 2873 * long bpf_send_signal(u32 sig) 2874 * Description 2875 * Send signal *sig* to the process of the current task. 2876 * The signal may be delivered to any of this process's threads. 2877 * Return 2878 * 0 on success or successfully queued. 2879 * 2880 * **-EBUSY** if work queue under nmi is full. 2881 * 2882 * **-EINVAL** if *sig* is invalid. 2883 * 2884 * **-EPERM** if no permission to send the *sig*. 2885 * 2886 * **-EAGAIN** if bpf program can try again. 2887 * 2888 * s64 bpf_tcp_gen_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) 2889 * Description 2890 * Try to issue a SYN cookie for the packet with corresponding 2891 * IP/TCP headers, *iph* and *th*, on the listening socket in *sk*. 2892 * 2893 * *iph* points to the start of the IPv4 or IPv6 header, while 2894 * *iph_len* contains **sizeof**\ (**struct iphdr**) or 2895 * **sizeof**\ (**struct ip6hdr**). 2896 * 2897 * *th* points to the start of the TCP header, while *th_len* 2898 * contains the length of the TCP header. 2899 * Return 2900 * On success, lower 32 bits hold the generated SYN cookie in 2901 * followed by 16 bits which hold the MSS value for that cookie, 2902 * and the top 16 bits are unused. 2903 * 2904 * On failure, the returned value is one of the following: 2905 * 2906 * **-EINVAL** SYN cookie cannot be issued due to error 2907 * 2908 * **-ENOENT** SYN cookie should not be issued (no SYN flood) 2909 * 2910 * **-EOPNOTSUPP** kernel configuration does not enable SYN cookies 2911 * 2912 * **-EPROTONOSUPPORT** IP packet version is not 4 or 6 2913 * 2914 * long bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) 2915 * Description 2916 * Write raw *data* blob into a special BPF perf event held by 2917 * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf 2918 * event must have the following attributes: **PERF_SAMPLE_RAW** 2919 * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and 2920 * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. 2921 * 2922 * The *flags* are used to indicate the index in *map* for which 2923 * the value must be put, masked with **BPF_F_INDEX_MASK**. 2924 * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** 2925 * to indicate that the index of the current CPU core should be 2926 * used. 2927 * 2928 * The value to write, of *size*, is passed through eBPF stack and 2929 * pointed by *data*. 2930 * 2931 * *ctx* is a pointer to in-kernel struct sk_buff. 2932 * 2933 * This helper is similar to **bpf_perf_event_output**\ () but 2934 * restricted to raw_tracepoint bpf programs. 2935 * Return 2936 * 0 on success, or a negative error in case of failure. 2937 * 2938 * long bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr) 2939 * Description 2940 * Safely attempt to read *size* bytes from user space address 2941 * *unsafe_ptr* and store the data in *dst*. 2942 * Return 2943 * 0 on success, or a negative error in case of failure. 2944 * 2945 * long bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr) 2946 * Description 2947 * Safely attempt to read *size* bytes from kernel space address 2948 * *unsafe_ptr* and store the data in *dst*. 2949 * Return 2950 * 0 on success, or a negative error in case of failure. 2951 * 2952 * long bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr) 2953 * Description 2954 * Copy a NUL terminated string from an unsafe user address 2955 * *unsafe_ptr* to *dst*. The *size* should include the 2956 * terminating NUL byte. In case the string length is smaller than 2957 * *size*, the target is not padded with further NUL bytes. If the 2958 * string length is larger than *size*, just *size*-1 bytes are 2959 * copied and the last byte is set to NUL. 2960 * 2961 * On success, the length of the copied string is returned. This 2962 * makes this helper useful in tracing programs for reading 2963 * strings, and more importantly to get its length at runtime. See 2964 * the following snippet: 2965 * 2966 * :: 2967 * 2968 * SEC("kprobe/sys_open") 2969 * void bpf_sys_open(struct pt_regs *ctx) 2970 * { 2971 * char buf[PATHLEN]; // PATHLEN is defined to 256 2972 * int res = bpf_probe_read_user_str(buf, sizeof(buf), 2973 * ctx->di); 2974 * 2975 * // Consume buf, for example push it to 2976 * // userspace via bpf_perf_event_output(); we 2977 * // can use res (the string length) as event 2978 * // size, after checking its boundaries. 2979 * } 2980 * 2981 * In comparison, using **bpf_probe_read_user**\ () helper here 2982 * instead to read the string would require to estimate the length 2983 * at compile time, and would often result in copying more memory 2984 * than necessary. 2985 * 2986 * Another useful use case is when parsing individual process 2987 * arguments or individual environment variables navigating 2988 * *current*\ **->mm->arg_start** and *current*\ 2989 * **->mm->env_start**: using this helper and the return value, 2990 * one can quickly iterate at the right offset of the memory area. 2991 * Return 2992 * On success, the strictly positive length of the string, 2993 * including the trailing NUL character. On error, a negative 2994 * value. 2995 * 2996 * long bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr) 2997 * Description 2998 * Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr* 2999 * to *dst*. Same semantics as with **bpf_probe_read_user_str**\ () apply. 3000 * Return 3001 * On success, the strictly positive length of the string, including 3002 * the trailing NUL character. On error, a negative value. 3003 * 3004 * long bpf_tcp_send_ack(void *tp, u32 rcv_nxt) 3005 * Description 3006 * Send out a tcp-ack. *tp* is the in-kernel struct **tcp_sock**. 3007 * *rcv_nxt* is the ack_seq to be sent out. 3008 * Return 3009 * 0 on success, or a negative error in case of failure. 3010 * 3011 * long bpf_send_signal_thread(u32 sig) 3012 * Description 3013 * Send signal *sig* to the thread corresponding to the current task. 3014 * Return 3015 * 0 on success or successfully queued. 3016 * 3017 * **-EBUSY** if work queue under nmi is full. 3018 * 3019 * **-EINVAL** if *sig* is invalid. 3020 * 3021 * **-EPERM** if no permission to send the *sig*. 3022 * 3023 * **-EAGAIN** if bpf program can try again. 3024 * 3025 * u64 bpf_jiffies64(void) 3026 * Description 3027 * Obtain the 64bit jiffies 3028 * Return 3029 * The 64 bit jiffies 3030 * 3031 * long bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags) 3032 * Description 3033 * For an eBPF program attached to a perf event, retrieve the 3034 * branch records (**struct perf_branch_entry**) associated to *ctx* 3035 * and store it in the buffer pointed by *buf* up to size 3036 * *size* bytes. 3037 * Return 3038 * On success, number of bytes written to *buf*. On error, a 3039 * negative value. 3040 * 3041 * The *flags* can be set to **BPF_F_GET_BRANCH_RECORDS_SIZE** to 3042 * instead return the number of bytes required to store all the 3043 * branch entries. If this flag is set, *buf* may be NULL. 3044 * 3045 * **-EINVAL** if arguments invalid or **size** not a multiple 3046 * of **sizeof**\ (**struct perf_branch_entry**\ ). 3047 * 3048 * **-ENOENT** if architecture does not support branch records. 3049 * 3050 * long bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size) 3051 * Description 3052 * Returns 0 on success, values for *pid* and *tgid* as seen from the current 3053 * *namespace* will be returned in *nsdata*. 3054 * Return 3055 * 0 on success, or one of the following in case of failure: 3056 * 3057 * **-EINVAL** if dev and inum supplied don't match dev_t and inode number 3058 * with nsfs of current task, or if dev conversion to dev_t lost high bits. 3059 * 3060 * **-ENOENT** if pidns does not exists for the current task. 3061 * 3062 * long bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) 3063 * Description 3064 * Write raw *data* blob into a special BPF perf event held by 3065 * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf 3066 * event must have the following attributes: **PERF_SAMPLE_RAW** 3067 * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and 3068 * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. 3069 * 3070 * The *flags* are used to indicate the index in *map* for which 3071 * the value must be put, masked with **BPF_F_INDEX_MASK**. 3072 * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** 3073 * to indicate that the index of the current CPU core should be 3074 * used. 3075 * 3076 * The value to write, of *size*, is passed through eBPF stack and 3077 * pointed by *data*. 3078 * 3079 * *ctx* is a pointer to in-kernel struct xdp_buff. 3080 * 3081 * This helper is similar to **bpf_perf_eventoutput**\ () but 3082 * restricted to raw_tracepoint bpf programs. 3083 * Return 3084 * 0 on success, or a negative error in case of failure. 3085 * 3086 * u64 bpf_get_netns_cookie(void *ctx) 3087 * Description 3088 * Retrieve the cookie (generated by the kernel) of the network 3089 * namespace the input *ctx* is associated with. The network 3090 * namespace cookie remains stable for its lifetime and provides 3091 * a global identifier that can be assumed unique. If *ctx* is 3092 * NULL, then the helper returns the cookie for the initial 3093 * network namespace. The cookie itself is very similar to that 3094 * of **bpf_get_socket_cookie**\ () helper, but for network 3095 * namespaces instead of sockets. 3096 * Return 3097 * A 8-byte long opaque number. 3098 * 3099 * u64 bpf_get_current_ancestor_cgroup_id(int ancestor_level) 3100 * Description 3101 * Return id of cgroup v2 that is ancestor of the cgroup associated 3102 * with the current task at the *ancestor_level*. The root cgroup 3103 * is at *ancestor_level* zero and each step down the hierarchy 3104 * increments the level. If *ancestor_level* == level of cgroup 3105 * associated with the current task, then return value will be the 3106 * same as that of **bpf_get_current_cgroup_id**\ (). 3107 * 3108 * The helper is useful to implement policies based on cgroups 3109 * that are upper in hierarchy than immediate cgroup associated 3110 * with the current task. 3111 * 3112 * The format of returned id and helper limitations are same as in 3113 * **bpf_get_current_cgroup_id**\ (). 3114 * Return 3115 * The id is returned or 0 in case the id could not be retrieved. 3116 * 3117 * long bpf_sk_assign(struct sk_buff *skb, void *sk, u64 flags) 3118 * Description 3119 * Helper is overloaded depending on BPF program type. This 3120 * description applies to **BPF_PROG_TYPE_SCHED_CLS** and 3121 * **BPF_PROG_TYPE_SCHED_ACT** programs. 3122 * 3123 * Assign the *sk* to the *skb*. When combined with appropriate 3124 * routing configuration to receive the packet towards the socket, 3125 * will cause *skb* to be delivered to the specified socket. 3126 * Subsequent redirection of *skb* via **bpf_redirect**\ (), 3127 * **bpf_clone_redirect**\ () or other methods outside of BPF may 3128 * interfere with successful delivery to the socket. 3129 * 3130 * This operation is only valid from TC ingress path. 3131 * 3132 * The *flags* argument must be zero. 3133 * Return 3134 * 0 on success, or a negative error in case of failure: 3135 * 3136 * **-EINVAL** if specified *flags* are not supported. 3137 * 3138 * **-ENOENT** if the socket is unavailable for assignment. 3139 * 3140 * **-ENETUNREACH** if the socket is unreachable (wrong netns). 3141 * 3142 * **-EOPNOTSUPP** if the operation is not supported, for example 3143 * a call from outside of TC ingress. 3144 * 3145 * **-ESOCKTNOSUPPORT** if the socket type is not supported 3146 * (reuseport). 3147 * 3148 * long bpf_sk_assign(struct bpf_sk_lookup *ctx, struct bpf_sock *sk, u64 flags) 3149 * Description 3150 * Helper is overloaded depending on BPF program type. This 3151 * description applies to **BPF_PROG_TYPE_SK_LOOKUP** programs. 3152 * 3153 * Select the *sk* as a result of a socket lookup. 3154 * 3155 * For the operation to succeed passed socket must be compatible 3156 * with the packet description provided by the *ctx* object. 3157 * 3158 * L4 protocol (**IPPROTO_TCP** or **IPPROTO_UDP**) must 3159 * be an exact match. While IP family (**AF_INET** or 3160 * **AF_INET6**) must be compatible, that is IPv6 sockets 3161 * that are not v6-only can be selected for IPv4 packets. 3162 * 3163 * Only TCP listeners and UDP unconnected sockets can be 3164 * selected. *sk* can also be NULL to reset any previous 3165 * selection. 3166 * 3167 * *flags* argument can combination of following values: 3168 * 3169 * * **BPF_SK_LOOKUP_F_REPLACE** to override the previous 3170 * socket selection, potentially done by a BPF program 3171 * that ran before us. 3172 * 3173 * * **BPF_SK_LOOKUP_F_NO_REUSEPORT** to skip 3174 * load-balancing within reuseport group for the socket 3175 * being selected. 3176 * 3177 * On success *ctx->sk* will point to the selected socket. 3178 * 3179 * Return 3180 * 0 on success, or a negative errno in case of failure. 3181 * 3182 * * **-EAFNOSUPPORT** if socket family (*sk->family*) is 3183 * not compatible with packet family (*ctx->family*). 3184 * 3185 * * **-EEXIST** if socket has been already selected, 3186 * potentially by another program, and 3187 * **BPF_SK_LOOKUP_F_REPLACE** flag was not specified. 3188 * 3189 * * **-EINVAL** if unsupported flags were specified. 3190 * 3191 * * **-EPROTOTYPE** if socket L4 protocol 3192 * (*sk->protocol*) doesn't match packet protocol 3193 * (*ctx->protocol*). 3194 * 3195 * * **-ESOCKTNOSUPPORT** if socket is not in allowed 3196 * state (TCP listening or UDP unconnected). 3197 * 3198 * u64 bpf_ktime_get_boot_ns(void) 3199 * Description 3200 * Return the time elapsed since system boot, in nanoseconds. 3201 * Does include the time the system was suspended. 3202 * See: **clock_gettime**\ (**CLOCK_BOOTTIME**) 3203 * Return 3204 * Current *ktime*. 3205 * 3206 * long bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len) 3207 * Description 3208 * **bpf_seq_printf**\ () uses seq_file **seq_printf**\ () to print 3209 * out the format string. 3210 * The *m* represents the seq_file. The *fmt* and *fmt_size* are for 3211 * the format string itself. The *data* and *data_len* are format string 3212 * arguments. The *data* are a **u64** array and corresponding format string 3213 * values are stored in the array. For strings and pointers where pointees 3214 * are accessed, only the pointer values are stored in the *data* array. 3215 * The *data_len* is the size of *data* in bytes. 3216 * 3217 * Formats **%s**, **%p{i,I}{4,6}** requires to read kernel memory. 3218 * Reading kernel memory may fail due to either invalid address or 3219 * valid address but requiring a major memory fault. If reading kernel memory 3220 * fails, the string for **%s** will be an empty string, and the ip 3221 * address for **%p{i,I}{4,6}** will be 0. Not returning error to 3222 * bpf program is consistent with what **bpf_trace_printk**\ () does for now. 3223 * Return 3224 * 0 on success, or a negative error in case of failure: 3225 * 3226 * **-EBUSY** if per-CPU memory copy buffer is busy, can try again 3227 * by returning 1 from bpf program. 3228 * 3229 * **-EINVAL** if arguments are invalid, or if *fmt* is invalid/unsupported. 3230 * 3231 * **-E2BIG** if *fmt* contains too many format specifiers. 3232 * 3233 * **-EOVERFLOW** if an overflow happened: The same object will be tried again. 3234 * 3235 * long bpf_seq_write(struct seq_file *m, const void *data, u32 len) 3236 * Description 3237 * **bpf_seq_write**\ () uses seq_file **seq_write**\ () to write the data. 3238 * The *m* represents the seq_file. The *data* and *len* represent the 3239 * data to write in bytes. 3240 * Return 3241 * 0 on success, or a negative error in case of failure: 3242 * 3243 * **-EOVERFLOW** if an overflow happened: The same object will be tried again. 3244 * 3245 * u64 bpf_sk_cgroup_id(void *sk) 3246 * Description 3247 * Return the cgroup v2 id of the socket *sk*. 3248 * 3249 * *sk* must be a non-**NULL** pointer to a socket, e.g. one 3250 * returned from **bpf_sk_lookup_xxx**\ (), 3251 * **bpf_sk_fullsock**\ (), etc. The format of returned id is 3252 * same as in **bpf_skb_cgroup_id**\ (). 3253 * 3254 * This helper is available only if the kernel was compiled with 3255 * the **CONFIG_SOCK_CGROUP_DATA** configuration option. 3256 * Return 3257 * The id is returned or 0 in case the id could not be retrieved. 3258 * 3259 * u64 bpf_sk_ancestor_cgroup_id(void *sk, int ancestor_level) 3260 * Description 3261 * Return id of cgroup v2 that is ancestor of cgroup associated 3262 * with the *sk* at the *ancestor_level*. The root cgroup is at 3263 * *ancestor_level* zero and each step down the hierarchy 3264 * increments the level. If *ancestor_level* == level of cgroup 3265 * associated with *sk*, then return value will be same as that 3266 * of **bpf_sk_cgroup_id**\ (). 3267 * 3268 * The helper is useful to implement policies based on cgroups 3269 * that are upper in hierarchy than immediate cgroup associated 3270 * with *sk*. 3271 * 3272 * The format of returned id and helper limitations are same as in 3273 * **bpf_sk_cgroup_id**\ (). 3274 * Return 3275 * The id is returned or 0 in case the id could not be retrieved. 3276 * 3277 * long bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags) 3278 * Description 3279 * Copy *size* bytes from *data* into a ring buffer *ringbuf*. 3280 * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification 3281 * of new data availability is sent. 3282 * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification 3283 * of new data availability is sent unconditionally. 3284 * Return 3285 * 0 on success, or a negative error in case of failure. 3286 * 3287 * void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags) 3288 * Description 3289 * Reserve *size* bytes of payload in a ring buffer *ringbuf*. 3290 * Return 3291 * Valid pointer with *size* bytes of memory available; NULL, 3292 * otherwise. 3293 * 3294 * void bpf_ringbuf_submit(void *data, u64 flags) 3295 * Description 3296 * Submit reserved ring buffer sample, pointed to by *data*. 3297 * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification 3298 * of new data availability is sent. 3299 * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification 3300 * of new data availability is sent unconditionally. 3301 * Return 3302 * Nothing. Always succeeds. 3303 * 3304 * void bpf_ringbuf_discard(void *data, u64 flags) 3305 * Description 3306 * Discard reserved ring buffer sample, pointed to by *data*. 3307 * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification 3308 * of new data availability is sent. 3309 * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification 3310 * of new data availability is sent unconditionally. 3311 * Return 3312 * Nothing. Always succeeds. 3313 * 3314 * u64 bpf_ringbuf_query(void *ringbuf, u64 flags) 3315 * Description 3316 * Query various characteristics of provided ring buffer. What 3317 * exactly is queries is determined by *flags*: 3318 * 3319 * * **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed. 3320 * * **BPF_RB_RING_SIZE**: The size of ring buffer. 3321 * * **BPF_RB_CONS_POS**: Consumer position (can wrap around). 3322 * * **BPF_RB_PROD_POS**: Producer(s) position (can wrap around). 3323 * 3324 * Data returned is just a momentary snapshot of actual values 3325 * and could be inaccurate, so this facility should be used to 3326 * power heuristics and for reporting, not to make 100% correct 3327 * calculation. 3328 * Return 3329 * Requested value, or 0, if *flags* are not recognized. 3330 * 3331 * long bpf_csum_level(struct sk_buff *skb, u64 level) 3332 * Description 3333 * Change the skbs checksum level by one layer up or down, or 3334 * reset it entirely to none in order to have the stack perform 3335 * checksum validation. The level is applicable to the following 3336 * protocols: TCP, UDP, GRE, SCTP, FCOE. For example, a decap of 3337 * | ETH | IP | UDP | GUE | IP | TCP | into | ETH | IP | TCP | 3338 * through **bpf_skb_adjust_room**\ () helper with passing in 3339 * **BPF_F_ADJ_ROOM_NO_CSUM_RESET** flag would require one call 3340 * to **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_DEC** since 3341 * the UDP header is removed. Similarly, an encap of the latter 3342 * into the former could be accompanied by a helper call to 3343 * **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_INC** if the 3344 * skb is still intended to be processed in higher layers of the 3345 * stack instead of just egressing at tc. 3346 * 3347 * There are three supported level settings at this time: 3348 * 3349 * * **BPF_CSUM_LEVEL_INC**: Increases skb->csum_level for skbs 3350 * with CHECKSUM_UNNECESSARY. 3351 * * **BPF_CSUM_LEVEL_DEC**: Decreases skb->csum_level for skbs 3352 * with CHECKSUM_UNNECESSARY. 3353 * * **BPF_CSUM_LEVEL_RESET**: Resets skb->csum_level to 0 and 3354 * sets CHECKSUM_NONE to force checksum validation by the stack. 3355 * * **BPF_CSUM_LEVEL_QUERY**: No-op, returns the current 3356 * skb->csum_level. 3357 * Return 3358 * 0 on success, or a negative error in case of failure. In the 3359 * case of **BPF_CSUM_LEVEL_QUERY**, the current skb->csum_level 3360 * is returned or the error code -EACCES in case the skb is not 3361 * subject to CHECKSUM_UNNECESSARY. 3362 * 3363 * struct tcp6_sock *bpf_skc_to_tcp6_sock(void *sk) 3364 * Description 3365 * Dynamically cast a *sk* pointer to a *tcp6_sock* pointer. 3366 * Return 3367 * *sk* if casting is valid, or **NULL** otherwise. 3368 * 3369 * struct tcp_sock *bpf_skc_to_tcp_sock(void *sk) 3370 * Description 3371 * Dynamically cast a *sk* pointer to a *tcp_sock* pointer. 3372 * Return 3373 * *sk* if casting is valid, or **NULL** otherwise. 3374 * 3375 * struct tcp_timewait_sock *bpf_skc_to_tcp_timewait_sock(void *sk) 3376 * Description 3377 * Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer. 3378 * Return 3379 * *sk* if casting is valid, or **NULL** otherwise. 3380 * 3381 * struct tcp_request_sock *bpf_skc_to_tcp_request_sock(void *sk) 3382 * Description 3383 * Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer. 3384 * Return 3385 * *sk* if casting is valid, or **NULL** otherwise. 3386 * 3387 * struct udp6_sock *bpf_skc_to_udp6_sock(void *sk) 3388 * Description 3389 * Dynamically cast a *sk* pointer to a *udp6_sock* pointer. 3390 * Return 3391 * *sk* if casting is valid, or **NULL** otherwise. 3392 * 3393 * long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags) 3394 * Description 3395 * Return a user or a kernel stack in bpf program provided buffer. 3396 * To achieve this, the helper needs *task*, which is a valid 3397 * pointer to **struct task_struct**. To store the stacktrace, the 3398 * bpf program provides *buf* with a nonnegative *size*. 3399 * 3400 * The last argument, *flags*, holds the number of stack frames to 3401 * skip (from 0 to 255), masked with 3402 * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set 3403 * the following flags: 3404 * 3405 * **BPF_F_USER_STACK** 3406 * Collect a user space stack instead of a kernel stack. 3407 * **BPF_F_USER_BUILD_ID** 3408 * Collect buildid+offset instead of ips for user stack, 3409 * only valid if **BPF_F_USER_STACK** is also specified. 3410 * 3411 * **bpf_get_task_stack**\ () can collect up to 3412 * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject 3413 * to sufficient large buffer size. Note that 3414 * this limit can be controlled with the **sysctl** program, and 3415 * that it should be manually increased in order to profile long 3416 * user stacks (such as stacks for Java programs). To do so, use: 3417 * 3418 * :: 3419 * 3420 * # sysctl kernel.perf_event_max_stack=<new value> 3421 * Return 3422 * A non-negative value equal to or less than *size* on success, 3423 * or a negative error in case of failure. 3424 * 3425 * long bpf_load_hdr_opt(struct bpf_sock_ops *skops, void *searchby_res, u32 len, u64 flags) 3426 * Description 3427 * Load header option. Support reading a particular TCP header 3428 * option for bpf program (**BPF_PROG_TYPE_SOCK_OPS**). 3429 * 3430 * If *flags* is 0, it will search the option from the 3431 * *skops*\ **->skb_data**. The comment in **struct bpf_sock_ops** 3432 * has details on what skb_data contains under different 3433 * *skops*\ **->op**. 3434 * 3435 * The first byte of the *searchby_res* specifies the 3436 * kind that it wants to search. 3437 * 3438 * If the searching kind is an experimental kind 3439 * (i.e. 253 or 254 according to RFC6994). It also 3440 * needs to specify the "magic" which is either 3441 * 2 bytes or 4 bytes. It then also needs to 3442 * specify the size of the magic by using 3443 * the 2nd byte which is "kind-length" of a TCP 3444 * header option and the "kind-length" also 3445 * includes the first 2 bytes "kind" and "kind-length" 3446 * itself as a normal TCP header option also does. 3447 * 3448 * For example, to search experimental kind 254 with 3449 * 2 byte magic 0xeB9F, the searchby_res should be 3450 * [ 254, 4, 0xeB, 0x9F, 0, 0, .... 0 ]. 3451 * 3452 * To search for the standard window scale option (3), 3453 * the *searchby_res* should be [ 3, 0, 0, .... 0 ]. 3454 * Note, kind-length must be 0 for regular option. 3455 * 3456 * Searching for No-Op (0) and End-of-Option-List (1) are 3457 * not supported. 3458 * 3459 * *len* must be at least 2 bytes which is the minimal size 3460 * of a header option. 3461 * 3462 * Supported flags: 3463 * 3464 * * **BPF_LOAD_HDR_OPT_TCP_SYN** to search from the 3465 * saved_syn packet or the just-received syn packet. 3466 * 3467 * Return 3468 * > 0 when found, the header option is copied to *searchby_res*. 3469 * The return value is the total length copied. On failure, a 3470 * negative error code is returned: 3471 * 3472 * **-EINVAL** if a parameter is invalid. 3473 * 3474 * **-ENOMSG** if the option is not found. 3475 * 3476 * **-ENOENT** if no syn packet is available when 3477 * **BPF_LOAD_HDR_OPT_TCP_SYN** is used. 3478 * 3479 * **-ENOSPC** if there is not enough space. Only *len* number of 3480 * bytes are copied. 3481 * 3482 * **-EFAULT** on failure to parse the header options in the 3483 * packet. 3484 * 3485 * **-EPERM** if the helper cannot be used under the current 3486 * *skops*\ **->op**. 3487 * 3488 * long bpf_store_hdr_opt(struct bpf_sock_ops *skops, const void *from, u32 len, u64 flags) 3489 * Description 3490 * Store header option. The data will be copied 3491 * from buffer *from* with length *len* to the TCP header. 3492 * 3493 * The buffer *from* should have the whole option that 3494 * includes the kind, kind-length, and the actual 3495 * option data. The *len* must be at least kind-length 3496 * long. The kind-length does not have to be 4 byte 3497 * aligned. The kernel will take care of the padding 3498 * and setting the 4 bytes aligned value to th->doff. 3499 * 3500 * This helper will check for duplicated option 3501 * by searching the same option in the outgoing skb. 3502 * 3503 * This helper can only be called during 3504 * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**. 3505 * 3506 * Return 3507 * 0 on success, or negative error in case of failure: 3508 * 3509 * **-EINVAL** If param is invalid. 3510 * 3511 * **-ENOSPC** if there is not enough space in the header. 3512 * Nothing has been written 3513 * 3514 * **-EEXIST** if the option already exists. 3515 * 3516 * **-EFAULT** on failrue to parse the existing header options. 3517 * 3518 * **-EPERM** if the helper cannot be used under the current 3519 * *skops*\ **->op**. 3520 * 3521 * long bpf_reserve_hdr_opt(struct bpf_sock_ops *skops, u32 len, u64 flags) 3522 * Description 3523 * Reserve *len* bytes for the bpf header option. The 3524 * space will be used by **bpf_store_hdr_opt**\ () later in 3525 * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**. 3526 * 3527 * If **bpf_reserve_hdr_opt**\ () is called multiple times, 3528 * the total number of bytes will be reserved. 3529 * 3530 * This helper can only be called during 3531 * **BPF_SOCK_OPS_HDR_OPT_LEN_CB**. 3532 * 3533 * Return 3534 * 0 on success, or negative error in case of failure: 3535 * 3536 * **-EINVAL** if a parameter is invalid. 3537 * 3538 * **-ENOSPC** if there is not enough space in the header. 3539 * 3540 * **-EPERM** if the helper cannot be used under the current 3541 * *skops*\ **->op**. 3542 * 3543 * void *bpf_inode_storage_get(struct bpf_map *map, void *inode, void *value, u64 flags) 3544 * Description 3545 * Get a bpf_local_storage from an *inode*. 3546 * 3547 * Logically, it could be thought of as getting the value from 3548 * a *map* with *inode* as the **key**. From this 3549 * perspective, the usage is not much different from 3550 * **bpf_map_lookup_elem**\ (*map*, **&**\ *inode*) except this 3551 * helper enforces the key must be an inode and the map must also 3552 * be a **BPF_MAP_TYPE_INODE_STORAGE**. 3553 * 3554 * Underneath, the value is stored locally at *inode* instead of 3555 * the *map*. The *map* is used as the bpf-local-storage 3556 * "type". The bpf-local-storage "type" (i.e. the *map*) is 3557 * searched against all bpf_local_storage residing at *inode*. 3558 * 3559 * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be 3560 * used such that a new bpf_local_storage will be 3561 * created if one does not exist. *value* can be used 3562 * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify 3563 * the initial value of a bpf_local_storage. If *value* is 3564 * **NULL**, the new bpf_local_storage will be zero initialized. 3565 * Return 3566 * A bpf_local_storage pointer is returned on success. 3567 * 3568 * **NULL** if not found or there was an error in adding 3569 * a new bpf_local_storage. 3570 * 3571 * int bpf_inode_storage_delete(struct bpf_map *map, void *inode) 3572 * Description 3573 * Delete a bpf_local_storage from an *inode*. 3574 * Return 3575 * 0 on success. 3576 * 3577 * **-ENOENT** if the bpf_local_storage cannot be found. 3578 * 3579 * long bpf_d_path(struct path *path, char *buf, u32 sz) 3580 * Description 3581 * Return full path for given **struct path** object, which 3582 * needs to be the kernel BTF *path* object. The path is 3583 * returned in the provided buffer *buf* of size *sz* and 3584 * is zero terminated. 3585 * 3586 * Return 3587 * On success, the strictly positive length of the string, 3588 * including the trailing NUL character. On error, a negative 3589 * value. 3590 * 3591 * long bpf_copy_from_user(void *dst, u32 size, const void *user_ptr) 3592 * Description 3593 * Read *size* bytes from user space address *user_ptr* and store 3594 * the data in *dst*. This is a wrapper of **copy_from_user**\ (). 3595 * Return 3596 * 0 on success, or a negative error in case of failure. 3597 * 3598 * long bpf_snprintf_btf(char *str, u32 str_size, struct btf_ptr *ptr, u32 btf_ptr_size, u64 flags) 3599 * Description 3600 * Use BTF to store a string representation of *ptr*->ptr in *str*, 3601 * using *ptr*->type_id. This value should specify the type 3602 * that *ptr*->ptr points to. LLVM __builtin_btf_type_id(type, 1) 3603 * can be used to look up vmlinux BTF type ids. Traversing the 3604 * data structure using BTF, the type information and values are 3605 * stored in the first *str_size* - 1 bytes of *str*. Safe copy of 3606 * the pointer data is carried out to avoid kernel crashes during 3607 * operation. Smaller types can use string space on the stack; 3608 * larger programs can use map data to store the string 3609 * representation. 3610 * 3611 * The string can be subsequently shared with userspace via 3612 * bpf_perf_event_output() or ring buffer interfaces. 3613 * bpf_trace_printk() is to be avoided as it places too small 3614 * a limit on string size to be useful. 3615 * 3616 * *flags* is a combination of 3617 * 3618 * **BTF_F_COMPACT** 3619 * no formatting around type information 3620 * **BTF_F_NONAME** 3621 * no struct/union member names/types 3622 * **BTF_F_PTR_RAW** 3623 * show raw (unobfuscated) pointer values; 3624 * equivalent to printk specifier %px. 3625 * **BTF_F_ZERO** 3626 * show zero-valued struct/union members; they 3627 * are not displayed by default 3628 * 3629 * Return 3630 * The number of bytes that were written (or would have been 3631 * written if output had to be truncated due to string size), 3632 * or a negative error in cases of failure. 3633 * 3634 * long bpf_seq_printf_btf(struct seq_file *m, struct btf_ptr *ptr, u32 ptr_size, u64 flags) 3635 * Description 3636 * Use BTF to write to seq_write a string representation of 3637 * *ptr*->ptr, using *ptr*->type_id as per bpf_snprintf_btf(). 3638 * *flags* are identical to those used for bpf_snprintf_btf. 3639 * Return 3640 * 0 on success or a negative error in case of failure. 3641 */ 3642 #define __BPF_FUNC_MAPPER(FN) \ 3643 FN(unspec), \ 3644 FN(map_lookup_elem), \ 3645 FN(map_update_elem), \ 3646 FN(map_delete_elem), \ 3647 FN(probe_read), \ 3648 FN(ktime_get_ns), \ 3649 FN(trace_printk), \ 3650 FN(get_prandom_u32), \ 3651 FN(get_smp_processor_id), \ 3652 FN(skb_store_bytes), \ 3653 FN(l3_csum_replace), \ 3654 FN(l4_csum_replace), \ 3655 FN(tail_call), \ 3656 FN(clone_redirect), \ 3657 FN(get_current_pid_tgid), \ 3658 FN(get_current_uid_gid), \ 3659 FN(get_current_comm), \ 3660 FN(get_cgroup_classid), \ 3661 FN(skb_vlan_push), \ 3662 FN(skb_vlan_pop), \ 3663 FN(skb_get_tunnel_key), \ 3664 FN(skb_set_tunnel_key), \ 3665 FN(perf_event_read), \ 3666 FN(redirect), \ 3667 FN(get_route_realm), \ 3668 FN(perf_event_output), \ 3669 FN(skb_load_bytes), \ 3670 FN(get_stackid), \ 3671 FN(csum_diff), \ 3672 FN(skb_get_tunnel_opt), \ 3673 FN(skb_set_tunnel_opt), \ 3674 FN(skb_change_proto), \ 3675 FN(skb_change_type), \ 3676 FN(skb_under_cgroup), \ 3677 FN(get_hash_recalc), \ 3678 FN(get_current_task), \ 3679 FN(probe_write_user), \ 3680 FN(current_task_under_cgroup), \ 3681 FN(skb_change_tail), \ 3682 FN(skb_pull_data), \ 3683 FN(csum_update), \ 3684 FN(set_hash_invalid), \ 3685 FN(get_numa_node_id), \ 3686 FN(skb_change_head), \ 3687 FN(xdp_adjust_head), \ 3688 FN(probe_read_str), \ 3689 FN(get_socket_cookie), \ 3690 FN(get_socket_uid), \ 3691 FN(set_hash), \ 3692 FN(setsockopt), \ 3693 FN(skb_adjust_room), \ 3694 FN(redirect_map), \ 3695 FN(sk_redirect_map), \ 3696 FN(sock_map_update), \ 3697 FN(xdp_adjust_meta), \ 3698 FN(perf_event_read_value), \ 3699 FN(perf_prog_read_value), \ 3700 FN(getsockopt), \ 3701 FN(override_return), \ 3702 FN(sock_ops_cb_flags_set), \ 3703 FN(msg_redirect_map), \ 3704 FN(msg_apply_bytes), \ 3705 FN(msg_cork_bytes), \ 3706 FN(msg_pull_data), \ 3707 FN(bind), \ 3708 FN(xdp_adjust_tail), \ 3709 FN(skb_get_xfrm_state), \ 3710 FN(get_stack), \ 3711 FN(skb_load_bytes_relative), \ 3712 FN(fib_lookup), \ 3713 FN(sock_hash_update), \ 3714 FN(msg_redirect_hash), \ 3715 FN(sk_redirect_hash), \ 3716 FN(lwt_push_encap), \ 3717 FN(lwt_seg6_store_bytes), \ 3718 FN(lwt_seg6_adjust_srh), \ 3719 FN(lwt_seg6_action), \ 3720 FN(rc_repeat), \ 3721 FN(rc_keydown), \ 3722 FN(skb_cgroup_id), \ 3723 FN(get_current_cgroup_id), \ 3724 FN(get_local_storage), \ 3725 FN(sk_select_reuseport), \ 3726 FN(skb_ancestor_cgroup_id), \ 3727 FN(sk_lookup_tcp), \ 3728 FN(sk_lookup_udp), \ 3729 FN(sk_release), \ 3730 FN(map_push_elem), \ 3731 FN(map_pop_elem), \ 3732 FN(map_peek_elem), \ 3733 FN(msg_push_data), \ 3734 FN(msg_pop_data), \ 3735 FN(rc_pointer_rel), \ 3736 FN(spin_lock), \ 3737 FN(spin_unlock), \ 3738 FN(sk_fullsock), \ 3739 FN(tcp_sock), \ 3740 FN(skb_ecn_set_ce), \ 3741 FN(get_listener_sock), \ 3742 FN(skc_lookup_tcp), \ 3743 FN(tcp_check_syncookie), \ 3744 FN(sysctl_get_name), \ 3745 FN(sysctl_get_current_value), \ 3746 FN(sysctl_get_new_value), \ 3747 FN(sysctl_set_new_value), \ 3748 FN(strtol), \ 3749 FN(strtoul), \ 3750 FN(sk_storage_get), \ 3751 FN(sk_storage_delete), \ 3752 FN(send_signal), \ 3753 FN(tcp_gen_syncookie), \ 3754 FN(skb_output), \ 3755 FN(probe_read_user), \ 3756 FN(probe_read_kernel), \ 3757 FN(probe_read_user_str), \ 3758 FN(probe_read_kernel_str), \ 3759 FN(tcp_send_ack), \ 3760 FN(send_signal_thread), \ 3761 FN(jiffies64), \ 3762 FN(read_branch_records), \ 3763 FN(get_ns_current_pid_tgid), \ 3764 FN(xdp_output), \ 3765 FN(get_netns_cookie), \ 3766 FN(get_current_ancestor_cgroup_id), \ 3767 FN(sk_assign), \ 3768 FN(ktime_get_boot_ns), \ 3769 FN(seq_printf), \ 3770 FN(seq_write), \ 3771 FN(sk_cgroup_id), \ 3772 FN(sk_ancestor_cgroup_id), \ 3773 FN(ringbuf_output), \ 3774 FN(ringbuf_reserve), \ 3775 FN(ringbuf_submit), \ 3776 FN(ringbuf_discard), \ 3777 FN(ringbuf_query), \ 3778 FN(csum_level), \ 3779 FN(skc_to_tcp6_sock), \ 3780 FN(skc_to_tcp_sock), \ 3781 FN(skc_to_tcp_timewait_sock), \ 3782 FN(skc_to_tcp_request_sock), \ 3783 FN(skc_to_udp6_sock), \ 3784 FN(get_task_stack), \ 3785 FN(load_hdr_opt), \ 3786 FN(store_hdr_opt), \ 3787 FN(reserve_hdr_opt), \ 3788 FN(inode_storage_get), \ 3789 FN(inode_storage_delete), \ 3790 FN(d_path), \ 3791 FN(copy_from_user), \ 3792 FN(snprintf_btf), \ 3793 FN(seq_printf_btf), \ 3794 /* */ 3795 3796 /* integer value in 'imm' field of BPF_CALL instruction selects which helper 3797 * function eBPF program intends to call 3798 */ 3799 #define __BPF_ENUM_FN(x) BPF_FUNC_ ## x 3800 enum bpf_func_id { 3801 __BPF_FUNC_MAPPER(__BPF_ENUM_FN) 3802 __BPF_FUNC_MAX_ID, 3803 }; 3804 #undef __BPF_ENUM_FN 3805 3806 /* All flags used by eBPF helper functions, placed here. */ 3807 3808 /* BPF_FUNC_skb_store_bytes flags. */ 3809 enum { 3810 BPF_F_RECOMPUTE_CSUM = (1ULL << 0), 3811 BPF_F_INVALIDATE_HASH = (1ULL << 1), 3812 }; 3813 3814 /* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags. 3815 * First 4 bits are for passing the header field size. 3816 */ 3817 enum { 3818 BPF_F_HDR_FIELD_MASK = 0xfULL, 3819 }; 3820 3821 /* BPF_FUNC_l4_csum_replace flags. */ 3822 enum { 3823 BPF_F_PSEUDO_HDR = (1ULL << 4), 3824 BPF_F_MARK_MANGLED_0 = (1ULL << 5), 3825 BPF_F_MARK_ENFORCE = (1ULL << 6), 3826 }; 3827 3828 /* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */ 3829 enum { 3830 BPF_F_INGRESS = (1ULL << 0), 3831 }; 3832 3833 /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */ 3834 enum { 3835 BPF_F_TUNINFO_IPV6 = (1ULL << 0), 3836 }; 3837 3838 /* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. */ 3839 enum { 3840 BPF_F_SKIP_FIELD_MASK = 0xffULL, 3841 BPF_F_USER_STACK = (1ULL << 8), 3842 /* flags used by BPF_FUNC_get_stackid only. */ 3843 BPF_F_FAST_STACK_CMP = (1ULL << 9), 3844 BPF_F_REUSE_STACKID = (1ULL << 10), 3845 /* flags used by BPF_FUNC_get_stack only. */ 3846 BPF_F_USER_BUILD_ID = (1ULL << 11), 3847 }; 3848 3849 /* BPF_FUNC_skb_set_tunnel_key flags. */ 3850 enum { 3851 BPF_F_ZERO_CSUM_TX = (1ULL << 1), 3852 BPF_F_DONT_FRAGMENT = (1ULL << 2), 3853 BPF_F_SEQ_NUMBER = (1ULL << 3), 3854 }; 3855 3856 /* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and 3857 * BPF_FUNC_perf_event_read_value flags. 3858 */ 3859 enum { 3860 BPF_F_INDEX_MASK = 0xffffffffULL, 3861 BPF_F_CURRENT_CPU = BPF_F_INDEX_MASK, 3862 /* BPF_FUNC_perf_event_output for sk_buff input context. */ 3863 BPF_F_CTXLEN_MASK = (0xfffffULL << 32), 3864 }; 3865 3866 /* Current network namespace */ 3867 enum { 3868 BPF_F_CURRENT_NETNS = (-1L), 3869 }; 3870 3871 /* BPF_FUNC_csum_level level values. */ 3872 enum { 3873 BPF_CSUM_LEVEL_QUERY, 3874 BPF_CSUM_LEVEL_INC, 3875 BPF_CSUM_LEVEL_DEC, 3876 BPF_CSUM_LEVEL_RESET, 3877 }; 3878 3879 /* BPF_FUNC_skb_adjust_room flags. */ 3880 enum { 3881 BPF_F_ADJ_ROOM_FIXED_GSO = (1ULL << 0), 3882 BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = (1ULL << 1), 3883 BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = (1ULL << 2), 3884 BPF_F_ADJ_ROOM_ENCAP_L4_GRE = (1ULL << 3), 3885 BPF_F_ADJ_ROOM_ENCAP_L4_UDP = (1ULL << 4), 3886 BPF_F_ADJ_ROOM_NO_CSUM_RESET = (1ULL << 5), 3887 }; 3888 3889 enum { 3890 BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff, 3891 BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 56, 3892 }; 3893 3894 #define BPF_F_ADJ_ROOM_ENCAP_L2(len) (((__u64)len & \ 3895 BPF_ADJ_ROOM_ENCAP_L2_MASK) \ 3896 << BPF_ADJ_ROOM_ENCAP_L2_SHIFT) 3897 3898 /* BPF_FUNC_sysctl_get_name flags. */ 3899 enum { 3900 BPF_F_SYSCTL_BASE_NAME = (1ULL << 0), 3901 }; 3902 3903 /* BPF_FUNC_<kernel_obj>_storage_get flags */ 3904 enum { 3905 BPF_LOCAL_STORAGE_GET_F_CREATE = (1ULL << 0), 3906 /* BPF_SK_STORAGE_GET_F_CREATE is only kept for backward compatibility 3907 * and BPF_LOCAL_STORAGE_GET_F_CREATE must be used instead. 3908 */ 3909 BPF_SK_STORAGE_GET_F_CREATE = BPF_LOCAL_STORAGE_GET_F_CREATE, 3910 }; 3911 3912 /* BPF_FUNC_read_branch_records flags. */ 3913 enum { 3914 BPF_F_GET_BRANCH_RECORDS_SIZE = (1ULL << 0), 3915 }; 3916 3917 /* BPF_FUNC_bpf_ringbuf_commit, BPF_FUNC_bpf_ringbuf_discard, and 3918 * BPF_FUNC_bpf_ringbuf_output flags. 3919 */ 3920 enum { 3921 BPF_RB_NO_WAKEUP = (1ULL << 0), 3922 BPF_RB_FORCE_WAKEUP = (1ULL << 1), 3923 }; 3924 3925 /* BPF_FUNC_bpf_ringbuf_query flags */ 3926 enum { 3927 BPF_RB_AVAIL_DATA = 0, 3928 BPF_RB_RING_SIZE = 1, 3929 BPF_RB_CONS_POS = 2, 3930 BPF_RB_PROD_POS = 3, 3931 }; 3932 3933 /* BPF ring buffer constants */ 3934 enum { 3935 BPF_RINGBUF_BUSY_BIT = (1U << 31), 3936 BPF_RINGBUF_DISCARD_BIT = (1U << 30), 3937 BPF_RINGBUF_HDR_SZ = 8, 3938 }; 3939 3940 /* BPF_FUNC_sk_assign flags in bpf_sk_lookup context. */ 3941 enum { 3942 BPF_SK_LOOKUP_F_REPLACE = (1ULL << 0), 3943 BPF_SK_LOOKUP_F_NO_REUSEPORT = (1ULL << 1), 3944 }; 3945 3946 /* Mode for BPF_FUNC_skb_adjust_room helper. */ 3947 enum bpf_adj_room_mode { 3948 BPF_ADJ_ROOM_NET, 3949 BPF_ADJ_ROOM_MAC, 3950 }; 3951 3952 /* Mode for BPF_FUNC_skb_load_bytes_relative helper. */ 3953 enum bpf_hdr_start_off { 3954 BPF_HDR_START_MAC, 3955 BPF_HDR_START_NET, 3956 }; 3957 3958 /* Encapsulation type for BPF_FUNC_lwt_push_encap helper. */ 3959 enum bpf_lwt_encap_mode { 3960 BPF_LWT_ENCAP_SEG6, 3961 BPF_LWT_ENCAP_SEG6_INLINE, 3962 BPF_LWT_ENCAP_IP, 3963 }; 3964 3965 #define __bpf_md_ptr(type, name) \ 3966 union { \ 3967 type name; \ 3968 __u64 :64; \ 3969 } __attribute__((aligned(8))) 3970 3971 /* user accessible mirror of in-kernel sk_buff. 3972 * new fields can only be added to the end of this structure 3973 */ 3974 struct __sk_buff { 3975 __u32 len; 3976 __u32 pkt_type; 3977 __u32 mark; 3978 __u32 queue_mapping; 3979 __u32 protocol; 3980 __u32 vlan_present; 3981 __u32 vlan_tci; 3982 __u32 vlan_proto; 3983 __u32 priority; 3984 __u32 ingress_ifindex; 3985 __u32 ifindex; 3986 __u32 tc_index; 3987 __u32 cb[5]; 3988 __u32 hash; 3989 __u32 tc_classid; 3990 __u32 data; 3991 __u32 data_end; 3992 __u32 napi_id; 3993 3994 /* Accessed by BPF_PROG_TYPE_sk_skb types from here to ... */ 3995 __u32 family; 3996 __u32 remote_ip4; /* Stored in network byte order */ 3997 __u32 local_ip4; /* Stored in network byte order */ 3998 __u32 remote_ip6[4]; /* Stored in network byte order */ 3999 __u32 local_ip6[4]; /* Stored in network byte order */ 4000 __u32 remote_port; /* Stored in network byte order */ 4001 __u32 local_port; /* stored in host byte order */ 4002 /* ... here. */ 4003 4004 __u32 data_meta; 4005 __bpf_md_ptr(struct bpf_flow_keys *, flow_keys); 4006 __u64 tstamp; 4007 __u32 wire_len; 4008 __u32 gso_segs; 4009 __bpf_md_ptr(struct bpf_sock *, sk); 4010 __u32 gso_size; 4011 }; 4012 4013 struct bpf_tunnel_key { 4014 __u32 tunnel_id; 4015 union { 4016 __u32 remote_ipv4; 4017 __u32 remote_ipv6[4]; 4018 }; 4019 __u8 tunnel_tos; 4020 __u8 tunnel_ttl; 4021 __u16 tunnel_ext; /* Padding, future use. */ 4022 __u32 tunnel_label; 4023 }; 4024 4025 /* user accessible mirror of in-kernel xfrm_state. 4026 * new fields can only be added to the end of this structure 4027 */ 4028 struct bpf_xfrm_state { 4029 __u32 reqid; 4030 __u32 spi; /* Stored in network byte order */ 4031 __u16 family; 4032 __u16 ext; /* Padding, future use. */ 4033 union { 4034 __u32 remote_ipv4; /* Stored in network byte order */ 4035 __u32 remote_ipv6[4]; /* Stored in network byte order */ 4036 }; 4037 }; 4038 4039 /* Generic BPF return codes which all BPF program types may support. 4040 * The values are binary compatible with their TC_ACT_* counter-part to 4041 * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT 4042 * programs. 4043 * 4044 * XDP is handled seprately, see XDP_*. 4045 */ 4046 enum bpf_ret_code { 4047 BPF_OK = 0, 4048 /* 1 reserved */ 4049 BPF_DROP = 2, 4050 /* 3-6 reserved */ 4051 BPF_REDIRECT = 7, 4052 /* >127 are reserved for prog type specific return codes. 4053 * 4054 * BPF_LWT_REROUTE: used by BPF_PROG_TYPE_LWT_IN and 4055 * BPF_PROG_TYPE_LWT_XMIT to indicate that skb had been 4056 * changed and should be routed based on its new L3 header. 4057 * (This is an L3 redirect, as opposed to L2 redirect 4058 * represented by BPF_REDIRECT above). 4059 */ 4060 BPF_LWT_REROUTE = 128, 4061 }; 4062 4063 struct bpf_sock { 4064 __u32 bound_dev_if; 4065 __u32 family; 4066 __u32 type; 4067 __u32 protocol; 4068 __u32 mark; 4069 __u32 priority; 4070 /* IP address also allows 1 and 2 bytes access */ 4071 __u32 src_ip4; 4072 __u32 src_ip6[4]; 4073 __u32 src_port; /* host byte order */ 4074 __u32 dst_port; /* network byte order */ 4075 __u32 dst_ip4; 4076 __u32 dst_ip6[4]; 4077 __u32 state; 4078 __s32 rx_queue_mapping; 4079 }; 4080 4081 struct bpf_tcp_sock { 4082 __u32 snd_cwnd; /* Sending congestion window */ 4083 __u32 srtt_us; /* smoothed round trip time << 3 in usecs */ 4084 __u32 rtt_min; 4085 __u32 snd_ssthresh; /* Slow start size threshold */ 4086 __u32 rcv_nxt; /* What we want to receive next */ 4087 __u32 snd_nxt; /* Next sequence we send */ 4088 __u32 snd_una; /* First byte we want an ack for */ 4089 __u32 mss_cache; /* Cached effective mss, not including SACKS */ 4090 __u32 ecn_flags; /* ECN status bits. */ 4091 __u32 rate_delivered; /* saved rate sample: packets delivered */ 4092 __u32 rate_interval_us; /* saved rate sample: time elapsed */ 4093 __u32 packets_out; /* Packets which are "in flight" */ 4094 __u32 retrans_out; /* Retransmitted packets out */ 4095 __u32 total_retrans; /* Total retransmits for entire connection */ 4096 __u32 segs_in; /* RFC4898 tcpEStatsPerfSegsIn 4097 * total number of segments in. 4098 */ 4099 __u32 data_segs_in; /* RFC4898 tcpEStatsPerfDataSegsIn 4100 * total number of data segments in. 4101 */ 4102 __u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut 4103 * The total number of segments sent. 4104 */ 4105 __u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut 4106 * total number of data segments sent. 4107 */ 4108 __u32 lost_out; /* Lost packets */ 4109 __u32 sacked_out; /* SACK'd packets */ 4110 __u64 bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived 4111 * sum(delta(rcv_nxt)), or how many bytes 4112 * were acked. 4113 */ 4114 __u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked 4115 * sum(delta(snd_una)), or how many bytes 4116 * were acked. 4117 */ 4118 __u32 dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups 4119 * total number of DSACK blocks received 4120 */ 4121 __u32 delivered; /* Total data packets delivered incl. rexmits */ 4122 __u32 delivered_ce; /* Like the above but only ECE marked packets */ 4123 __u32 icsk_retransmits; /* Number of unrecovered [RTO] timeouts */ 4124 }; 4125 4126 struct bpf_sock_tuple { 4127 union { 4128 struct { 4129 __be32 saddr; 4130 __be32 daddr; 4131 __be16 sport; 4132 __be16 dport; 4133 } ipv4; 4134 struct { 4135 __be32 saddr[4]; 4136 __be32 daddr[4]; 4137 __be16 sport; 4138 __be16 dport; 4139 } ipv6; 4140 }; 4141 }; 4142 4143 struct bpf_xdp_sock { 4144 __u32 queue_id; 4145 }; 4146 4147 #define XDP_PACKET_HEADROOM 256 4148 4149 /* User return codes for XDP prog type. 4150 * A valid XDP program must return one of these defined values. All other 4151 * return codes are reserved for future use. Unknown return codes will 4152 * result in packet drops and a warning via bpf_warn_invalid_xdp_action(). 4153 */ 4154 enum xdp_action { 4155 XDP_ABORTED = 0, 4156 XDP_DROP, 4157 XDP_PASS, 4158 XDP_TX, 4159 XDP_REDIRECT, 4160 }; 4161 4162 /* user accessible metadata for XDP packet hook 4163 * new fields must be added to the end of this structure 4164 */ 4165 struct xdp_md { 4166 __u32 data; 4167 __u32 data_end; 4168 __u32 data_meta; 4169 /* Below access go through struct xdp_rxq_info */ 4170 __u32 ingress_ifindex; /* rxq->dev->ifindex */ 4171 __u32 rx_queue_index; /* rxq->queue_index */ 4172 4173 __u32 egress_ifindex; /* txq->dev->ifindex */ 4174 }; 4175 4176 /* DEVMAP map-value layout 4177 * 4178 * The struct data-layout of map-value is a configuration interface. 4179 * New members can only be added to the end of this structure. 4180 */ 4181 struct bpf_devmap_val { 4182 __u32 ifindex; /* device index */ 4183 union { 4184 int fd; /* prog fd on map write */ 4185 __u32 id; /* prog id on map read */ 4186 } bpf_prog; 4187 }; 4188 4189 /* CPUMAP map-value layout 4190 * 4191 * The struct data-layout of map-value is a configuration interface. 4192 * New members can only be added to the end of this structure. 4193 */ 4194 struct bpf_cpumap_val { 4195 __u32 qsize; /* queue size to remote target CPU */ 4196 union { 4197 int fd; /* prog fd on map write */ 4198 __u32 id; /* prog id on map read */ 4199 } bpf_prog; 4200 }; 4201 4202 enum sk_action { 4203 SK_DROP = 0, 4204 SK_PASS, 4205 }; 4206 4207 /* user accessible metadata for SK_MSG packet hook, new fields must 4208 * be added to the end of this structure 4209 */ 4210 struct sk_msg_md { 4211 __bpf_md_ptr(void *, data); 4212 __bpf_md_ptr(void *, data_end); 4213 4214 __u32 family; 4215 __u32 remote_ip4; /* Stored in network byte order */ 4216 __u32 local_ip4; /* Stored in network byte order */ 4217 __u32 remote_ip6[4]; /* Stored in network byte order */ 4218 __u32 local_ip6[4]; /* Stored in network byte order */ 4219 __u32 remote_port; /* Stored in network byte order */ 4220 __u32 local_port; /* stored in host byte order */ 4221 __u32 size; /* Total size of sk_msg */ 4222 4223 __bpf_md_ptr(struct bpf_sock *, sk); /* current socket */ 4224 }; 4225 4226 struct sk_reuseport_md { 4227 /* 4228 * Start of directly accessible data. It begins from 4229 * the tcp/udp header. 4230 */ 4231 __bpf_md_ptr(void *, data); 4232 /* End of directly accessible data */ 4233 __bpf_md_ptr(void *, data_end); 4234 /* 4235 * Total length of packet (starting from the tcp/udp header). 4236 * Note that the directly accessible bytes (data_end - data) 4237 * could be less than this "len". Those bytes could be 4238 * indirectly read by a helper "bpf_skb_load_bytes()". 4239 */ 4240 __u32 len; 4241 /* 4242 * Eth protocol in the mac header (network byte order). e.g. 4243 * ETH_P_IP(0x0800) and ETH_P_IPV6(0x86DD) 4244 */ 4245 __u32 eth_protocol; 4246 __u32 ip_protocol; /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */ 4247 __u32 bind_inany; /* Is sock bound to an INANY address? */ 4248 __u32 hash; /* A hash of the packet 4 tuples */ 4249 }; 4250 4251 #define BPF_TAG_SIZE 8 4252 4253 struct bpf_prog_info { 4254 __u32 type; 4255 __u32 id; 4256 __u8 tag[BPF_TAG_SIZE]; 4257 __u32 jited_prog_len; 4258 __u32 xlated_prog_len; 4259 __aligned_u64 jited_prog_insns; 4260 __aligned_u64 xlated_prog_insns; 4261 __u64 load_time; /* ns since boottime */ 4262 __u32 created_by_uid; 4263 __u32 nr_map_ids; 4264 __aligned_u64 map_ids; 4265 char name[BPF_OBJ_NAME_LEN]; 4266 __u32 ifindex; 4267 __u32 gpl_compatible:1; 4268 __u32 :31; /* alignment pad */ 4269 __u64 netns_dev; 4270 __u64 netns_ino; 4271 __u32 nr_jited_ksyms; 4272 __u32 nr_jited_func_lens; 4273 __aligned_u64 jited_ksyms; 4274 __aligned_u64 jited_func_lens; 4275 __u32 btf_id; 4276 __u32 func_info_rec_size; 4277 __aligned_u64 func_info; 4278 __u32 nr_func_info; 4279 __u32 nr_line_info; 4280 __aligned_u64 line_info; 4281 __aligned_u64 jited_line_info; 4282 __u32 nr_jited_line_info; 4283 __u32 line_info_rec_size; 4284 __u32 jited_line_info_rec_size; 4285 __u32 nr_prog_tags; 4286 __aligned_u64 prog_tags; 4287 __u64 run_time_ns; 4288 __u64 run_cnt; 4289 } __attribute__((aligned(8))); 4290 4291 struct bpf_map_info { 4292 __u32 type; 4293 __u32 id; 4294 __u32 key_size; 4295 __u32 value_size; 4296 __u32 max_entries; 4297 __u32 map_flags; 4298 char name[BPF_OBJ_NAME_LEN]; 4299 __u32 ifindex; 4300 __u32 btf_vmlinux_value_type_id; 4301 __u64 netns_dev; 4302 __u64 netns_ino; 4303 __u32 btf_id; 4304 __u32 btf_key_type_id; 4305 __u32 btf_value_type_id; 4306 } __attribute__((aligned(8))); 4307 4308 struct bpf_btf_info { 4309 __aligned_u64 btf; 4310 __u32 btf_size; 4311 __u32 id; 4312 } __attribute__((aligned(8))); 4313 4314 struct bpf_link_info { 4315 __u32 type; 4316 __u32 id; 4317 __u32 prog_id; 4318 union { 4319 struct { 4320 __aligned_u64 tp_name; /* in/out: tp_name buffer ptr */ 4321 __u32 tp_name_len; /* in/out: tp_name buffer len */ 4322 } raw_tracepoint; 4323 struct { 4324 __u32 attach_type; 4325 } tracing; 4326 struct { 4327 __u64 cgroup_id; 4328 __u32 attach_type; 4329 } cgroup; 4330 struct { 4331 __aligned_u64 target_name; /* in/out: target_name buffer ptr */ 4332 __u32 target_name_len; /* in/out: target_name buffer len */ 4333 union { 4334 struct { 4335 __u32 map_id; 4336 } map; 4337 }; 4338 } iter; 4339 struct { 4340 __u32 netns_ino; 4341 __u32 attach_type; 4342 } netns; 4343 struct { 4344 __u32 ifindex; 4345 } xdp; 4346 }; 4347 } __attribute__((aligned(8))); 4348 4349 /* User bpf_sock_addr struct to access socket fields and sockaddr struct passed 4350 * by user and intended to be used by socket (e.g. to bind to, depends on 4351 * attach type). 4352 */ 4353 struct bpf_sock_addr { 4354 __u32 user_family; /* Allows 4-byte read, but no write. */ 4355 __u32 user_ip4; /* Allows 1,2,4-byte read and 4-byte write. 4356 * Stored in network byte order. 4357 */ 4358 __u32 user_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write. 4359 * Stored in network byte order. 4360 */ 4361 __u32 user_port; /* Allows 1,2,4-byte read and 4-byte write. 4362 * Stored in network byte order 4363 */ 4364 __u32 family; /* Allows 4-byte read, but no write */ 4365 __u32 type; /* Allows 4-byte read, but no write */ 4366 __u32 protocol; /* Allows 4-byte read, but no write */ 4367 __u32 msg_src_ip4; /* Allows 1,2,4-byte read and 4-byte write. 4368 * Stored in network byte order. 4369 */ 4370 __u32 msg_src_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write. 4371 * Stored in network byte order. 4372 */ 4373 __bpf_md_ptr(struct bpf_sock *, sk); 4374 }; 4375 4376 /* User bpf_sock_ops struct to access socket values and specify request ops 4377 * and their replies. 4378 * Some of this fields are in network (bigendian) byte order and may need 4379 * to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h). 4380 * New fields can only be added at the end of this structure 4381 */ 4382 struct bpf_sock_ops { 4383 __u32 op; 4384 union { 4385 __u32 args[4]; /* Optionally passed to bpf program */ 4386 __u32 reply; /* Returned by bpf program */ 4387 __u32 replylong[4]; /* Optionally returned by bpf prog */ 4388 }; 4389 __u32 family; 4390 __u32 remote_ip4; /* Stored in network byte order */ 4391 __u32 local_ip4; /* Stored in network byte order */ 4392 __u32 remote_ip6[4]; /* Stored in network byte order */ 4393 __u32 local_ip6[4]; /* Stored in network byte order */ 4394 __u32 remote_port; /* Stored in network byte order */ 4395 __u32 local_port; /* stored in host byte order */ 4396 __u32 is_fullsock; /* Some TCP fields are only valid if 4397 * there is a full socket. If not, the 4398 * fields read as zero. 4399 */ 4400 __u32 snd_cwnd; 4401 __u32 srtt_us; /* Averaged RTT << 3 in usecs */ 4402 __u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */ 4403 __u32 state; 4404 __u32 rtt_min; 4405 __u32 snd_ssthresh; 4406 __u32 rcv_nxt; 4407 __u32 snd_nxt; 4408 __u32 snd_una; 4409 __u32 mss_cache; 4410 __u32 ecn_flags; 4411 __u32 rate_delivered; 4412 __u32 rate_interval_us; 4413 __u32 packets_out; 4414 __u32 retrans_out; 4415 __u32 total_retrans; 4416 __u32 segs_in; 4417 __u32 data_segs_in; 4418 __u32 segs_out; 4419 __u32 data_segs_out; 4420 __u32 lost_out; 4421 __u32 sacked_out; 4422 __u32 sk_txhash; 4423 __u64 bytes_received; 4424 __u64 bytes_acked; 4425 __bpf_md_ptr(struct bpf_sock *, sk); 4426 /* [skb_data, skb_data_end) covers the whole TCP header. 4427 * 4428 * BPF_SOCK_OPS_PARSE_HDR_OPT_CB: The packet received 4429 * BPF_SOCK_OPS_HDR_OPT_LEN_CB: Not useful because the 4430 * header has not been written. 4431 * BPF_SOCK_OPS_WRITE_HDR_OPT_CB: The header and options have 4432 * been written so far. 4433 * BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB: The SYNACK that concludes 4434 * the 3WHS. 4435 * BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB: The ACK that concludes 4436 * the 3WHS. 4437 * 4438 * bpf_load_hdr_opt() can also be used to read a particular option. 4439 */ 4440 __bpf_md_ptr(void *, skb_data); 4441 __bpf_md_ptr(void *, skb_data_end); 4442 __u32 skb_len; /* The total length of a packet. 4443 * It includes the header, options, 4444 * and payload. 4445 */ 4446 __u32 skb_tcp_flags; /* tcp_flags of the header. It provides 4447 * an easy way to check for tcp_flags 4448 * without parsing skb_data. 4449 * 4450 * In particular, the skb_tcp_flags 4451 * will still be available in 4452 * BPF_SOCK_OPS_HDR_OPT_LEN even though 4453 * the outgoing header has not 4454 * been written yet. 4455 */ 4456 }; 4457 4458 /* Definitions for bpf_sock_ops_cb_flags */ 4459 enum { 4460 BPF_SOCK_OPS_RTO_CB_FLAG = (1<<0), 4461 BPF_SOCK_OPS_RETRANS_CB_FLAG = (1<<1), 4462 BPF_SOCK_OPS_STATE_CB_FLAG = (1<<2), 4463 BPF_SOCK_OPS_RTT_CB_FLAG = (1<<3), 4464 /* Call bpf for all received TCP headers. The bpf prog will be 4465 * called under sock_ops->op == BPF_SOCK_OPS_PARSE_HDR_OPT_CB 4466 * 4467 * Please refer to the comment in BPF_SOCK_OPS_PARSE_HDR_OPT_CB 4468 * for the header option related helpers that will be useful 4469 * to the bpf programs. 4470 * 4471 * It could be used at the client/active side (i.e. connect() side) 4472 * when the server told it that the server was in syncookie 4473 * mode and required the active side to resend the bpf-written 4474 * options. The active side can keep writing the bpf-options until 4475 * it received a valid packet from the server side to confirm 4476 * the earlier packet (and options) has been received. The later 4477 * example patch is using it like this at the active side when the 4478 * server is in syncookie mode. 4479 * 4480 * The bpf prog will usually turn this off in the common cases. 4481 */ 4482 BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG = (1<<4), 4483 /* Call bpf when kernel has received a header option that 4484 * the kernel cannot handle. The bpf prog will be called under 4485 * sock_ops->op == BPF_SOCK_OPS_PARSE_HDR_OPT_CB. 4486 * 4487 * Please refer to the comment in BPF_SOCK_OPS_PARSE_HDR_OPT_CB 4488 * for the header option related helpers that will be useful 4489 * to the bpf programs. 4490 */ 4491 BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG = (1<<5), 4492 /* Call bpf when the kernel is writing header options for the 4493 * outgoing packet. The bpf prog will first be called 4494 * to reserve space in a skb under 4495 * sock_ops->op == BPF_SOCK_OPS_HDR_OPT_LEN_CB. Then 4496 * the bpf prog will be called to write the header option(s) 4497 * under sock_ops->op == BPF_SOCK_OPS_WRITE_HDR_OPT_CB. 4498 * 4499 * Please refer to the comment in BPF_SOCK_OPS_HDR_OPT_LEN_CB 4500 * and BPF_SOCK_OPS_WRITE_HDR_OPT_CB for the header option 4501 * related helpers that will be useful to the bpf programs. 4502 * 4503 * The kernel gets its chance to reserve space and write 4504 * options first before the BPF program does. 4505 */ 4506 BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG = (1<<6), 4507 /* Mask of all currently supported cb flags */ 4508 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7F, 4509 }; 4510 4511 /* List of known BPF sock_ops operators. 4512 * New entries can only be added at the end 4513 */ 4514 enum { 4515 BPF_SOCK_OPS_VOID, 4516 BPF_SOCK_OPS_TIMEOUT_INIT, /* Should return SYN-RTO value to use or 4517 * -1 if default value should be used 4518 */ 4519 BPF_SOCK_OPS_RWND_INIT, /* Should return initial advertized 4520 * window (in packets) or -1 if default 4521 * value should be used 4522 */ 4523 BPF_SOCK_OPS_TCP_CONNECT_CB, /* Calls BPF program right before an 4524 * active connection is initialized 4525 */ 4526 BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB, /* Calls BPF program when an 4527 * active connection is 4528 * established 4529 */ 4530 BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, /* Calls BPF program when a 4531 * passive connection is 4532 * established 4533 */ 4534 BPF_SOCK_OPS_NEEDS_ECN, /* If connection's congestion control 4535 * needs ECN 4536 */ 4537 BPF_SOCK_OPS_BASE_RTT, /* Get base RTT. The correct value is 4538 * based on the path and may be 4539 * dependent on the congestion control 4540 * algorithm. In general it indicates 4541 * a congestion threshold. RTTs above 4542 * this indicate congestion 4543 */ 4544 BPF_SOCK_OPS_RTO_CB, /* Called when an RTO has triggered. 4545 * Arg1: value of icsk_retransmits 4546 * Arg2: value of icsk_rto 4547 * Arg3: whether RTO has expired 4548 */ 4549 BPF_SOCK_OPS_RETRANS_CB, /* Called when skb is retransmitted. 4550 * Arg1: sequence number of 1st byte 4551 * Arg2: # segments 4552 * Arg3: return value of 4553 * tcp_transmit_skb (0 => success) 4554 */ 4555 BPF_SOCK_OPS_STATE_CB, /* Called when TCP changes state. 4556 * Arg1: old_state 4557 * Arg2: new_state 4558 */ 4559 BPF_SOCK_OPS_TCP_LISTEN_CB, /* Called on listen(2), right after 4560 * socket transition to LISTEN state. 4561 */ 4562 BPF_SOCK_OPS_RTT_CB, /* Called on every RTT. 4563 */ 4564 BPF_SOCK_OPS_PARSE_HDR_OPT_CB, /* Parse the header option. 4565 * It will be called to handle 4566 * the packets received at 4567 * an already established 4568 * connection. 4569 * 4570 * sock_ops->skb_data: 4571 * Referring to the received skb. 4572 * It covers the TCP header only. 4573 * 4574 * bpf_load_hdr_opt() can also 4575 * be used to search for a 4576 * particular option. 4577 */ 4578 BPF_SOCK_OPS_HDR_OPT_LEN_CB, /* Reserve space for writing the 4579 * header option later in 4580 * BPF_SOCK_OPS_WRITE_HDR_OPT_CB. 4581 * Arg1: bool want_cookie. (in 4582 * writing SYNACK only) 4583 * 4584 * sock_ops->skb_data: 4585 * Not available because no header has 4586 * been written yet. 4587 * 4588 * sock_ops->skb_tcp_flags: 4589 * The tcp_flags of the 4590 * outgoing skb. (e.g. SYN, ACK, FIN). 4591 * 4592 * bpf_reserve_hdr_opt() should 4593 * be used to reserve space. 4594 */ 4595 BPF_SOCK_OPS_WRITE_HDR_OPT_CB, /* Write the header options 4596 * Arg1: bool want_cookie. (in 4597 * writing SYNACK only) 4598 * 4599 * sock_ops->skb_data: 4600 * Referring to the outgoing skb. 4601 * It covers the TCP header 4602 * that has already been written 4603 * by the kernel and the 4604 * earlier bpf-progs. 4605 * 4606 * sock_ops->skb_tcp_flags: 4607 * The tcp_flags of the outgoing 4608 * skb. (e.g. SYN, ACK, FIN). 4609 * 4610 * bpf_store_hdr_opt() should 4611 * be used to write the 4612 * option. 4613 * 4614 * bpf_load_hdr_opt() can also 4615 * be used to search for a 4616 * particular option that 4617 * has already been written 4618 * by the kernel or the 4619 * earlier bpf-progs. 4620 */ 4621 }; 4622 4623 /* List of TCP states. There is a build check in net/ipv4/tcp.c to detect 4624 * changes between the TCP and BPF versions. Ideally this should never happen. 4625 * If it does, we need to add code to convert them before calling 4626 * the BPF sock_ops function. 4627 */ 4628 enum { 4629 BPF_TCP_ESTABLISHED = 1, 4630 BPF_TCP_SYN_SENT, 4631 BPF_TCP_SYN_RECV, 4632 BPF_TCP_FIN_WAIT1, 4633 BPF_TCP_FIN_WAIT2, 4634 BPF_TCP_TIME_WAIT, 4635 BPF_TCP_CLOSE, 4636 BPF_TCP_CLOSE_WAIT, 4637 BPF_TCP_LAST_ACK, 4638 BPF_TCP_LISTEN, 4639 BPF_TCP_CLOSING, /* Now a valid state */ 4640 BPF_TCP_NEW_SYN_RECV, 4641 4642 BPF_TCP_MAX_STATES /* Leave at the end! */ 4643 }; 4644 4645 enum { 4646 TCP_BPF_IW = 1001, /* Set TCP initial congestion window */ 4647 TCP_BPF_SNDCWND_CLAMP = 1002, /* Set sndcwnd_clamp */ 4648 TCP_BPF_DELACK_MAX = 1003, /* Max delay ack in usecs */ 4649 TCP_BPF_RTO_MIN = 1004, /* Min delay ack in usecs */ 4650 /* Copy the SYN pkt to optval 4651 * 4652 * BPF_PROG_TYPE_SOCK_OPS only. It is similar to the 4653 * bpf_getsockopt(TCP_SAVED_SYN) but it does not limit 4654 * to only getting from the saved_syn. It can either get the 4655 * syn packet from: 4656 * 4657 * 1. the just-received SYN packet (only available when writing the 4658 * SYNACK). It will be useful when it is not necessary to 4659 * save the SYN packet for latter use. It is also the only way 4660 * to get the SYN during syncookie mode because the syn 4661 * packet cannot be saved during syncookie. 4662 * 4663 * OR 4664 * 4665 * 2. the earlier saved syn which was done by 4666 * bpf_setsockopt(TCP_SAVE_SYN). 4667 * 4668 * The bpf_getsockopt(TCP_BPF_SYN*) option will hide where the 4669 * SYN packet is obtained. 4670 * 4671 * If the bpf-prog does not need the IP[46] header, the 4672 * bpf-prog can avoid parsing the IP header by using 4673 * TCP_BPF_SYN. Otherwise, the bpf-prog can get both 4674 * IP[46] and TCP header by using TCP_BPF_SYN_IP. 4675 * 4676 * >0: Total number of bytes copied 4677 * -ENOSPC: Not enough space in optval. Only optlen number of 4678 * bytes is copied. 4679 * -ENOENT: The SYN skb is not available now and the earlier SYN pkt 4680 * is not saved by setsockopt(TCP_SAVE_SYN). 4681 */ 4682 TCP_BPF_SYN = 1005, /* Copy the TCP header */ 4683 TCP_BPF_SYN_IP = 1006, /* Copy the IP[46] and TCP header */ 4684 TCP_BPF_SYN_MAC = 1007, /* Copy the MAC, IP[46], and TCP header */ 4685 }; 4686 4687 enum { 4688 BPF_LOAD_HDR_OPT_TCP_SYN = (1ULL << 0), 4689 }; 4690 4691 /* args[0] value during BPF_SOCK_OPS_HDR_OPT_LEN_CB and 4692 * BPF_SOCK_OPS_WRITE_HDR_OPT_CB. 4693 */ 4694 enum { 4695 BPF_WRITE_HDR_TCP_CURRENT_MSS = 1, /* Kernel is finding the 4696 * total option spaces 4697 * required for an established 4698 * sk in order to calculate the 4699 * MSS. No skb is actually 4700 * sent. 4701 */ 4702 BPF_WRITE_HDR_TCP_SYNACK_COOKIE = 2, /* Kernel is in syncookie mode 4703 * when sending a SYN. 4704 */ 4705 }; 4706 4707 struct bpf_perf_event_value { 4708 __u64 counter; 4709 __u64 enabled; 4710 __u64 running; 4711 }; 4712 4713 enum { 4714 BPF_DEVCG_ACC_MKNOD = (1ULL << 0), 4715 BPF_DEVCG_ACC_READ = (1ULL << 1), 4716 BPF_DEVCG_ACC_WRITE = (1ULL << 2), 4717 }; 4718 4719 enum { 4720 BPF_DEVCG_DEV_BLOCK = (1ULL << 0), 4721 BPF_DEVCG_DEV_CHAR = (1ULL << 1), 4722 }; 4723 4724 struct bpf_cgroup_dev_ctx { 4725 /* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */ 4726 __u32 access_type; 4727 __u32 major; 4728 __u32 minor; 4729 }; 4730 4731 struct bpf_raw_tracepoint_args { 4732 __u64 args[0]; 4733 }; 4734 4735 /* DIRECT: Skip the FIB rules and go to FIB table associated with device 4736 * OUTPUT: Do lookup from egress perspective; default is ingress 4737 */ 4738 enum { 4739 BPF_FIB_LOOKUP_DIRECT = (1U << 0), 4740 BPF_FIB_LOOKUP_OUTPUT = (1U << 1), 4741 }; 4742 4743 enum { 4744 BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */ 4745 BPF_FIB_LKUP_RET_BLACKHOLE, /* dest is blackholed; can be dropped */ 4746 BPF_FIB_LKUP_RET_UNREACHABLE, /* dest is unreachable; can be dropped */ 4747 BPF_FIB_LKUP_RET_PROHIBIT, /* dest not allowed; can be dropped */ 4748 BPF_FIB_LKUP_RET_NOT_FWDED, /* packet is not forwarded */ 4749 BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */ 4750 BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */ 4751 BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */ 4752 BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */ 4753 }; 4754 4755 struct bpf_fib_lookup { 4756 /* input: network family for lookup (AF_INET, AF_INET6) 4757 * output: network family of egress nexthop 4758 */ 4759 __u8 family; 4760 4761 /* set if lookup is to consider L4 data - e.g., FIB rules */ 4762 __u8 l4_protocol; 4763 __be16 sport; 4764 __be16 dport; 4765 4766 /* total length of packet from network header - used for MTU check */ 4767 __u16 tot_len; 4768 4769 /* input: L3 device index for lookup 4770 * output: device index from FIB lookup 4771 */ 4772 __u32 ifindex; 4773 4774 union { 4775 /* inputs to lookup */ 4776 __u8 tos; /* AF_INET */ 4777 __be32 flowinfo; /* AF_INET6, flow_label + priority */ 4778 4779 /* output: metric of fib result (IPv4/IPv6 only) */ 4780 __u32 rt_metric; 4781 }; 4782 4783 union { 4784 __be32 ipv4_src; 4785 __u32 ipv6_src[4]; /* in6_addr; network order */ 4786 }; 4787 4788 /* input to bpf_fib_lookup, ipv{4,6}_dst is destination address in 4789 * network header. output: bpf_fib_lookup sets to gateway address 4790 * if FIB lookup returns gateway route 4791 */ 4792 union { 4793 __be32 ipv4_dst; 4794 __u32 ipv6_dst[4]; /* in6_addr; network order */ 4795 }; 4796 4797 /* output */ 4798 __be16 h_vlan_proto; 4799 __be16 h_vlan_TCI; 4800 __u8 smac[6]; /* ETH_ALEN */ 4801 __u8 dmac[6]; /* ETH_ALEN */ 4802 }; 4803 4804 enum bpf_task_fd_type { 4805 BPF_FD_TYPE_RAW_TRACEPOINT, /* tp name */ 4806 BPF_FD_TYPE_TRACEPOINT, /* tp name */ 4807 BPF_FD_TYPE_KPROBE, /* (symbol + offset) or addr */ 4808 BPF_FD_TYPE_KRETPROBE, /* (symbol + offset) or addr */ 4809 BPF_FD_TYPE_UPROBE, /* filename + offset */ 4810 BPF_FD_TYPE_URETPROBE, /* filename + offset */ 4811 }; 4812 4813 enum { 4814 BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = (1U << 0), 4815 BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = (1U << 1), 4816 BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = (1U << 2), 4817 }; 4818 4819 struct bpf_flow_keys { 4820 __u16 nhoff; 4821 __u16 thoff; 4822 __u16 addr_proto; /* ETH_P_* of valid addrs */ 4823 __u8 is_frag; 4824 __u8 is_first_frag; 4825 __u8 is_encap; 4826 __u8 ip_proto; 4827 __be16 n_proto; 4828 __be16 sport; 4829 __be16 dport; 4830 union { 4831 struct { 4832 __be32 ipv4_src; 4833 __be32 ipv4_dst; 4834 }; 4835 struct { 4836 __u32 ipv6_src[4]; /* in6_addr; network order */ 4837 __u32 ipv6_dst[4]; /* in6_addr; network order */ 4838 }; 4839 }; 4840 __u32 flags; 4841 __be32 flow_label; 4842 }; 4843 4844 struct bpf_func_info { 4845 __u32 insn_off; 4846 __u32 type_id; 4847 }; 4848 4849 #define BPF_LINE_INFO_LINE_NUM(line_col) ((line_col) >> 10) 4850 #define BPF_LINE_INFO_LINE_COL(line_col) ((line_col) & 0x3ff) 4851 4852 struct bpf_line_info { 4853 __u32 insn_off; 4854 __u32 file_name_off; 4855 __u32 line_off; 4856 __u32 line_col; 4857 }; 4858 4859 struct bpf_spin_lock { 4860 __u32 val; 4861 }; 4862 4863 struct bpf_sysctl { 4864 __u32 write; /* Sysctl is being read (= 0) or written (= 1). 4865 * Allows 1,2,4-byte read, but no write. 4866 */ 4867 __u32 file_pos; /* Sysctl file position to read from, write to. 4868 * Allows 1,2,4-byte read an 4-byte write. 4869 */ 4870 }; 4871 4872 struct bpf_sockopt { 4873 __bpf_md_ptr(struct bpf_sock *, sk); 4874 __bpf_md_ptr(void *, optval); 4875 __bpf_md_ptr(void *, optval_end); 4876 4877 __s32 level; 4878 __s32 optname; 4879 __s32 optlen; 4880 __s32 retval; 4881 }; 4882 4883 struct bpf_pidns_info { 4884 __u32 pid; 4885 __u32 tgid; 4886 }; 4887 4888 /* User accessible data for SK_LOOKUP programs. Add new fields at the end. */ 4889 struct bpf_sk_lookup { 4890 __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */ 4891 4892 __u32 family; /* Protocol family (AF_INET, AF_INET6) */ 4893 __u32 protocol; /* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */ 4894 __u32 remote_ip4; /* Network byte order */ 4895 __u32 remote_ip6[4]; /* Network byte order */ 4896 __u32 remote_port; /* Network byte order */ 4897 __u32 local_ip4; /* Network byte order */ 4898 __u32 local_ip6[4]; /* Network byte order */ 4899 __u32 local_port; /* Host byte order */ 4900 }; 4901 4902 /* 4903 * struct btf_ptr is used for typed pointer representation; the 4904 * type id is used to render the pointer data as the appropriate type 4905 * via the bpf_snprintf_btf() helper described above. A flags field - 4906 * potentially to specify additional details about the BTF pointer 4907 * (rather than its mode of display) - is included for future use. 4908 * Display flags - BTF_F_* - are passed to bpf_snprintf_btf separately. 4909 */ 4910 struct btf_ptr { 4911 void *ptr; 4912 __u32 type_id; 4913 __u32 flags; /* BTF ptr flags; unused at present. */ 4914 }; 4915 4916 /* 4917 * Flags to control bpf_snprintf_btf() behaviour. 4918 * - BTF_F_COMPACT: no formatting around type information 4919 * - BTF_F_NONAME: no struct/union member names/types 4920 * - BTF_F_PTR_RAW: show raw (unobfuscated) pointer values; 4921 * equivalent to %px. 4922 * - BTF_F_ZERO: show zero-valued struct/union members; they 4923 * are not displayed by default 4924 */ 4925 enum { 4926 BTF_F_COMPACT = (1ULL << 0), 4927 BTF_F_NONAME = (1ULL << 1), 4928 BTF_F_PTR_RAW = (1ULL << 2), 4929 BTF_F_ZERO = (1ULL << 3), 4930 }; 4931 4932 #endif /* _UAPI__LINUX_BPF_H__ */ 4933