/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#ifndef _UAPI__LINUX_BPF_H__
#define _UAPI__LINUX_BPF_H__

#include <linux/types.h>
#include <linux/bpf_common.h>

/* Extended instruction set based on top of classic BPF */

/* instruction classes */
#define BPF_ALU64	0x07	/* alu mode in double word width */

/* ld/ldx fields */
#define BPF_DW		0x18	/* double word (64-bit) */
#define BPF_XADD	0xc0	/* exclusive add */

/* alu/jmp fields */
#define BPF_MOV		0xb0	/* mov reg to reg */
#define BPF_ARSH	0xc0	/* sign extending arithmetic shift right */

/* change endianness of a register */
#define BPF_END		0xd0	/* flags for endianness conversion: */
#define BPF_TO_LE	0x00	/* convert to little-endian */
#define BPF_TO_BE	0x08	/* convert to big-endian */
#define BPF_FROM_LE	BPF_TO_LE
#define BPF_FROM_BE	BPF_TO_BE

/* jmp encodings */
#define BPF_JNE		0x50	/* jump != */
#define BPF_JLT		0xa0	/* LT is unsigned, '<' */
#define BPF_JLE		0xb0	/* LE is unsigned, '<=' */
#define BPF_JSGT	0x60	/* SGT is signed '>', GT in x86 */
#define BPF_JSGE	0x70	/* SGE is signed '>=', GE in x86 */
#define BPF_JSLT	0xc0	/* SLT is signed, '<' */
#define BPF_JSLE	0xd0	/* SLE is signed, '<=' */
#define BPF_CALL	0x80	/* function call */
#define BPF_EXIT	0x90	/* function return */

/* Register numbers */
enum {
	BPF_REG_0 = 0,
	BPF_REG_1,
	BPF_REG_2,
	BPF_REG_3,
	BPF_REG_4,
	BPF_REG_5,
	BPF_REG_6,
	BPF_REG_7,
	BPF_REG_8,
	BPF_REG_9,
	BPF_REG_10,
	__MAX_BPF_REG,
};

/* BPF has 10 general purpose 64-bit registers and stack frame. */
#define MAX_BPF_REG	__MAX_BPF_REG

struct bpf_insn {
	__u8	code;		/* opcode */
	__u8	dst_reg:4;	/* dest register */
	__u8	src_reg:4;	/* source register */
	__s16	off;		/* signed offset */
	__s32	imm;		/* signed immediate constant */
};
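
/* Illustrative sketch (not part of the UAPI): the smallest useful program,
 * "r0 = 0; exit", hand-encoded with struct bpf_insn. BPF_ALU64, BPF_MOV,
 * BPF_EXIT and BPF_REG_0 come from this header; BPF_JMP and BPF_K come from
 * linux/bpf_common.h:
 *
 *	struct bpf_insn prog[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,	// r0 = 0
 *		  .dst_reg = BPF_REG_0, .imm = 0 },
 *		{ .code = BPF_JMP | BPF_EXIT },		// return r0
 *	};
 */
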
/* Key of a BPF_MAP_TYPE_LPM_TRIE entry */
struct bpf_lpm_trie_key {
	__u32	prefixlen;	/* up to 32 for AF_INET, 128 for AF_INET6 */
	__u8	data[0];	/* Arbitrary size */
};

/* BPF syscall commands, see bpf(2) man-page for details. */
enum bpf_cmd {
	BPF_MAP_CREATE,
	BPF_MAP_LOOKUP_ELEM,
	BPF_MAP_UPDATE_ELEM,
	BPF_MAP_DELETE_ELEM,
	BPF_MAP_GET_NEXT_KEY,
	BPF_PROG_LOAD,
	BPF_OBJ_PIN,
	BPF_OBJ_GET,
	BPF_PROG_ATTACH,
	BPF_PROG_DETACH,
	BPF_PROG_TEST_RUN,
	BPF_PROG_GET_NEXT_ID,
	BPF_MAP_GET_NEXT_ID,
	BPF_PROG_GET_FD_BY_ID,
	BPF_MAP_GET_FD_BY_ID,
	BPF_OBJ_GET_INFO_BY_FD,
	BPF_PROG_QUERY,
	BPF_RAW_TRACEPOINT_OPEN,
	BPF_BTF_LOAD,
};

enum bpf_map_type {
	BPF_MAP_TYPE_UNSPEC,
	BPF_MAP_TYPE_HASH,
	BPF_MAP_TYPE_ARRAY,
	BPF_MAP_TYPE_PROG_ARRAY,
	BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	BPF_MAP_TYPE_PERCPU_HASH,
	BPF_MAP_TYPE_PERCPU_ARRAY,
	BPF_MAP_TYPE_STACK_TRACE,
	BPF_MAP_TYPE_CGROUP_ARRAY,
	BPF_MAP_TYPE_LRU_HASH,
	BPF_MAP_TYPE_LRU_PERCPU_HASH,
	BPF_MAP_TYPE_LPM_TRIE,
	BPF_MAP_TYPE_ARRAY_OF_MAPS,
	BPF_MAP_TYPE_HASH_OF_MAPS,
	BPF_MAP_TYPE_DEVMAP,
	BPF_MAP_TYPE_SOCKMAP,
	BPF_MAP_TYPE_CPUMAP,
};

enum bpf_prog_type {
	BPF_PROG_TYPE_UNSPEC,
	BPF_PROG_TYPE_SOCKET_FILTER,
	BPF_PROG_TYPE_KPROBE,
	BPF_PROG_TYPE_SCHED_CLS,
	BPF_PROG_TYPE_SCHED_ACT,
	BPF_PROG_TYPE_TRACEPOINT,
	BPF_PROG_TYPE_XDP,
	BPF_PROG_TYPE_PERF_EVENT,
	BPF_PROG_TYPE_CGROUP_SKB,
	BPF_PROG_TYPE_CGROUP_SOCK,
	BPF_PROG_TYPE_LWT_IN,
	BPF_PROG_TYPE_LWT_OUT,
	BPF_PROG_TYPE_LWT_XMIT,
	BPF_PROG_TYPE_SOCK_OPS,
	BPF_PROG_TYPE_SK_SKB,
	BPF_PROG_TYPE_CGROUP_DEVICE,
	BPF_PROG_TYPE_SK_MSG,
	BPF_PROG_TYPE_RAW_TRACEPOINT,
	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
};

enum bpf_attach_type {
	BPF_CGROUP_INET_INGRESS,
	BPF_CGROUP_INET_EGRESS,
	BPF_CGROUP_INET_SOCK_CREATE,
	BPF_CGROUP_SOCK_OPS,
	BPF_SK_SKB_STREAM_PARSER,
	BPF_SK_SKB_STREAM_VERDICT,
	BPF_CGROUP_DEVICE,
	BPF_SK_MSG_VERDICT,
	BPF_CGROUP_INET4_BIND,
	BPF_CGROUP_INET6_BIND,
	BPF_CGROUP_INET4_CONNECT,
	BPF_CGROUP_INET6_CONNECT,
	BPF_CGROUP_INET4_POST_BIND,
	BPF_CGROUP_INET6_POST_BIND,
	__MAX_BPF_ATTACH_TYPE
};

#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE

/* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
 *
 * NONE(default): No further bpf programs allowed in the subtree.
 *
 * BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program,
 * the program in this cgroup yields to the sub-cgroup program.
 *
 * BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program,
 * that cgroup program gets run in addition to the program in this cgroup.
 *
 * Only one program is allowed to be attached to a cgroup with
 * NONE or BPF_F_ALLOW_OVERRIDE flag.
 * Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will
 * release the old program and attach the new one. Attach flags have to match.
 *
 * Multiple programs are allowed to be attached to a cgroup with
 * BPF_F_ALLOW_MULTI flag. They are executed in FIFO order
 * (those that were attached first, run first).
 * The programs of the sub-cgroup are executed first, then programs of
 * this cgroup and then programs of the parent cgroup.
 * When a child program makes a decision (like picking TCP CA or sock bind)
 * the parent program has a chance to override it.
 *
 * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups.
 * A cgroup with NONE doesn't allow any programs in sub-cgroups.
 * Ex1:
 * cgrp1 (MULTI progs A, B) ->
 *    cgrp2 (OVERRIDE prog C) ->
 *      cgrp3 (MULTI prog D) ->
 *        cgrp4 (OVERRIDE prog E) ->
 *          cgrp5 (NONE prog F)
 * the event in cgrp5 triggers execution of F,D,A,B in that order.
 * if prog F is detached, the execution is E,D,A,B
 * if prog F and D are detached, the execution is E,A,B
 * if prog F, E and D are detached, the execution is C,A,B
 *
 * All eligible programs are executed regardless of return code from
 * earlier programs.
 */
#define BPF_F_ALLOW_OVERRIDE	(1U << 0)
#define BPF_F_ALLOW_MULTI	(1U << 1)
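
/* Illustrative sketch (not part of the UAPI): attaching an already-loaded
 * cgroup program with BPF_F_ALLOW_MULTI through the raw bpf(2) syscall,
 * using the union bpf_attr defined further down. cgroup_fd and prog_fd are
 * assumed to come from open(2) on the cgroup directory and from a prior
 * BPF_PROG_LOAD; __NR_bpf comes from <sys/syscall.h>:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd	   = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type   = BPF_CGROUP_INET_INGRESS;
 *	attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *	if (syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr)) < 0)
 *		perror("BPF_PROG_ATTACH");
 */
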
/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
 * verifier will perform strict alignment checking as if the kernel
 * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set,
 * and NET_IP_ALIGN defined to 2.
 */
#define BPF_F_STRICT_ALIGNMENT	(1U << 0)

/* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */
#define BPF_PSEUDO_MAP_FD	1

/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
 * offset to another bpf function
 */
#define BPF_PSEUDO_CALL		1

/* flags for BPF_MAP_UPDATE_ELEM command */
#define BPF_ANY		0 /* create new element or update existing */
#define BPF_NOEXIST	1 /* create new element if it didn't exist */
#define BPF_EXIST	2 /* update existing element */
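
/* Illustrative sketch (not part of the UAPI): BPF_MAP_UPDATE_ELEM with the
 * flags above, again via the raw bpf(2) syscall. map_fd, key and value are
 * assumed to exist; with BPF_NOEXIST the call fails with errno EEXIST when
 * the key is already present, and with BPF_EXIST it fails with errno ENOENT
 * when it is not:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.map_fd = map_fd;
 *	attr.key    = (__u64)(unsigned long)&key;
 *	attr.value  = (__u64)(unsigned long)&value;
 *	attr.flags  = BPF_NOEXIST;
 *	if (syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)) < 0)
 *		perror("BPF_MAP_UPDATE_ELEM");
 */
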
/* flags for BPF_MAP_CREATE command */
#define BPF_F_NO_PREALLOC	(1U << 0)
/* Instead of having one common LRU list in the
 * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
 * which can scale and perform better.
 * Note, the LRU nodes (including free nodes) cannot be moved
 * across different LRU lists.
 */
#define BPF_F_NO_COMMON_LRU	(1U << 1)
/* Specify numa node during map creation */
#define BPF_F_NUMA_NODE		(1U << 2)

/* flags for BPF_PROG_QUERY */
#define BPF_F_QUERY_EFFECTIVE	(1U << 0)

#define BPF_OBJ_NAME_LEN 16U

/* Flags for accessing BPF object */
#define BPF_F_RDONLY		(1U << 3)
#define BPF_F_WRONLY		(1U << 4)

/* Flag for stack_map, store build_id+offset instead of pointer */
#define BPF_F_STACK_BUILD_ID	(1U << 5)

enum bpf_stack_build_id_status {
	/* user space needs an empty entry to identify end of a trace */
	BPF_STACK_BUILD_ID_EMPTY = 0,
	/* with valid build_id and offset */
	BPF_STACK_BUILD_ID_VALID = 1,
	/* couldn't get build_id, fallback to ip */
	BPF_STACK_BUILD_ID_IP = 2,
};

#define BPF_BUILD_ID_SIZE 20
struct bpf_stack_build_id {
	__s32		status;
	unsigned char	build_id[BPF_BUILD_ID_SIZE];
	union {
		__u64	offset;
		__u64	ip;
	};
};

union bpf_attr {
	struct { /* anonymous struct used by BPF_MAP_CREATE command */
		__u32	map_type;	/* one of enum bpf_map_type */
		__u32	key_size;	/* size of key in bytes */
		__u32	value_size;	/* size of value in bytes */
		__u32	max_entries;	/* max number of entries in a map */
		__u32	map_flags;	/* BPF_MAP_CREATE related
					 * flags defined above.
					 */
		__u32	inner_map_fd;	/* fd pointing to the inner map */
		__u32	numa_node;	/* numa node (effective only if
					 * BPF_F_NUMA_NODE is set).
					 */
		char	map_name[BPF_OBJ_NAME_LEN];
		__u32	map_ifindex;	/* ifindex of netdev to create on */
		__u32	btf_fd;		/* fd pointing to a BTF type data */
		__u32	btf_key_id;	/* BTF type_id of the key */
		__u32	btf_value_id;	/* BTF type_id of the value */
	};

	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
		__u32		map_fd;
		__aligned_u64	key;
		union {
			__aligned_u64 value;
			__aligned_u64 next_key;
		};
		__u64		flags;
	};

	struct { /* anonymous struct used by BPF_PROG_LOAD command */
		__u32		prog_type;	/* one of enum bpf_prog_type */
		__u32		insn_cnt;
		__aligned_u64	insns;
		__aligned_u64	license;
		__u32		log_level;	/* verbosity level of verifier */
		__u32		log_size;	/* size of user buffer */
		__aligned_u64	log_buf;	/* user supplied buffer */
		__u32		kern_version;	/* checked when prog_type=kprobe */
		__u32		prog_flags;
		char		prog_name[BPF_OBJ_NAME_LEN];
		__u32		prog_ifindex;	/* ifindex of netdev to prep for */
		/* For some prog types expected attach type must be known at
		 * load time to verify attach type specific parts of prog
		 * (context accesses, allowed helpers, etc).
		 */
		__u32		expected_attach_type;
	};

	struct { /* anonymous struct used by BPF_OBJ_* commands */
		__aligned_u64	pathname;
		__u32		bpf_fd;
		__u32		file_flags;
	};

	struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
		__u32		target_fd;	/* container object to attach to */
		__u32		attach_bpf_fd;	/* eBPF program to attach */
		__u32		attach_type;
		__u32		attach_flags;
	};

	struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
		__u32		prog_fd;
		__u32		retval;
		__u32		data_size_in;
		__u32		data_size_out;
		__aligned_u64	data_in;
		__aligned_u64	data_out;
		__u32		repeat;
		__u32		duration;
	} test;

	struct { /* anonymous struct used by BPF_*_GET_*_ID */
		union {
			__u32	start_id;
			__u32	prog_id;
			__u32	map_id;
		};
		__u32		next_id;
		__u32		open_flags;
	};

	struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
		__u32		bpf_fd;
		__u32		info_len;
		__aligned_u64	info;
	} info;

	struct { /* anonymous struct used by BPF_PROG_QUERY command */
		__u32		target_fd;	/* container object to query */
		__u32		attach_type;
		__u32		query_flags;
		__u32		attach_flags;
		__aligned_u64	prog_ids;
		__u32		prog_cnt;
	} query;

	struct {
		__u64 name;
		__u32 prog_fd;
	} raw_tracepoint;

	struct { /* anonymous struct for BPF_BTF_LOAD */
		__aligned_u64	btf;
		__aligned_u64	btf_log_buf;
		__u32		btf_size;
		__u32		btf_log_size;
		__u32		btf_log_level;
	};
} __attribute__((aligned(8)));
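
/* Illustrative sketch (not part of the UAPI): creating a small hash map with
 * the anonymous BPF_MAP_CREATE member of union bpf_attr. Unused fields must
 * be zero, which the empty initializer takes care of; __NR_bpf comes from
 * <sys/syscall.h>:
 *
 *	union bpf_attr attr = {};
 *	int map_fd;
 *
 *	attr.map_type	 = BPF_MAP_TYPE_HASH;
 *	attr.key_size	 = sizeof(__u32);
 *	attr.value_size	 = sizeof(__u64);
 *	attr.max_entries = 1024;
 *	map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *	if (map_fd < 0)
 *		perror("BPF_MAP_CREATE");
 */
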
/* BPF helper function descriptions:
 *
 * void *bpf_map_lookup_elem(&map, &key)
 *     Return: Map value or NULL
 *
 * int bpf_map_update_elem(&map, &key, &value, flags)
 *     Return: 0 on success or negative error
 *
 * int bpf_map_delete_elem(&map, &key)
 *     Return: 0 on success or negative error
 *
 * int bpf_probe_read(void *dst, int size, void *src)
 *     Return: 0 on success or negative error
 *
 * u64 bpf_ktime_get_ns(void)
 *     Return: current ktime
 *
 * int bpf_trace_printk(const char *fmt, int fmt_size, ...)
 *     Return: length of buffer written or negative error
 *
 * u32 bpf_prandom_u32(void)
 *     Return: random value
 *
 * u32 bpf_raw_smp_processor_id(void)
 *     Return: SMP processor ID
 *
 * int bpf_skb_store_bytes(skb, offset, from, len, flags)
 *     store bytes into packet
 *     @skb: pointer to skb
 *     @offset: offset within packet from skb->mac_header
 *     @from: pointer where to copy bytes from
 *     @len: number of bytes to store into packet
 *     @flags: bit 0 - if true, recompute skb->csum
 *             other bits - reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_l3_csum_replace(skb, offset, from, to, flags)
 *     recompute IP checksum
 *     @skb: pointer to skb
 *     @offset: offset within packet where IP checksum is located
 *     @from: old value of header field
 *     @to: new value of header field
 *     @flags: bits 0-3 - size of header field
 *             other bits - reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_l4_csum_replace(skb, offset, from, to, flags)
 *     recompute TCP/UDP checksum
 *     @skb: pointer to skb
 *     @offset: offset within packet where TCP/UDP checksum is located
 *     @from: old value of header field
 *     @to: new value of header field
 *     @flags: bits 0-3 - size of header field
 *             bit 4 - is pseudo header
 *             other bits - reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_tail_call(ctx, prog_array_map, index)
 *     jump into another BPF program
 *     @ctx: context pointer passed to next program
 *     @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY
 *     @index: 32-bit index inside array that selects specific program to run
 *     Return: 0 on success or negative error
 *
 * int bpf_clone_redirect(skb, ifindex, flags)
 *     redirect to another netdev
 *     @skb: pointer to skb
 *     @ifindex: ifindex of the net device
 *     @flags: bit 0 - if set, redirect to ingress instead of egress
 *             other bits - reserved
 *     Return: 0 on success or negative error
 *
 * u64 bpf_get_current_pid_tgid(void)
 *     Return: current->tgid << 32 | current->pid
 *
 * u64 bpf_get_current_uid_gid(void)
 *     Return: current_gid << 32 | current_uid
 *
 * int bpf_get_current_comm(char *buf, int size_of_buf)
 *     stores current->comm into buf
 *     Return: 0 on success or negative error
 *
 * u32 bpf_get_cgroup_classid(skb)
 *     retrieve a proc's classid
 *     @skb: pointer to skb
 *     Return: classid if != 0
 *
 * int bpf_skb_vlan_push(skb, vlan_proto, vlan_tci)
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_vlan_pop(skb)
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_get_tunnel_key(skb, key, size, flags)
 * int bpf_skb_set_tunnel_key(skb, key, size, flags)
 *     retrieve or populate tunnel metadata
 *     @skb: pointer to skb
 *     @key: pointer to 'struct bpf_tunnel_key'
 *     @size: size of 'struct bpf_tunnel_key'
 *     @flags: room for future extensions
 *     Return: 0 on success or negative error
 *
 * u64 bpf_perf_event_read(map, flags)
 *     read perf event counter value
 *     @map: pointer to perf_event_array map
 *     @flags: index of event in the map or bitmask flags
 *     Return: value of perf event counter read or error code
 *
 * int bpf_redirect(ifindex, flags)
 *     redirect to another netdev
 *     @ifindex: ifindex of the net device
 *     @flags:
 *       cls_bpf:
 *         bit 0 - if set, redirect to ingress instead of egress
 *         other bits - reserved
 *       xdp_bpf:
 *         all bits - reserved
 *     Return: cls_bpf: TC_ACT_REDIRECT on success or TC_ACT_SHOT on error
 *             xdp_bpf: XDP_REDIRECT on success or XDP_ABORTED on error
 *
 * int bpf_redirect_map(map, key, flags)
 *     redirect to endpoint in map
 *     @map: pointer to dev map
 *     @key: index in map to lookup
 *     @flags: --
 *     Return: XDP_REDIRECT on success or XDP_ABORTED on error
 *
 * u32 bpf_get_route_realm(skb)
 *     retrieve a dst's tclassid
 *     @skb: pointer to skb
 *     Return: realm if != 0
 *
 * int bpf_perf_event_output(ctx, map, flags, data, size)
 *     output perf raw sample
 *     @ctx: struct pt_regs*
 *     @map: pointer to perf_event_array map
 *     @flags: index of event in the map or bitmask flags
 *     @data: data on stack to be output as raw data
 *     @size: size of data
 *     Return: 0 on success or negative error
 *
 * int bpf_get_stackid(ctx, map, flags)
 *     walk user or kernel stack and return id
 *     @ctx: struct pt_regs*
 *     @map: pointer to stack_trace map
 *     @flags: bits 0-7 - number of stack frames to skip
 *             bit 8 - collect user stack instead of kernel
 *             bit 9 - compare stacks by hash only
 *             bit 10 - if two different stacks hash into the same stackid
 *                      discard old
 *             other bits - reserved
 *     Return: >= 0 stackid on success or negative error
 *
 * s64 bpf_csum_diff(from, from_size, to, to_size, seed)
 *     calculate csum diff
 *     @from: raw from buffer
 *     @from_size: length of from buffer
 *     @to: raw to buffer
 *     @to_size: length of to buffer
 *     @seed: optional seed
 *     Return: csum result or negative error code
 *
 * int bpf_skb_get_tunnel_opt(skb, opt, size)
 *     retrieve tunnel options metadata
 *     @skb: pointer to skb
 *     @opt: pointer to raw tunnel option data
 *     @size: size of @opt
 *     Return: option size
 *
 * int bpf_skb_set_tunnel_opt(skb, opt, size)
 *     populate tunnel options metadata
 *     @skb: pointer to skb
 *     @opt: pointer to raw tunnel option data
 *     @size: size of @opt
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_change_proto(skb, proto, flags)
 *     Change protocol of the skb. Currently supported are the v4 -> v6 and
 *     v6 -> v4 transitions. The helper will also resize the skb. The eBPF
 *     program is expected to fill the new headers via skb_store_bytes
 *     and lX_csum_replace.
 *     @skb: pointer to skb
 *     @proto: new skb->protocol type
 *     @flags: reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_change_type(skb, type)
 *     Change packet type of skb.
 *     @skb: pointer to skb
 *     @type: new skb->pkt_type type
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_under_cgroup(skb, map, index)
 *     Check cgroup2 membership of skb
 *     @skb: pointer to skb
 *     @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
 *     @index: index of the cgroup in the bpf_map
 *     Return:
 *       == 0 skb failed the cgroup2 descendant test
 *       == 1 skb succeeded the cgroup2 descendant test
 *        < 0 error
 *
 * u32 bpf_get_hash_recalc(skb)
 *     Retrieve and possibly recalculate skb->hash.
 *     @skb: pointer to skb
 *     Return: hash
 *
 * u64 bpf_get_current_task(void)
 *     Returns current task_struct
 *     Return: current
 *
 * int bpf_probe_write_user(void *dst, void *src, int len)
 *     safely attempt to write to a location
 *     @dst: destination address in userspace
 *     @src: source address on stack
 *     @len: number of bytes to copy
 *     Return: 0 on success or negative error
 *
 * int bpf_current_task_under_cgroup(map, index)
 *     Check cgroup2 membership of current task
 *     @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
 *     @index: index of the cgroup in the bpf_map
 *     Return:
 *       == 0 current failed the cgroup2 descendant test
 *       == 1 current succeeded the cgroup2 descendant test
 *        < 0 error
 *
 * int bpf_skb_change_tail(skb, len, flags)
 *     The helper will resize the skb to the given new size, to be used f.e.
 *     with control messages.
 *     @skb: pointer to skb
 *     @len: new skb length
 *     @flags: reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_pull_data(skb, len)
 *     The helper will pull in non-linear data in case the skb is non-linear
 *     and not all of len are part of the linear section. Only needed for
 *     read/write with direct packet access.
 *     @skb: pointer to skb
 *     @len: len to make read/writeable
 *     Return: 0 on success or negative error
 *
 * s64 bpf_csum_update(skb, csum)
 *     Adds csum into skb->csum in case of CHECKSUM_COMPLETE.
 *     @skb: pointer to skb
 *     @csum: csum to add
 *     Return: csum on success or negative error
 *
 * void bpf_set_hash_invalid(skb)
 *     Invalidate current skb->hash.
 *     @skb: pointer to skb
 *
 * int bpf_get_numa_node_id()
 *     Return: Id of current NUMA node.
 *
 * int bpf_skb_change_head()
 *     Grows headroom of skb and adjusts MAC header offset accordingly.
 *     Will extend/reallocate as required automatically.
 *     May change skb data pointer and will thus invalidate any check
 *     performed for direct packet access.
 *     @skb: pointer to skb
 *     @len: length of header to be pushed in front
 *     @flags: Flags (unused for now)
 *     Return: 0 on success or negative error
 *
 * int bpf_xdp_adjust_head(xdp_md, delta)
 *     Adjust the xdp_md.data by delta
 *     @xdp_md: pointer to xdp_md
 *     @delta: A positive/negative integer to be added to xdp_md.data
 *     Return: 0 on success or negative on error
 *
 * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr)
 *     Copy a NUL terminated string from unsafe address. In case the string
 *     length is smaller than size, the target is not padded with further NUL
 *     bytes. In case the string length is larger than size, just count-1
 *     bytes are copied and the last byte is set to NUL.
 *     @dst: destination address
 *     @size: maximum number of bytes to copy, including the trailing NUL
 *     @unsafe_ptr: unsafe address
 *     Return:
 *       > 0 length of the string including the trailing NUL on success
 *       < 0 error
 *
 * u64 bpf_get_socket_cookie(skb)
 *     Get the cookie for the socket stored inside sk_buff.
 *     @skb: pointer to skb
 *     Return: 8 Bytes non-decreasing number on success or 0 if the socket
 *     field is missing inside sk_buff
 *
 * u32 bpf_get_socket_uid(skb)
 *     Get the owner uid of the socket stored inside sk_buff.
 *     @skb: pointer to skb
 *     Return: uid of the socket owner on success or overflowuid if failed.
 *
 * u32 bpf_set_hash(skb, hash)
 *     Set full skb->hash.
 *     @skb: pointer to skb
 *     @hash: hash to set
 *
 * int bpf_setsockopt(bpf_socket, level, optname, optval, optlen)
 *     Calls setsockopt. Not all opts are available, only those with
 *     integer optvals plus TCP_CONGESTION.
 *     Supported levels: SOL_SOCKET and IPPROTO_TCP
 *     @bpf_socket: pointer to bpf_socket
 *     @level: SOL_SOCKET or IPPROTO_TCP
 *     @optname: option name
 *     @optval: pointer to option value
 *     @optlen: length of optval in bytes
 *     Return: 0 or negative error
 *
 * int bpf_getsockopt(bpf_socket, level, optname, optval, optlen)
 *     Calls getsockopt. Not all opts are available.
 *     Supported levels: IPPROTO_TCP
 *     @bpf_socket: pointer to bpf_socket
 *     @level: IPPROTO_TCP
 *     @optname: option name
 *     @optval: pointer to option value
 *     @optlen: length of optval in bytes
 *     Return: 0 or negative error
 *
 * int bpf_sock_ops_cb_flags_set(bpf_sock_ops, flags)
 *     Set callback flags for sock_ops
 *     @bpf_sock_ops: pointer to bpf_sock_ops_kern struct
 *     @flags: flags value
 *     Return: 0 for no error
 *             -EINVAL if there is no full tcp socket
 *             bits in flags that are not supported by current kernel
 *
 * int bpf_skb_adjust_room(skb, len_diff, mode, flags)
 *     Grow or shrink room in sk_buff.
 *     @skb: pointer to skb
 *     @len_diff: (signed) amount of room to grow/shrink
 *     @mode: operation mode (enum bpf_adj_room_mode)
 *     @flags: reserved for future use
 *     Return: 0 on success or negative error code
 *
 * int bpf_sk_redirect_map(map, key, flags)
 *     Redirect skb to a sock in map using key as a lookup key for the
 *     sock in map.
 *     @map: pointer to sockmap
 *     @key: key to lookup sock in map
 *     @flags: reserved for future use
 *     Return: SK_PASS
 *
 * int bpf_sock_map_update(skops, map, key, flags)
 *     @skops: pointer to bpf_sock_ops
 *     @map: pointer to sockmap to update
 *     @key: key to insert/update sock in map
 *     @flags: same flags as map update elem
 *
 * int bpf_xdp_adjust_meta(xdp_md, delta)
 *     Adjust the xdp_md.data_meta by delta
 *     @xdp_md: pointer to xdp_md
 *     @delta: A positive/negative integer to be added to xdp_md.data_meta
 *     Return: 0 on success or negative on error
 *
 * int bpf_perf_event_read_value(map, flags, buf, buf_size)
 *     read perf event counter value and perf event enabled/running time
 *     @map: pointer to perf_event_array map
 *     @flags: index of event in the map or bitmask flags
 *     @buf: buf to fill
 *     @buf_size: size of the buf
 *     Return: 0 on success or negative error code
 *
 * int bpf_perf_prog_read_value(ctx, buf, buf_size)
 *     read perf prog attached perf event counter and enabled/running time
 *     @ctx: pointer to ctx
 *     @buf: buf to fill
 *     @buf_size: size of the buf
 *     Return: 0 on success or negative error code
 *
 * int bpf_override_return(pt_regs, rc)
 *     @pt_regs: pointer to struct pt_regs
 *     @rc: the return value to set
 *
 * int bpf_msg_redirect_map(map, key, flags)
 *     Redirect msg to a sock in map using key as a lookup key for the
 *     sock in map.
 *     @map: pointer to sockmap
 *     @key: key to lookup sock in map
 *     @flags: reserved for future use
 *     Return: SK_PASS
 *
 * int bpf_bind(ctx, addr, addr_len)
 *     Bind socket to address. Only binding to IP is supported, no port can be
 *     set in addr.
 *     @ctx: pointer to context of type bpf_sock_addr
 *     @addr: pointer to struct sockaddr to bind socket to
 *     @addr_len: length of sockaddr structure
 *     Return: 0 on success or negative error code
 *
 * int bpf_xdp_adjust_tail(xdp_md, delta)
 *     Adjust the xdp_md.data_end by delta. Only shrinking of packet's
 *     size is supported.
 *     @xdp_md: pointer to xdp_md
 *     @delta: A negative integer to be added to xdp_md.data_end
 *     Return: 0 on success or negative on error
 */
#define __BPF_FUNC_MAPPER(FN)		\
	FN(unspec),			\
	FN(map_lookup_elem),		\
	FN(map_update_elem),		\
	FN(map_delete_elem),		\
	FN(probe_read),			\
	FN(ktime_get_ns),		\
	FN(trace_printk),		\
	FN(get_prandom_u32),		\
	FN(get_smp_processor_id),	\
	FN(skb_store_bytes),		\
	FN(l3_csum_replace),		\
	FN(l4_csum_replace),		\
	FN(tail_call),			\
	FN(clone_redirect),		\
	FN(get_current_pid_tgid),	\
	FN(get_current_uid_gid),	\
	FN(get_current_comm),		\
	FN(get_cgroup_classid),		\
	FN(skb_vlan_push),		\
	FN(skb_vlan_pop),		\
	FN(skb_get_tunnel_key),		\
	FN(skb_set_tunnel_key),		\
	FN(perf_event_read),		\
	FN(redirect),			\
	FN(get_route_realm),		\
	FN(perf_event_output),		\
	FN(skb_load_bytes),		\
	FN(get_stackid),		\
	FN(csum_diff),			\
	FN(skb_get_tunnel_opt),		\
	FN(skb_set_tunnel_opt),		\
	FN(skb_change_proto),		\
	FN(skb_change_type),		\
	FN(skb_under_cgroup),		\
	FN(get_hash_recalc),		\
	FN(get_current_task),		\
	FN(probe_write_user),		\
	FN(current_task_under_cgroup),	\
	FN(skb_change_tail),		\
	FN(skb_pull_data),		\
	FN(csum_update),		\
	FN(set_hash_invalid),		\
	FN(get_numa_node_id),		\
	FN(skb_change_head),		\
	FN(xdp_adjust_head),		\
	FN(probe_read_str),		\
	FN(get_socket_cookie),		\
	FN(get_socket_uid),		\
	FN(set_hash),			\
	FN(setsockopt),			\
	FN(skb_adjust_room),		\
	FN(redirect_map),		\
	FN(sk_redirect_map),		\
	FN(sock_map_update),		\
	FN(xdp_adjust_meta),		\
	FN(perf_event_read_value),	\
	FN(perf_prog_read_value),	\
	FN(getsockopt),			\
	FN(override_return),		\
	FN(sock_ops_cb_flags_set),	\
	FN(msg_redirect_map),		\
	FN(msg_apply_bytes),		\
	FN(msg_cork_bytes),		\
	FN(msg_pull_data),		\
	FN(bind),			\
	FN(xdp_adjust_tail),

/* integer value in 'imm' field of BPF_CALL instruction selects which helper
 * function eBPF program intends to call
 */
#define __BPF_ENUM_FN(x) BPF_FUNC_ ## x
enum bpf_func_id {
	__BPF_FUNC_MAPPER(__BPF_ENUM_FN)
	__BPF_FUNC_MAX_ID,
};
#undef __BPF_ENUM_FN
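
/* Illustrative sketch (not part of the UAPI): a helper call is a BPF_CALL
 * instruction whose imm field carries one of the bpf_func_id values generated
 * above (e.g. FN(ktime_get_ns) expands to BPF_FUNC_ktime_get_ns). A raw call
 * to bpf_ktime_get_ns() could be encoded as:
 *
 *	struct bpf_insn call_ktime = {
 *		.code = BPF_JMP | BPF_CALL,
 *		.imm  = BPF_FUNC_ktime_get_ns,	// result lands in BPF_REG_0
 *	};
 */
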
/* All flags used by eBPF helper functions, placed here. */

/* BPF_FUNC_skb_store_bytes flags. */
#define BPF_F_RECOMPUTE_CSUM		(1ULL << 0)
#define BPF_F_INVALIDATE_HASH		(1ULL << 1)

/* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
 * First 4 bits are for passing the header field size.
 */
#define BPF_F_HDR_FIELD_MASK		0xfULL

/* BPF_FUNC_l4_csum_replace flags. */
#define BPF_F_PSEUDO_HDR		(1ULL << 4)
#define BPF_F_MARK_MANGLED_0		(1ULL << 5)
#define BPF_F_MARK_ENFORCE		(1ULL << 6)

/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
#define BPF_F_INGRESS			(1ULL << 0)

/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
#define BPF_F_TUNINFO_IPV6		(1ULL << 0)

/* BPF_FUNC_get_stackid flags. */
#define BPF_F_SKIP_FIELD_MASK		0xffULL
#define BPF_F_USER_STACK		(1ULL << 8)
#define BPF_F_FAST_STACK_CMP		(1ULL << 9)
#define BPF_F_REUSE_STACKID		(1ULL << 10)

/* BPF_FUNC_skb_set_tunnel_key flags. */
#define BPF_F_ZERO_CSUM_TX		(1ULL << 1)
#define BPF_F_DONT_FRAGMENT		(1ULL << 2)
#define BPF_F_SEQ_NUMBER		(1ULL << 3)

/* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and
 * BPF_FUNC_perf_event_read_value flags.
 */
#define BPF_F_INDEX_MASK		0xffffffffULL
#define BPF_F_CURRENT_CPU		BPF_F_INDEX_MASK
/* BPF_FUNC_perf_event_output for sk_buff input context. */
#define BPF_F_CTXLEN_MASK		(0xfffffULL << 32)

/* Mode for BPF_FUNC_skb_adjust_room helper. */
enum bpf_adj_room_mode {
	BPF_ADJ_ROOM_NET,
};

/* user accessible mirror of in-kernel sk_buff.
 * new fields can only be added to the end of this structure
 */
struct __sk_buff {
	__u32 len;
	__u32 pkt_type;
	__u32 mark;
	__u32 queue_mapping;
	__u32 protocol;
	__u32 vlan_present;
	__u32 vlan_tci;
	__u32 vlan_proto;
	__u32 priority;
	__u32 ingress_ifindex;
	__u32 ifindex;
	__u32 tc_index;
	__u32 cb[5];
	__u32 hash;
	__u32 tc_classid;
	__u32 data;
	__u32 data_end;
	__u32 napi_id;

	/* Accessed by BPF_PROG_TYPE_SK_SKB types from here to ... */
	__u32 family;
	__u32 remote_ip4;	/* Stored in network byte order */
	__u32 local_ip4;	/* Stored in network byte order */
	__u32 remote_ip6[4];	/* Stored in network byte order */
	__u32 local_ip6[4];	/* Stored in network byte order */
	__u32 remote_port;	/* Stored in network byte order */
	__u32 local_port;	/* stored in host byte order */
	/* ... here. */

	__u32 data_meta;
};

struct bpf_tunnel_key {
	__u32 tunnel_id;
	union {
		__u32 remote_ipv4;
		__u32 remote_ipv6[4];
	};
	__u8 tunnel_tos;
	__u8 tunnel_ttl;
	__u16 tunnel_ext;
	__u32 tunnel_label;
};

/* Generic BPF return codes which all BPF program types may support.
 * The values are binary compatible with their TC_ACT_* counter-part to
 * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT
 * programs.
 *
 * XDP is handled separately, see XDP_*.
 */
enum bpf_ret_code {
	BPF_OK = 0,
	/* 1 reserved */
	BPF_DROP = 2,
	/* 3-6 reserved */
	BPF_REDIRECT = 7,
	/* >127 are reserved for prog type specific return codes */
};

struct bpf_sock {
	__u32 bound_dev_if;
	__u32 family;
	__u32 type;
	__u32 protocol;
	__u32 mark;
	__u32 priority;
	__u32 src_ip4;		/* Allows 1,2,4-byte read.
				 * Stored in network byte order.
				 */
	__u32 src_ip6[4];	/* Allows 1,2,4-byte read.
				 * Stored in network byte order.
				 */
	__u32 src_port;		/* Allows 4-byte read.
				 * Stored in host byte order
				 */
};

#define XDP_PACKET_HEADROOM 256

/* User return codes for XDP prog type.
 * A valid XDP program must return one of these defined values. All other
 * return codes are reserved for future use. Unknown return codes will
 * result in packet drops and a warning via bpf_warn_invalid_xdp_action().
 */
enum xdp_action {
	XDP_ABORTED = 0,
	XDP_DROP,
	XDP_PASS,
	XDP_TX,
	XDP_REDIRECT,
};

/* user accessible metadata for XDP packet hook
 * new fields must be added to the end of this structure
 */
struct xdp_md {
	__u32 data;
	__u32 data_end;
	__u32 data_meta;
	/* Accesses below go through struct xdp_rxq_info */
	__u32 ingress_ifindex; /* rxq->dev->ifindex */
	__u32 rx_queue_index;  /* rxq->queue_index */
};
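
/* Illustrative sketch (not part of the UAPI): a minimal XDP program in
 * restricted C operating on struct xdp_md. The SEC() macro, the clang
 * -target bpf build step and struct ethhdr (linux/if_ether.h) are
 * assumptions from typical BPF toolchains, not part of this header. The
 * bounds check against data_end is what the verifier requires before any
 * packet bytes may be read:
 *
 *	SEC("xdp")
 *	int xdp_pass_prog(struct xdp_md *ctx)
 *	{
 *		void *data     = (void *)(long)ctx->data;
 *		void *data_end = (void *)(long)ctx->data_end;
 *
 *		if (data + sizeof(struct ethhdr) > data_end)
 *			return XDP_DROP;	// malformed/short frame
 *		return XDP_PASS;		// let the stack see it
 *	}
 */
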
enum sk_action {
	SK_DROP = 0,
	SK_PASS,
};

/* user accessible metadata for SK_MSG packet hook, new fields must
 * be added to the end of this structure
 */
struct sk_msg_md {
	void *data;
	void *data_end;
};

#define BPF_TAG_SIZE	8

struct bpf_prog_info {
	__u32 type;
	__u32 id;
	__u8  tag[BPF_TAG_SIZE];
	__u32 jited_prog_len;
	__u32 xlated_prog_len;
	__aligned_u64 jited_prog_insns;
	__aligned_u64 xlated_prog_insns;
	__u64 load_time;	/* ns since boottime */
	__u32 created_by_uid;
	__u32 nr_map_ids;
	__aligned_u64 map_ids;
	char  name[BPF_OBJ_NAME_LEN];
	__u32 ifindex;
	__u64 netns_dev;
	__u64 netns_ino;
} __attribute__((aligned(8)));

struct bpf_map_info {
	__u32 type;
	__u32 id;
	__u32 key_size;
	__u32 value_size;
	__u32 max_entries;
	__u32 map_flags;
	char  name[BPF_OBJ_NAME_LEN];
	__u32 ifindex;
	__u64 netns_dev;
	__u64 netns_ino;
} __attribute__((aligned(8)));

/* User bpf_sock_addr struct to access socket fields and sockaddr struct passed
 * by user and intended to be used by socket (e.g. to bind to, depends on
 * attach type).
 */
struct bpf_sock_addr {
	__u32 user_family;	/* Allows 4-byte read, but no write. */
	__u32 user_ip4;		/* Allows 1,2,4-byte read and 4-byte write.
				 * Stored in network byte order.
				 */
	__u32 user_ip6[4];	/* Allows 1,2,4-byte read and 4-byte write.
				 * Stored in network byte order.
				 */
	__u32 user_port;	/* Allows 4-byte read and write.
				 * Stored in network byte order
				 */
	__u32 family;		/* Allows 4-byte read, but no write */
	__u32 type;		/* Allows 4-byte read, but no write */
	__u32 protocol;		/* Allows 4-byte read, but no write */
};

/* User bpf_sock_ops struct to access socket values and specify request ops
 * and their replies.
 * Some of these fields are in network (big-endian) byte order and may need
 * to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h).
 * New fields can only be added at the end of this structure
 */
struct bpf_sock_ops {
	__u32 op;
	union {
		__u32 args[4];		/* Optionally passed to bpf program */
		__u32 reply;		/* Returned by bpf program */
		__u32 replylong[4];	/* Optionally returned by bpf prog */
	};
	__u32 family;
	__u32 remote_ip4;	/* Stored in network byte order */
	__u32 local_ip4;	/* Stored in network byte order */
	__u32 remote_ip6[4];	/* Stored in network byte order */
	__u32 local_ip6[4];	/* Stored in network byte order */
	__u32 remote_port;	/* Stored in network byte order */
	__u32 local_port;	/* stored in host byte order */
	__u32 is_fullsock;	/* Some TCP fields are only valid if
				 * there is a full socket. If not, the
				 * fields read as zero.
				 */
	__u32 snd_cwnd;
	__u32 srtt_us;		/* Averaged RTT << 3 in usecs */
	__u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */
	__u32 state;
	__u32 rtt_min;
	__u32 snd_ssthresh;
	__u32 rcv_nxt;
	__u32 snd_nxt;
	__u32 snd_una;
	__u32 mss_cache;
	__u32 ecn_flags;
	__u32 rate_delivered;
	__u32 rate_interval_us;
	__u32 packets_out;
	__u32 retrans_out;
	__u32 total_retrans;
	__u32 segs_in;
	__u32 data_segs_in;
	__u32 segs_out;
	__u32 data_segs_out;
	__u32 lost_out;
	__u32 sacked_out;
	__u32 sk_txhash;
	__u64 bytes_received;
	__u64 bytes_acked;
};

/* Definitions for bpf_sock_ops_cb_flags */
#define BPF_SOCK_OPS_RTO_CB_FLAG	(1<<0)
#define BPF_SOCK_OPS_RETRANS_CB_FLAG	(1<<1)
#define BPF_SOCK_OPS_STATE_CB_FLAG	(1<<2)
#define BPF_SOCK_OPS_ALL_CB_FLAGS	0x7	/* Mask of all currently
						 * supported cb flags
						 */
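
/* Illustrative sketch (not part of the UAPI): a sock_ops program asking for
 * RTO and state-change callbacks with the flags defined above. The SEC()
 * macro and the bpf_sock_ops_cb_flags_set() wrapper are assumed to come from
 * a helper-declaration header such as the ones used in samples/bpf; the op
 * values are defined in the enum that follows:
 *
 *	SEC("sockops")
 *	int sockops_prog(struct bpf_sock_ops *skops)
 *	{
 *		if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
 *			bpf_sock_ops_cb_flags_set(skops,
 *						  BPF_SOCK_OPS_RTO_CB_FLAG |
 *						  BPF_SOCK_OPS_STATE_CB_FLAG);
 *		return 1;	// reply value, not an error code
 *	}
 */
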
/* List of known BPF sock_ops operators.
 * New entries can only be added at the end
 */
enum {
	BPF_SOCK_OPS_VOID,
	BPF_SOCK_OPS_TIMEOUT_INIT,	/* Should return SYN-RTO value to use or
					 * -1 if default value should be used
					 */
	BPF_SOCK_OPS_RWND_INIT,		/* Should return initial advertised
					 * window (in packets) or -1 if default
					 * value should be used
					 */
	BPF_SOCK_OPS_TCP_CONNECT_CB,	/* Calls BPF program right before an
					 * active connection is initialized
					 */
	BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB,	/* Calls BPF program when an
						 * active connection is
						 * established
						 */
	BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB,	/* Calls BPF program when a
						 * passive connection is
						 * established
						 */
	BPF_SOCK_OPS_NEEDS_ECN,		/* If connection's congestion control
					 * needs ECN
					 */
	BPF_SOCK_OPS_BASE_RTT,		/* Get base RTT. The correct value is
					 * based on the path and may be
					 * dependent on the congestion control
					 * algorithm. In general it indicates
					 * a congestion threshold. RTTs above
					 * this indicate congestion
					 */
	BPF_SOCK_OPS_RTO_CB,		/* Called when an RTO has triggered.
					 * Arg1: value of icsk_retransmits
					 * Arg2: value of icsk_rto
					 * Arg3: whether RTO has expired
					 */
	BPF_SOCK_OPS_RETRANS_CB,	/* Called when skb is retransmitted.
					 * Arg1: sequence number of 1st byte
					 * Arg2: # segments
					 * Arg3: return value of
					 *       tcp_transmit_skb (0 => success)
					 */
	BPF_SOCK_OPS_STATE_CB,		/* Called when TCP changes state.
					 * Arg1: old_state
					 * Arg2: new_state
					 */
};

/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
 * changes between the TCP and BPF versions. Ideally this should never happen.
 * If it does, we need to add code to convert them before calling
 * the BPF sock_ops function.
 */
enum {
	BPF_TCP_ESTABLISHED = 1,
	BPF_TCP_SYN_SENT,
	BPF_TCP_SYN_RECV,
	BPF_TCP_FIN_WAIT1,
	BPF_TCP_FIN_WAIT2,
	BPF_TCP_TIME_WAIT,
	BPF_TCP_CLOSE,
	BPF_TCP_CLOSE_WAIT,
	BPF_TCP_LAST_ACK,
	BPF_TCP_LISTEN,
	BPF_TCP_CLOSING,	/* Now a valid state */
	BPF_TCP_NEW_SYN_RECV,

	BPF_TCP_MAX_STATES	/* Leave at the end! */
};

#define TCP_BPF_IW		1001	/* Set TCP initial congestion window */
#define TCP_BPF_SNDCWND_CLAMP	1002	/* Set sndcwnd_clamp */

struct bpf_perf_event_value {
	__u64 counter;
	__u64 enabled;
	__u64 running;
};

#define BPF_DEVCG_ACC_MKNOD	(1ULL << 0)
#define BPF_DEVCG_ACC_READ	(1ULL << 1)
#define BPF_DEVCG_ACC_WRITE	(1ULL << 2)

#define BPF_DEVCG_DEV_BLOCK	(1ULL << 0)
#define BPF_DEVCG_DEV_CHAR	(1ULL << 1)

struct bpf_cgroup_dev_ctx {
	/* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */
	__u32 access_type;
	__u32 major;
	__u32 minor;
};
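
/* Illustrative sketch (not part of the UAPI): a CGROUP_DEVICE program splits
 * access_type back into its two halves exactly as the encoding comment above
 * describes, then decides on the (major, minor) pair. The SEC() naming
 * follows common loader conventions and is an assumption; returning 1 allows
 * the access, 0 denies it:
 *
 *	SEC("cgroup/dev")
 *	int dev_prog(struct bpf_cgroup_dev_ctx *ctx)
 *	{
 *		__u32 access = ctx->access_type >> 16;		// BPF_DEVCG_ACC_*
 *		__u32 type   = ctx->access_type & 0xffff;	// BPF_DEVCG_DEV_*
 *
 *		if (type == BPF_DEVCG_DEV_CHAR &&
 *		    ctx->major == 1 && ctx->minor == 3)		// /dev/null
 *			return 1;				// allow
 *		return !(access & BPF_DEVCG_ACC_MKNOD);		// deny mknod elsewhere
 *	}
 */
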
struct bpf_raw_tracepoint_args {
	__u64 args[0];
};

#endif /* _UAPI__LINUX_BPF_H__ */