/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#ifndef _UAPI__LINUX_BPF_H__
#define _UAPI__LINUX_BPF_H__

#include <linux/types.h>
#include <linux/bpf_common.h>

/* Extended instruction set based on top of classic BPF */

/* instruction classes */
#define BPF_ALU64	0x07	/* alu mode in double word width */

/* ld/ldx fields */
#define BPF_DW		0x18	/* double word */
#define BPF_XADD	0xc0	/* exclusive add */

/* alu/jmp fields */
#define BPF_MOV		0xb0	/* mov reg to reg */
#define BPF_ARSH	0xc0	/* sign extending arithmetic shift right */

/* change endianness of a register */
#define BPF_END		0xd0	/* flags for endianness conversion: */
#define BPF_TO_LE	0x00	/* convert to little-endian */
#define BPF_TO_BE	0x08	/* convert to big-endian */
#define BPF_FROM_LE	BPF_TO_LE
#define BPF_FROM_BE	BPF_TO_BE

/* jmp encodings */
#define BPF_JNE		0x50	/* jump != */
#define BPF_JLT		0xa0	/* LT is unsigned, '<' */
#define BPF_JLE		0xb0	/* LE is unsigned, '<=' */
#define BPF_JSGT	0x60	/* SGT is signed '>', GT in x86 */
#define BPF_JSGE	0x70	/* SGE is signed '>=', GE in x86 */
#define BPF_JSLT	0xc0	/* SLT is signed, '<' */
#define BPF_JSLE	0xd0	/* SLE is signed, '<=' */
#define BPF_CALL	0x80	/* function call */
#define BPF_EXIT	0x90	/* function return */

/* Register numbers */
enum {
	BPF_REG_0 = 0,
	BPF_REG_1,
	BPF_REG_2,
	BPF_REG_3,
	BPF_REG_4,
	BPF_REG_5,
	BPF_REG_6,
	BPF_REG_7,
	BPF_REG_8,
	BPF_REG_9,
	BPF_REG_10,
	__MAX_BPF_REG,
};

/* BPF has 10 general purpose 64-bit registers and a stack frame. */
#define MAX_BPF_REG	__MAX_BPF_REG

struct bpf_insn {
	__u8	code;		/* opcode */
	__u8	dst_reg:4;	/* dest register */
	__u8	src_reg:4;	/* source register */
	__s16	off;		/* signed offset */
	__s32	imm;		/* signed immediate constant */
};

/* Key of a BPF_MAP_TYPE_LPM_TRIE entry */
struct bpf_lpm_trie_key {
	__u32	prefixlen;	/* up to 32 for AF_INET, 128 for AF_INET6 */
	__u8	data[0];	/* Arbitrary size */
};

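/* Illustrative sketch, not part of the UAPI: the opcode fields and register
 * numbers above are enough to hand-encode instructions with struct bpf_insn.
 * For example, a minimal program equivalent to "r0 = 0; exit" (BPF_JMP and
 * BPF_K come from <linux/bpf_common.h>; real programs are normally emitted
 * by a compiler):
 *
 *	struct bpf_insn prog[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
 *		  .dst_reg = BPF_REG_0, .imm = 0 },	// r0 = 0
 *		{ .code = BPF_JMP | BPF_EXIT },		// return r0
 *	};
 */
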
/* BPF syscall commands, see bpf(2) man-page for details. */
enum bpf_cmd {
	BPF_MAP_CREATE,
	BPF_MAP_LOOKUP_ELEM,
	BPF_MAP_UPDATE_ELEM,
	BPF_MAP_DELETE_ELEM,
	BPF_MAP_GET_NEXT_KEY,
	BPF_PROG_LOAD,
	BPF_OBJ_PIN,
	BPF_OBJ_GET,
	BPF_PROG_ATTACH,
	BPF_PROG_DETACH,
	BPF_PROG_TEST_RUN,
	BPF_PROG_GET_NEXT_ID,
	BPF_MAP_GET_NEXT_ID,
	BPF_PROG_GET_FD_BY_ID,
	BPF_MAP_GET_FD_BY_ID,
	BPF_OBJ_GET_INFO_BY_FD,
};

enum bpf_map_type {
	BPF_MAP_TYPE_UNSPEC,
	BPF_MAP_TYPE_HASH,
	BPF_MAP_TYPE_ARRAY,
	BPF_MAP_TYPE_PROG_ARRAY,
	BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	BPF_MAP_TYPE_PERCPU_HASH,
	BPF_MAP_TYPE_PERCPU_ARRAY,
	BPF_MAP_TYPE_STACK_TRACE,
	BPF_MAP_TYPE_CGROUP_ARRAY,
	BPF_MAP_TYPE_LRU_HASH,
	BPF_MAP_TYPE_LRU_PERCPU_HASH,
	BPF_MAP_TYPE_LPM_TRIE,
	BPF_MAP_TYPE_ARRAY_OF_MAPS,
	BPF_MAP_TYPE_HASH_OF_MAPS,
	BPF_MAP_TYPE_DEVMAP,
	BPF_MAP_TYPE_SOCKMAP,
};

enum bpf_prog_type {
	BPF_PROG_TYPE_UNSPEC,
	BPF_PROG_TYPE_SOCKET_FILTER,
	BPF_PROG_TYPE_KPROBE,
	BPF_PROG_TYPE_SCHED_CLS,
	BPF_PROG_TYPE_SCHED_ACT,
	BPF_PROG_TYPE_TRACEPOINT,
	BPF_PROG_TYPE_XDP,
	BPF_PROG_TYPE_PERF_EVENT,
	BPF_PROG_TYPE_CGROUP_SKB,
	BPF_PROG_TYPE_CGROUP_SOCK,
	BPF_PROG_TYPE_LWT_IN,
	BPF_PROG_TYPE_LWT_OUT,
	BPF_PROG_TYPE_LWT_XMIT,
	BPF_PROG_TYPE_SOCK_OPS,
	BPF_PROG_TYPE_SK_SKB,
};

enum bpf_attach_type {
	BPF_CGROUP_INET_INGRESS,
	BPF_CGROUP_INET_EGRESS,
	BPF_CGROUP_INET_SOCK_CREATE,
	BPF_CGROUP_SOCK_OPS,
	BPF_CGROUP_SMAP_INGRESS,
	__MAX_BPF_ATTACH_TYPE
};

#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE

/* If BPF_SOCKMAP_STRPARSER is used, sockmap will use strparser on receive */
#define BPF_SOCKMAP_STRPARSER	(1U << 0)

/* If the BPF_F_ALLOW_OVERRIDE flag is used in the BPF_PROG_ATTACH command
 * for the given target_fd cgroup, descendant cgroups will be able to
 * override the effective bpf program that was inherited from this cgroup.
 */
#define BPF_F_ALLOW_OVERRIDE	(1U << 0)

/* If BPF_F_STRICT_ALIGNMENT is used in the BPF_PROG_LOAD command, the
 * verifier will perform strict alignment checking as if the kernel
 * had been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set,
 * and NET_IP_ALIGN defined to 2.
 */
#define BPF_F_STRICT_ALIGNMENT	(1U << 0)

#define BPF_PSEUDO_MAP_FD	1

/* flags for BPF_MAP_UPDATE_ELEM command */
#define BPF_ANY		0 /* create new element or update existing */
#define BPF_NOEXIST	1 /* create new element if it didn't exist */
#define BPF_EXIST	2 /* update existing element */

/* flags for BPF_MAP_CREATE command */
#define BPF_F_NO_PREALLOC	(1U << 0)
/* Instead of having one common LRU list in the
 * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
 * which can scale and perform better.
 * Note, the LRU nodes (including free nodes) cannot be moved
 * across different LRU lists.
 */
#define BPF_F_NO_COMMON_LRU	(1U << 1)
/* Specify numa node during map creation */
#define BPF_F_NUMA_NODE		(1U << 2)

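/* Illustrative sketch, not part of the UAPI: from a BPF program, the update
 * flags above select the insert-vs-update behaviour of the
 * bpf_map_update_elem() helper documented further below ("my_map" and "key"
 * are hypothetical):
 *
 *	__u32 key = 1;
 *	long val = 1;
 *	// create the entry, fails with -EEXIST if the key is already present
 *	bpf_map_update_elem(&my_map, &key, &val, BPF_NOEXIST);
 *	// overwrite the entry, fails with -ENOENT if the key is absent
 *	bpf_map_update_elem(&my_map, &key, &val, BPF_EXIST);
 */
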
union bpf_attr {
	struct { /* anonymous struct used by BPF_MAP_CREATE command */
		__u32	map_type;	/* one of enum bpf_map_type */
		__u32	key_size;	/* size of key in bytes */
		__u32	value_size;	/* size of value in bytes */
		__u32	max_entries;	/* max number of entries in a map */
		__u32	map_flags;	/* BPF_MAP_CREATE related
					 * flags defined above.
					 */
		__u32	inner_map_fd;	/* fd pointing to the inner map */
		__u32	numa_node;	/* numa node (effective only if
					 * BPF_F_NUMA_NODE is set).
					 */
	};

	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
		__u32		map_fd;
		__aligned_u64	key;
		union {
			__aligned_u64 value;
			__aligned_u64 next_key;
		};
		__u64		flags;
	};

	struct { /* anonymous struct used by BPF_PROG_LOAD command */
		__u32		prog_type;	/* one of enum bpf_prog_type */
		__u32		insn_cnt;
		__aligned_u64	insns;
		__aligned_u64	license;
		__u32		log_level;	/* verbosity level of verifier */
		__u32		log_size;	/* size of user buffer */
		__aligned_u64	log_buf;	/* user supplied buffer */
		__u32		kern_version;	/* checked when prog_type=kprobe */
		__u32		prog_flags;
	};

	struct { /* anonymous struct used by BPF_OBJ_* commands */
		__aligned_u64	pathname;
		__u32		bpf_fd;
	};

	struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
		__u32		target_fd;	/* container object to attach to */
		__u32		attach_bpf_fd;	/* eBPF program to attach */
		__u32		attach_type;
		__u32		attach_flags;
		__u32		attach_bpf_fd2;
	};

	struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
		__u32		prog_fd;
		__u32		retval;
		__u32		data_size_in;
		__u32		data_size_out;
		__aligned_u64	data_in;
		__aligned_u64	data_out;
		__u32		repeat;
		__u32		duration;
	} test;

	struct { /* anonymous struct used by BPF_*_GET_*_ID commands */
		union {
			__u32	start_id;
			__u32	prog_id;
			__u32	map_id;
		};
		__u32		next_id;
	};

	struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD command */
		__u32		bpf_fd;
		__u32		info_len;
		__aligned_u64	info;
	} info;
} __attribute__((aligned(8)));

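/* Illustrative sketch, not part of the UAPI: user space drives all of the
 * commands above through the bpf(2) syscall, passing one of the anonymous
 * structs of union bpf_attr. Creating a 256-entry array map, for example
 * (there is no glibc wrapper, hence syscall(2)):
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	int create_array_map(void)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.map_type    = BPF_MAP_TYPE_ARRAY;
 *		attr.key_size    = sizeof(__u32);
 *		attr.value_size  = sizeof(__u64);
 *		attr.max_entries = 256;
 *
 *		return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *	}
 */
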
/* BPF helper function descriptions:
 *
 * void *bpf_map_lookup_elem(&map, &key)
 *     Return: Map value or NULL
 *
 * int bpf_map_update_elem(&map, &key, &value, flags)
 *     Return: 0 on success or negative error
 *
 * int bpf_map_delete_elem(&map, &key)
 *     Return: 0 on success or negative error
 *
 * int bpf_probe_read(void *dst, int size, void *src)
 *     Return: 0 on success or negative error
 *
 * u64 bpf_ktime_get_ns(void)
 *     Return: current ktime
 *
 * int bpf_trace_printk(const char *fmt, int fmt_size, ...)
 *     Return: length of buffer written or negative error
 *
 * u32 bpf_get_prandom_u32(void)
 *     Return: random value
 *
 * u32 bpf_get_smp_processor_id(void)
 *     Return: SMP processor ID
 *
 * int bpf_skb_store_bytes(skb, offset, from, len, flags)
 *     store bytes into packet
 *     @skb: pointer to skb
 *     @offset: offset within packet from skb->mac_header
 *     @from: pointer where to copy bytes from
 *     @len: number of bytes to store into packet
 *     @flags: bit 0 - if true, recompute skb->csum
 *             other bits - reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_l3_csum_replace(skb, offset, from, to, flags)
 *     recompute IP checksum
 *     @skb: pointer to skb
 *     @offset: offset within packet where IP checksum is located
 *     @from: old value of header field
 *     @to: new value of header field
 *     @flags: bits 0-3 - size of header field
 *             other bits - reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_l4_csum_replace(skb, offset, from, to, flags)
 *     recompute TCP/UDP checksum
 *     @skb: pointer to skb
 *     @offset: offset within packet where TCP/UDP checksum is located
 *     @from: old value of header field
 *     @to: new value of header field
 *     @flags: bits 0-3 - size of header field
 *             bit 4 - is pseudo header
 *             other bits - reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_tail_call(ctx, prog_array_map, index)
 *     jump into another BPF program
 *     @ctx: context pointer passed to next program
 *     @prog_array_map: pointer to map of type BPF_MAP_TYPE_PROG_ARRAY
 *     @index: index inside array that selects specific program to run
 *     Return: 0 on success or negative error
 *
 * int bpf_clone_redirect(skb, ifindex, flags)
 *     redirect to another netdev
 *     @skb: pointer to skb
 *     @ifindex: ifindex of the net device
 *     @flags: bit 0 - if set, redirect to ingress instead of egress
 *             other bits - reserved
 *     Return: 0 on success or negative error
 *
 * u64 bpf_get_current_pid_tgid(void)
 *     Return: current->tgid << 32 | current->pid
 *
 * u64 bpf_get_current_uid_gid(void)
 *     Return: current_gid << 32 | current_uid
 *
 * int bpf_get_current_comm(char *buf, int size_of_buf)
 *     stores current->comm into buf
 *     Return: 0 on success or negative error
 *
 * u32 bpf_get_cgroup_classid(skb)
 *     retrieve a proc's classid
 *     @skb: pointer to skb
 *     Return: classid if != 0
 *
 * int bpf_skb_vlan_push(skb, vlan_proto, vlan_tci)
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_vlan_pop(skb)
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_get_tunnel_key(skb, key, size, flags)
 * int bpf_skb_set_tunnel_key(skb, key, size, flags)
 *     retrieve or populate tunnel metadata
 *     @skb: pointer to skb
 *     @key: pointer to 'struct bpf_tunnel_key'
 *     @size: size of 'struct bpf_tunnel_key'
 *     @flags: room for future extensions
 *     Return: 0 on success or negative error
 *
 * u64 bpf_perf_event_read(map, flags)
 *     read perf event counter value
 *     @map: pointer to perf_event_array map
 *     @flags: index of event in the map or bitmask flags
 *     Return: value of perf event counter read or error code
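 *
 *     Example (illustrative, not part of the UAPI; "counters" is a
 *     hypothetical BPF_MAP_TYPE_PERF_EVENT_ARRAY map and the helper is
 *     assumed to be declared on the BPF program side):
 *
 *         u64 cnt = bpf_perf_event_read(&counters, BPF_F_CURRENT_CPU);
 *         if ((s64)cnt < 0)
 *             return 0;   // negative values are error codes, not counts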
 *
 * int bpf_redirect(ifindex, flags)
 *     redirect to another netdev
 *     @ifindex: ifindex of the net device
 *     @flags:
 *        cls_bpf:
 *          bit 0 - if set, redirect to ingress instead of egress
 *          other bits - reserved
 *        xdp_bpf:
 *          all bits - reserved
 *     Return: cls_bpf: TC_ACT_REDIRECT on success or TC_ACT_SHOT on error
 *             xdp_bpf: XDP_REDIRECT on success or XDP_ABORTED on error
 *
 * int bpf_redirect_map(map, key, flags)
 *     redirect to endpoint in map
 *     @map: pointer to dev map
 *     @key: index in map to lookup
 *     @flags: --
 *     Return: XDP_REDIRECT on success or XDP_ABORTED on error
 *
 * u32 bpf_get_route_realm(skb)
 *     retrieve a dst's tclassid
 *     @skb: pointer to skb
 *     Return: realm if != 0
 *
 * int bpf_perf_event_output(ctx, map, flags, data, size)
 *     output perf raw sample
 *     @ctx: struct pt_regs*
 *     @map: pointer to perf_event_array map
 *     @flags: index of event in the map or bitmask flags
 *     @data: data on stack to be output as raw data
 *     @size: size of data
 *     Return: 0 on success or negative error
 *
 * int bpf_get_stackid(ctx, map, flags)
 *     walk user or kernel stack and return id
 *     @ctx: struct pt_regs*
 *     @map: pointer to stack_trace map
 *     @flags: bits 0-7 - number of stack frames to skip
 *             bit 8 - collect user stack instead of kernel
 *             bit 9 - compare stacks by hash only
 *             bit 10 - if two different stacks hash into the same stackid
 *                      discard old
 *             other bits - reserved
 *     Return: >= 0 stackid on success or negative error
 *
 * s64 bpf_csum_diff(from, from_size, to, to_size, seed)
 *     calculate csum diff
 *     @from: raw from buffer
 *     @from_size: length of from buffer
 *     @to: raw to buffer
 *     @to_size: length of to buffer
 *     @seed: optional seed
 *     Return: csum result or negative error code
 *
 * int bpf_skb_get_tunnel_opt(skb, opt, size)
 *     retrieve tunnel options metadata
 *     @skb: pointer to skb
 *     @opt: pointer to raw tunnel option data
 *     @size: size of @opt
 *     Return: option size
 *
 * int bpf_skb_set_tunnel_opt(skb, opt, size)
 *     populate tunnel options metadata
 *     @skb: pointer to skb
 *     @opt: pointer to raw tunnel option data
 *     @size: size of @opt
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_change_proto(skb, proto, flags)
 *     Change protocol of the skb. Currently supported are the v4 -> v6 and
 *     v6 -> v4 transitions. The helper will also resize the skb. The eBPF
 *     program is expected to fill the new headers via skb_store_bytes
 *     and lX_csum_replace.
 *     @skb: pointer to skb
 *     @proto: new skb->protocol type
 *     @flags: reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_change_type(skb, type)
 *     Change packet type of skb.
 *     @skb: pointer to skb
 *     @type: new skb->pkt_type type
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_under_cgroup(skb, map, index)
 *     Check cgroup2 membership of skb
 *     @skb: pointer to skb
 *     @map: pointer to bpf_map of type BPF_MAP_TYPE_CGROUP_ARRAY
 *     @index: index of the cgroup in the bpf_map
 *     Return:
 *       == 0 skb failed the cgroup2 descendant test
 *       == 1 skb succeeded the cgroup2 descendant test
 *        < 0 error
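 *
 *     Example (illustrative, not part of the UAPI): pushing a sample from a
 *     kprobe program to user space through a hypothetical "events" map of
 *     type BPF_MAP_TYPE_PERF_EVENT_ARRAY, using bpf_perf_event_output()
 *     documented above:
 *
 *         struct { u32 pid; u64 ts; } ev = {
 *             .pid = bpf_get_current_pid_tgid() >> 32,
 *             .ts  = bpf_ktime_get_ns(),
 *         };
 *         bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *                               &ev, sizeof(ev));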
 *
 * u32 bpf_get_hash_recalc(skb)
 *     Retrieve and possibly recalculate skb->hash.
 *     @skb: pointer to skb
 *     Return: hash
 *
 * u64 bpf_get_current_task(void)
 *     Returns current task_struct
 *     Return: current
 *
 * int bpf_probe_write_user(void *dst, void *src, int len)
 *     safely attempt to write to a location
 *     @dst: destination address in userspace
 *     @src: source address on stack
 *     @len: number of bytes to copy
 *     Return: 0 on success or negative error
 *
 * int bpf_current_task_under_cgroup(map, index)
 *     Check cgroup2 membership of current task
 *     @map: pointer to bpf_map of type BPF_MAP_TYPE_CGROUP_ARRAY
 *     @index: index of the cgroup in the bpf_map
 *     Return:
 *       == 0 current failed the cgroup2 descendant test
 *       == 1 current succeeded the cgroup2 descendant test
 *        < 0 error
 *
 * int bpf_skb_change_tail(skb, len, flags)
 *     The helper will resize the skb to the given new size, to be used e.g.
 *     with control messages.
 *     @skb: pointer to skb
 *     @len: new skb length
 *     @flags: reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_pull_data(skb, len)
 *     The helper will pull in non-linear data in case the skb is non-linear
 *     and not all of len are part of the linear section. Only needed for
 *     read/write with direct packet access.
 *     @skb: pointer to skb
 *     @len: len to make read/writeable
 *     Return: 0 on success or negative error
 *
 * s64 bpf_csum_update(skb, csum)
 *     Adds csum into skb->csum in case of CHECKSUM_COMPLETE.
 *     @skb: pointer to skb
 *     @csum: csum to add
 *     Return: csum on success or negative error
 *
 * void bpf_set_hash_invalid(skb)
 *     Invalidate current skb->hash.
 *     @skb: pointer to skb
 *
 * int bpf_get_numa_node_id()
 *     Return: Id of current NUMA node.
 *
 * int bpf_skb_change_head(skb, len, flags)
 *     Grows headroom of skb and adjusts the MAC header offset accordingly.
 *     Will extend/reallocate as required automatically.
 *     May change skb data pointer and will thus invalidate any check
 *     performed for direct packet access.
 *     @skb: pointer to skb
 *     @len: length of header to be pushed in front
 *     @flags: Flags (unused for now)
 *     Return: 0 on success or negative error
 *
 * int bpf_xdp_adjust_head(xdp_md, delta)
 *     Adjust the xdp_md.data by delta
 *     @xdp_md: pointer to xdp_md
 *     @delta: A positive/negative integer to be added to xdp_md.data
 *     Return: 0 on success or negative on error
 *
 * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr)
 *     Copy a NUL terminated string from an unsafe address. In case the
 *     string length is smaller than size, the target is not padded with
 *     further NUL bytes. In case the string length is larger than size,
 *     just size-1 bytes are copied and the last byte is set to NUL.
 *     @dst: destination address
 *     @size: maximum number of bytes to copy, including the trailing NUL
 *     @unsafe_ptr: unsafe address
 *     Return:
 *       > 0 length of the string including the trailing NUL on success
 *       < 0 error
 *
 * u64 bpf_get_socket_cookie(skb)
 *     Get the cookie for the socket stored inside sk_buff.
 *     @skb: pointer to skb
 *     Return: 8 Bytes non-decreasing number on success or 0 if the socket
 *     field is missing inside sk_buff
 *
 * u32 bpf_get_socket_uid(skb)
 *     Get the owner uid of the socket stored inside sk_buff.
 *     @skb: pointer to skb
 *     Return: uid of the socket owner on success or overflowuid if failed.
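 *
 *     Example (illustrative, not part of the UAPI): copying a NUL-terminated
 *     string out of an unsafe kernel address with bpf_probe_read_str()
 *     documented above; the returned length already includes the trailing
 *     NUL ("unsafe_ptr" is whatever address the program is probing):
 *
 *         char comm[16];
 *         int len = bpf_probe_read_str(comm, sizeof(comm), unsafe_ptr);
 *         if (len < 0)
 *             return 0;   // fault while reading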
 *
 * u32 bpf_set_hash(skb, hash)
 *     Set full skb->hash.
 *     @skb: pointer to skb
 *     @hash: hash to set
 *
 * int bpf_setsockopt(bpf_socket, level, optname, optval, optlen)
 *     Calls setsockopt. Not all opts are available, only those with
 *     integer optvals plus TCP_CONGESTION.
 *     Supported levels: SOL_SOCKET and IPPROTO_TCP
 *     @bpf_socket: pointer to bpf_socket
 *     @level: SOL_SOCKET or IPPROTO_TCP
 *     @optname: option name
 *     @optval: pointer to option value
 *     @optlen: length of optval in bytes
 *     Return: 0 or negative error
 *
 * int bpf_skb_adjust_room(skb, len_diff, mode, flags)
 *     Grow or shrink room in sk_buff.
 *     @skb: pointer to skb
 *     @len_diff: (signed) amount of room to grow/shrink
 *     @mode: operation mode (enum bpf_adj_room_mode)
 *     @flags: reserved for future use
 *     Return: 0 on success or negative error code
 *
 * int bpf_sk_redirect_map(map, key, flags)
 *     Redirect skb to a sock in map using key as a lookup key for the
 *     sock in map.
 *     @map: pointer to sockmap
 *     @key: key to lookup sock in map
 *     @flags: reserved for future use
 *     Return: SK_REDIRECT
 *
 * int bpf_sock_map_update(skops, map, key, flags, map_flags)
 *     @skops: pointer to bpf_sock_ops
 *     @map: pointer to sockmap to update
 *     @key: key to insert/update sock in map
 *     @flags: same flags as map update elem
 *     @map_flags: sock map specific flags
 *        bit 1: Enable strparser
 *        other bits: reserved
 */
#define __BPF_FUNC_MAPPER(FN)		\
	FN(unspec),			\
	FN(map_lookup_elem),		\
	FN(map_update_elem),		\
	FN(map_delete_elem),		\
	FN(probe_read),			\
	FN(ktime_get_ns),		\
	FN(trace_printk),		\
	FN(get_prandom_u32),		\
	FN(get_smp_processor_id),	\
	FN(skb_store_bytes),		\
	FN(l3_csum_replace),		\
	FN(l4_csum_replace),		\
	FN(tail_call),			\
	FN(clone_redirect),		\
	FN(get_current_pid_tgid),	\
	FN(get_current_uid_gid),	\
	FN(get_current_comm),		\
	FN(get_cgroup_classid),		\
	FN(skb_vlan_push),		\
	FN(skb_vlan_pop),		\
	FN(skb_get_tunnel_key),		\
	FN(skb_set_tunnel_key),		\
	FN(perf_event_read),		\
	FN(redirect),			\
	FN(get_route_realm),		\
	FN(perf_event_output),		\
	FN(skb_load_bytes),		\
	FN(get_stackid),		\
	FN(csum_diff),			\
	FN(skb_get_tunnel_opt),		\
	FN(skb_set_tunnel_opt),		\
	FN(skb_change_proto),		\
	FN(skb_change_type),		\
	FN(skb_under_cgroup),		\
	FN(get_hash_recalc),		\
	FN(get_current_task),		\
	FN(probe_write_user),		\
	FN(current_task_under_cgroup),	\
	FN(skb_change_tail),		\
	FN(skb_pull_data),		\
	FN(csum_update),		\
	FN(set_hash_invalid),		\
	FN(get_numa_node_id),		\
	FN(skb_change_head),		\
	FN(xdp_adjust_head),		\
	FN(probe_read_str),		\
	FN(get_socket_cookie),		\
	FN(get_socket_uid),		\
	FN(set_hash),			\
	FN(setsockopt),			\
	FN(skb_adjust_room),		\
	FN(redirect_map),		\
	FN(sk_redirect_map),		\
	FN(sock_map_update),

/* integer value in 'imm' field of BPF_CALL instruction selects which helper
 * function eBPF program intends to call
 */
#define __BPF_ENUM_FN(x) BPF_FUNC_ ## x
enum bpf_func_id {
	__BPF_FUNC_MAPPER(__BPF_ENUM_FN)
	__BPF_FUNC_MAX_ID,
};
#undef __BPF_ENUM_FN

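/* Illustrative expansion, not part of the UAPI: with __BPF_ENUM_FN applied,
 * the mapper above generates
 *
 *	enum bpf_func_id {
 *		BPF_FUNC_unspec,		// = 0
 *		BPF_FUNC_map_lookup_elem,	// = 1
 *		BPF_FUNC_map_update_elem,	// = 2
 *		...
 *		__BPF_FUNC_MAX_ID,
 *	};
 *
 * and a helper call is encoded as a BPF_CALL instruction whose 'imm' field
 * holds the function id, e.g.:
 *
 *	struct bpf_insn call = {
 *		.code = BPF_JMP | BPF_CALL,
 *		.imm  = BPF_FUNC_map_lookup_elem,
 *	};
 */
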
/* All flags used by eBPF helper functions, placed here. */

/* BPF_FUNC_skb_store_bytes flags. */
#define BPF_F_RECOMPUTE_CSUM		(1ULL << 0)
#define BPF_F_INVALIDATE_HASH		(1ULL << 1)

/* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
 * First 4 bits are for passing the header field size.
 */
#define BPF_F_HDR_FIELD_MASK		0xfULL

/* BPF_FUNC_l4_csum_replace flags. */
#define BPF_F_PSEUDO_HDR		(1ULL << 4)
#define BPF_F_MARK_MANGLED_0		(1ULL << 5)
#define BPF_F_MARK_ENFORCE		(1ULL << 6)

/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
#define BPF_F_INGRESS			(1ULL << 0)

/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
#define BPF_F_TUNINFO_IPV6		(1ULL << 0)

/* BPF_FUNC_get_stackid flags. */
#define BPF_F_SKIP_FIELD_MASK		0xffULL
#define BPF_F_USER_STACK		(1ULL << 8)
#define BPF_F_FAST_STACK_CMP		(1ULL << 9)
#define BPF_F_REUSE_STACKID		(1ULL << 10)

/* BPF_FUNC_skb_set_tunnel_key flags. */
#define BPF_F_ZERO_CSUM_TX		(1ULL << 1)
#define BPF_F_DONT_FRAGMENT		(1ULL << 2)

/* BPF_FUNC_perf_event_output and BPF_FUNC_perf_event_read flags. */
#define BPF_F_INDEX_MASK		0xffffffffULL
#define BPF_F_CURRENT_CPU		BPF_F_INDEX_MASK
/* BPF_FUNC_perf_event_output for sk_buff input context. */
#define BPF_F_CTXLEN_MASK		(0xfffffULL << 32)

/* Mode for BPF_FUNC_skb_adjust_room helper. */
enum bpf_adj_room_mode {
	BPF_ADJ_ROOM_NET,
};

/* user accessible mirror of in-kernel sk_buff.
 * new fields can only be added to the end of this structure
 */
struct __sk_buff {
	__u32 len;
	__u32 pkt_type;
	__u32 mark;
	__u32 queue_mapping;
	__u32 protocol;
	__u32 vlan_present;
	__u32 vlan_tci;
	__u32 vlan_proto;
	__u32 priority;
	__u32 ingress_ifindex;
	__u32 ifindex;
	__u32 tc_index;
	__u32 cb[5];
	__u32 hash;
	__u32 tc_classid;
	__u32 data;
	__u32 data_end;
	__u32 napi_id;

	/* accessed by BPF_PROG_TYPE_SK_SKB program types */
	__u32 family;
	__u32 remote_ip4;	/* Stored in network byte order */
	__u32 local_ip4;	/* Stored in network byte order */
	__u32 remote_ip6[4];	/* Stored in network byte order */
	__u32 local_ip6[4];	/* Stored in network byte order */
	__u32 remote_port;	/* Stored in network byte order */
	__u32 local_port;	/* stored in host byte order */
};

struct bpf_tunnel_key {
	__u32 tunnel_id;
	union {
		__u32 remote_ipv4;
		__u32 remote_ipv6[4];
	};
	__u8 tunnel_tos;
	__u8 tunnel_ttl;
	__u16 tunnel_ext;
	__u32 tunnel_label;
};

/* Generic BPF return codes which all BPF program types may support.
 * The values are binary compatible with their TC_ACT_* counter-part to
 * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT
 * programs.
 *
 * XDP is handled separately, see XDP_*.
 */
enum bpf_ret_code {
	BPF_OK = 0,
	/* 1 reserved */
	BPF_DROP = 2,
	/* 3-6 reserved */
	BPF_REDIRECT = 7,
	/* >127 are reserved for prog type specific return codes */
};

struct bpf_sock {
	__u32 bound_dev_if;
	__u32 family;
	__u32 type;
	__u32 protocol;
};

#define XDP_PACKET_HEADROOM 256

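/* Illustrative sketch, not part of the UAPI: the data/data_end members of
 * struct __sk_buff support verifier-checked direct packet access. A program
 * type that uses the generic return codes above (e.g. BPF_PROG_TYPE_LWT_IN,
 * which sees the packet from the network header) could bounds-check an IP
 * header like this ("struct iphdr" comes from <linux/ip.h>):
 *
 *	void *data     = (void *)(long)skb->data;
 *	void *data_end = (void *)(long)skb->data_end;
 *	struct iphdr *iph = data;
 *
 *	if (data + sizeof(*iph) > data_end)
 *		return BPF_DROP;
 *	return BPF_OK;
 */
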
/* User return codes for XDP prog type.
 * A valid XDP program must return one of these defined values. All other
 * return codes are reserved for future use. Unknown return codes will result
 * in packet drop.
 */
enum xdp_action {
	XDP_ABORTED = 0,
	XDP_DROP,
	XDP_PASS,
	XDP_TX,
	XDP_REDIRECT,
};

/* user accessible metadata for XDP packet hook
 * new fields must be added to the end of this structure
 */
struct xdp_md {
	__u32 data;
	__u32 data_end;
};

enum sk_action {
	SK_ABORTED = 0,
	SK_DROP,
	SK_REDIRECT,
};

#define BPF_TAG_SIZE 8

struct bpf_prog_info {
	__u32 type;
	__u32 id;
	__u8  tag[BPF_TAG_SIZE];
	__u32 jited_prog_len;
	__u32 xlated_prog_len;
	__aligned_u64 jited_prog_insns;
	__aligned_u64 xlated_prog_insns;
} __attribute__((aligned(8)));

struct bpf_map_info {
	__u32 type;
	__u32 id;
	__u32 key_size;
	__u32 value_size;
	__u32 max_entries;
	__u32 map_flags;
} __attribute__((aligned(8)));

/* User bpf_sock_ops struct to access socket values and specify request ops
 * and their replies.
 * Some of these fields are in network (big-endian) byte order and may need
 * to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h).
 * New fields can only be added at the end of this structure
 */
struct bpf_sock_ops {
	__u32 op;
	union {
		__u32 reply;
		__u32 replylong[4];
	};
	__u32 family;
	__u32 remote_ip4;	/* Stored in network byte order */
	__u32 local_ip4;	/* Stored in network byte order */
	__u32 remote_ip6[4];	/* Stored in network byte order */
	__u32 local_ip6[4];	/* Stored in network byte order */
	__u32 remote_port;	/* Stored in network byte order */
	__u32 local_port;	/* stored in host byte order */
};

/* List of known BPF sock_ops operators.
 * New entries can only be added at the end
 */
enum {
	BPF_SOCK_OPS_VOID,
	BPF_SOCK_OPS_TIMEOUT_INIT,	/* Should return SYN-RTO value to use or
					 * -1 if default value should be used
					 */
	BPF_SOCK_OPS_RWND_INIT,		/* Should return initial advertised
					 * window (in packets) or -1 if default
					 * value should be used
					 */
	BPF_SOCK_OPS_TCP_CONNECT_CB,	/* Calls BPF program right before an
					 * active connection is initialized
					 */
	BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB,	/* Calls BPF program when an
						 * active connection is
						 * established
						 */
	BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB,	/* Calls BPF program when a
						 * passive connection is
						 * established
						 */
	BPF_SOCK_OPS_NEEDS_ECN,		/* If connection's congestion control
					 * needs ECN
					 */
};

#define TCP_BPF_IW		1001	/* Set TCP initial congestion window */
#define TCP_BPF_SNDCWND_CLAMP	1002	/* Set sndcwnd_clamp */

#endif /* _UAPI__LINUX_BPF_H__ */
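
/* Illustrative sketch, not part of the UAPI: a BPF_PROG_TYPE_SOCK_OPS
 * program dispatches on bpf_sock_ops->op and answers the request ops above
 * through its return value, e.g. forcing a 10-packet initial window while
 * keeping kernel defaults for everything else:
 *
 *	int sock_ops_prog(struct bpf_sock_ops *skops)
 *	{
 *		switch (skops->op) {
 *		case BPF_SOCK_OPS_RWND_INIT:
 *			return 10;	// initial advertised window, in packets
 *		case BPF_SOCK_OPS_TIMEOUT_INIT:
 *		default:
 *			return -1;	// use kernel defaults
 *		}
 *	}
 */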