/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#ifndef _UAPI__LINUX_BPF_H__
#define _UAPI__LINUX_BPF_H__

#include <linux/types.h>
#include <linux/bpf_common.h>

/* Extended instruction set based on top of classic BPF */

/* instruction classes */
#define BPF_ALU64       0x07    /* alu mode in double word width */

/* ld/ldx fields */
#define BPF_DW          0x18    /* double word */
#define BPF_XADD        0xc0    /* exclusive add */

/* alu/jmp fields */
#define BPF_MOV         0xb0    /* mov reg to reg */
#define BPF_ARSH        0xc0    /* sign extending arithmetic shift right */

/* change endianness of a register */
#define BPF_END         0xd0    /* flags for endianness conversion: */
#define BPF_TO_LE       0x00    /* convert to little-endian */
#define BPF_TO_BE       0x08    /* convert to big-endian */
#define BPF_FROM_LE     BPF_TO_LE
#define BPF_FROM_BE     BPF_TO_BE

#define BPF_JNE         0x50    /* jump != */
#define BPF_JSGT        0x60    /* SGT is signed '>', GT in x86 */
#define BPF_JSGE        0x70    /* SGE is signed '>=', GE in x86 */
#define BPF_CALL        0x80    /* function call */
#define BPF_EXIT        0x90    /* function return */

/* Register numbers */
enum {
        BPF_REG_0 = 0,
        BPF_REG_1,
        BPF_REG_2,
        BPF_REG_3,
        BPF_REG_4,
        BPF_REG_5,
        BPF_REG_6,
        BPF_REG_7,
        BPF_REG_8,
        BPF_REG_9,
        BPF_REG_10,
        __MAX_BPF_REG,
};

/* BPF has 10 general purpose 64-bit registers and a stack frame. */
#define MAX_BPF_REG     __MAX_BPF_REG

struct bpf_insn {
        __u8    code;           /* opcode */
        __u8    dst_reg:4;      /* dest register */
        __u8    src_reg:4;      /* source register */
        __s16   off;            /* signed offset */
        __s32   imm;            /* signed immediate constant */
};

/* Key of a BPF_MAP_TYPE_LPM_TRIE entry */
struct bpf_lpm_trie_key {
        __u32   prefixlen;      /* up to 32 for AF_INET, 128 for AF_INET6 */
        __u8    data[0];        /* Arbitrary size */
};
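
/* Example (illustrative sketch, not part of the UAPI): a key for an LPM trie
 * map holding IPv4 prefixes places the address bytes right after prefixlen.
 * The wrapper struct and the 192.168.0.0/16 prefix below are hypothetical;
 * such a map is created with key_size == sizeof(key) and map_flags including
 * BPF_F_NO_PREALLOC.
 *
 *      struct ipv4_lpm_key {
 *              __u32 prefixlen;        // same layout as struct bpf_lpm_trie_key
 *              __u8  addr[4];          // the data[] part, most significant byte first
 *      };
 *
 *      struct ipv4_lpm_key key = {
 *              .prefixlen = 16,                // match 192.168.0.0/16
 *              .addr      = { 192, 168, 0, 0 },
 *      };
 *      // 'key' is then passed as attr.key to BPF_MAP_LOOKUP_ELEM on a map of
 *      // type BPF_MAP_TYPE_LPM_TRIE.
 */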

/* BPF syscall commands, see bpf(2) man-page for details. */
enum bpf_cmd {
        BPF_MAP_CREATE,
        BPF_MAP_LOOKUP_ELEM,
        BPF_MAP_UPDATE_ELEM,
        BPF_MAP_DELETE_ELEM,
        BPF_MAP_GET_NEXT_KEY,
        BPF_PROG_LOAD,
        BPF_OBJ_PIN,
        BPF_OBJ_GET,
        BPF_PROG_ATTACH,
        BPF_PROG_DETACH,
        BPF_PROG_TEST_RUN,
        BPF_PROG_GET_NEXT_ID,
        BPF_MAP_GET_NEXT_ID,
        BPF_PROG_GET_FD_BY_ID,
        BPF_MAP_GET_FD_BY_ID,
        BPF_OBJ_GET_INFO_BY_FD,
};

enum bpf_map_type {
        BPF_MAP_TYPE_UNSPEC,
        BPF_MAP_TYPE_HASH,
        BPF_MAP_TYPE_ARRAY,
        BPF_MAP_TYPE_PROG_ARRAY,
        BPF_MAP_TYPE_PERF_EVENT_ARRAY,
        BPF_MAP_TYPE_PERCPU_HASH,
        BPF_MAP_TYPE_PERCPU_ARRAY,
        BPF_MAP_TYPE_STACK_TRACE,
        BPF_MAP_TYPE_CGROUP_ARRAY,
        BPF_MAP_TYPE_LRU_HASH,
        BPF_MAP_TYPE_LRU_PERCPU_HASH,
        BPF_MAP_TYPE_LPM_TRIE,
        BPF_MAP_TYPE_ARRAY_OF_MAPS,
        BPF_MAP_TYPE_HASH_OF_MAPS,
};

enum bpf_prog_type {
        BPF_PROG_TYPE_UNSPEC,
        BPF_PROG_TYPE_SOCKET_FILTER,
        BPF_PROG_TYPE_KPROBE,
        BPF_PROG_TYPE_SCHED_CLS,
        BPF_PROG_TYPE_SCHED_ACT,
        BPF_PROG_TYPE_TRACEPOINT,
        BPF_PROG_TYPE_XDP,
        BPF_PROG_TYPE_PERF_EVENT,
        BPF_PROG_TYPE_CGROUP_SKB,
        BPF_PROG_TYPE_CGROUP_SOCK,
        BPF_PROG_TYPE_LWT_IN,
        BPF_PROG_TYPE_LWT_OUT,
        BPF_PROG_TYPE_LWT_XMIT,
};

enum bpf_attach_type {
        BPF_CGROUP_INET_INGRESS,
        BPF_CGROUP_INET_EGRESS,
        BPF_CGROUP_INET_SOCK_CREATE,
        __MAX_BPF_ATTACH_TYPE
};

#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE

/* If BPF_F_ALLOW_OVERRIDE flag is used in BPF_PROG_ATTACH command
 * to the given target_fd cgroup the descendant cgroup will be able to
 * override effective bpf program that was inherited from this cgroup
 */
#define BPF_F_ALLOW_OVERRIDE    (1U << 0)

/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
 * verifier will perform strict alignment checking as if the kernel
 * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set,
 * and NET_IP_ALIGN defined to 2.
 */
#define BPF_F_STRICT_ALIGNMENT  (1U << 0)

#define BPF_PSEUDO_MAP_FD       1

/* flags for BPF_MAP_UPDATE_ELEM command */
#define BPF_ANY         0 /* create new element or update existing */
#define BPF_NOEXIST     1 /* create new element if it didn't exist */
#define BPF_EXIST       2 /* update existing element */

#define BPF_F_NO_PREALLOC       (1U << 0)
/* Instead of having one common LRU list in the
 * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
 * which can scale and perform better.
 * Note, the LRU nodes (including free nodes) cannot be moved
 * across different LRU lists.
 */
#define BPF_F_NO_COMMON_LRU     (1U << 1)
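
/* Example (illustrative sketch, not part of the UAPI): creating a hash map
 * and inserting an element with the BPF_NOEXIST flag through the bpf(2)
 * syscall. The helper ptr_to_u64() is the caller's own; the union bpf_attr
 * used here is defined just below.
 *
 *      #include <string.h>
 *      #include <unistd.h>
 *      #include <sys/syscall.h>
 *      #include <linux/bpf.h>
 *
 *      static __u64 ptr_to_u64(const void *p) { return (__u64)(unsigned long)p; }
 *
 *      int create_and_fill(void)
 *      {
 *              union bpf_attr attr;
 *              __u32 key = 1, value = 42;
 *              int map_fd;
 *
 *              memset(&attr, 0, sizeof(attr));
 *              attr.map_type    = BPF_MAP_TYPE_HASH;
 *              attr.key_size    = sizeof(__u32);
 *              attr.value_size  = sizeof(__u32);
 *              attr.max_entries = 16;
 *              map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *              if (map_fd < 0)
 *                      return -1;
 *
 *              memset(&attr, 0, sizeof(attr));
 *              attr.map_fd = map_fd;
 *              attr.key    = ptr_to_u64(&key);
 *              attr.value  = ptr_to_u64(&value);
 *              attr.flags  = BPF_NOEXIST;      // fails with EEXIST if key is present
 *              return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
 *      }
 */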

union bpf_attr {
        struct { /* anonymous struct used by BPF_MAP_CREATE command */
                __u32   map_type;       /* one of enum bpf_map_type */
                __u32   key_size;       /* size of key in bytes */
                __u32   value_size;     /* size of value in bytes */
                __u32   max_entries;    /* max number of entries in a map */
                __u32   map_flags;      /* prealloc or not */
                __u32   inner_map_fd;   /* fd pointing to the inner map */
        };

        struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
                __u32           map_fd;
                __aligned_u64   key;
                union {
                        __aligned_u64 value;
                        __aligned_u64 next_key;
                };
                __u64           flags;
        };

        struct { /* anonymous struct used by BPF_PROG_LOAD command */
                __u32           prog_type;      /* one of enum bpf_prog_type */
                __u32           insn_cnt;
                __aligned_u64   insns;
                __aligned_u64   license;
                __u32           log_level;      /* verbosity level of verifier */
                __u32           log_size;       /* size of user buffer */
                __aligned_u64   log_buf;        /* user supplied buffer */
                __u32           kern_version;   /* checked when prog_type=kprobe */
                __u32           prog_flags;
        };

        struct { /* anonymous struct used by BPF_OBJ_* commands */
                __aligned_u64   pathname;
                __u32           bpf_fd;
        };

        struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
                __u32           target_fd;      /* container object to attach to */
                __u32           attach_bpf_fd;  /* eBPF program to attach */
                __u32           attach_type;
                __u32           attach_flags;
        };

        struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
                __u32           prog_fd;
                __u32           retval;
                __u32           data_size_in;
                __u32           data_size_out;
                __aligned_u64   data_in;
                __aligned_u64   data_out;
                __u32           repeat;
                __u32           duration;
        } test;

        struct { /* anonymous struct used by BPF_*_GET_*_ID */
                union {
                        __u32           start_id;
                        __u32           prog_id;
                        __u32           map_id;
                };
                __u32           next_id;
        };

        struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
                __u32           bpf_fd;
                __u32           info_len;
                __aligned_u64   info;
        } info;
} __attribute__((aligned(8)));
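
/* Example (illustrative sketch, not part of the UAPI): loading a minimal
 * socket filter that just returns 0, with the instructions encoded by hand
 * from the struct bpf_insn fields above. ptr_to_u64() is the caller's own
 * helper; no verifier log buffer is requested here.
 *
 *      #include <string.h>
 *      #include <unistd.h>
 *      #include <sys/syscall.h>
 *      #include <linux/bpf.h>
 *
 *      static __u64 ptr_to_u64(const void *p) { return (__u64)(unsigned long)p; }
 *
 *      int load_return_zero(void)
 *      {
 *              struct bpf_insn prog[] = {
 *                      { .code = BPF_ALU64 | BPF_MOV | BPF_K,  // r0 = 0
 *                        .dst_reg = BPF_REG_0, .imm = 0 },
 *                      { .code = BPF_JMP | BPF_EXIT },         // return r0
 *              };
 *              union bpf_attr attr;
 *
 *              memset(&attr, 0, sizeof(attr));
 *              attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 *              attr.insns     = ptr_to_u64(prog);
 *              attr.insn_cnt  = sizeof(prog) / sizeof(prog[0]);
 *              attr.license   = ptr_to_u64("GPL");
 *              return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 *      }
 */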

/* BPF helper function descriptions:
 *
 * void *bpf_map_lookup_elem(&map, &key)
 *     Return: Map value or NULL
 *
 * int bpf_map_update_elem(&map, &key, &value, flags)
 *     Return: 0 on success or negative error
 *
 * int bpf_map_delete_elem(&map, &key)
 *     Return: 0 on success or negative error
 *
 * int bpf_probe_read(void *dst, int size, void *src)
 *     Return: 0 on success or negative error
 *
 * u64 bpf_ktime_get_ns(void)
 *     Return: current ktime
 *
 * int bpf_trace_printk(const char *fmt, int fmt_size, ...)
 *     Return: length of buffer written or negative error
 *
 * u32 bpf_get_prandom_u32(void)
 *     Return: random value
 *
 * u32 bpf_get_smp_processor_id(void)
 *     Return: SMP processor ID
 *
 * int bpf_skb_store_bytes(skb, offset, from, len, flags)
 *     store bytes into packet
 *     @skb: pointer to skb
 *     @offset: offset within packet from skb->mac_header
 *     @from: pointer where to copy bytes from
 *     @len: number of bytes to store into packet
 *     @flags: bit 0 - if true, recompute skb->csum
 *             other bits - reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_l3_csum_replace(skb, offset, from, to, flags)
 *     recompute IP checksum
 *     @skb: pointer to skb
 *     @offset: offset within packet where IP checksum is located
 *     @from: old value of header field
 *     @to: new value of header field
 *     @flags: bits 0-3 - size of header field
 *             other bits - reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_l4_csum_replace(skb, offset, from, to, flags)
 *     recompute TCP/UDP checksum
 *     @skb: pointer to skb
 *     @offset: offset within packet where TCP/UDP checksum is located
 *     @from: old value of header field
 *     @to: new value of header field
 *     @flags: bits 0-3 - size of header field
 *             bit 4 - is pseudo header
 *             other bits - reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_tail_call(ctx, prog_array_map, index)
 *     jump into another BPF program
 *     @ctx: context pointer passed to next program
 *     @prog_array_map: pointer to map of type BPF_MAP_TYPE_PROG_ARRAY
 *     @index: index inside array that selects specific program to run
 *     Return: 0 on success or negative error
 *
 * int bpf_clone_redirect(skb, ifindex, flags)
 *     redirect to another netdev
 *     @skb: pointer to skb
 *     @ifindex: ifindex of the net device
 *     @flags: bit 0 - if set, redirect to ingress instead of egress
 *             other bits - reserved
 *     Return: 0 on success or negative error
 *
 * u64 bpf_get_current_pid_tgid(void)
 *     Return: current->tgid << 32 | current->pid
 *
 * u64 bpf_get_current_uid_gid(void)
 *     Return: current_gid << 32 | current_uid
 *
 * int bpf_get_current_comm(char *buf, int size_of_buf)
 *     stores current->comm into buf
 *     Return: 0 on success or negative error
 *
 * u32 bpf_get_cgroup_classid(skb)
 *     retrieve a proc's classid
 *     @skb: pointer to skb
 *     Return: classid if != 0
 *
 * int bpf_skb_vlan_push(skb, vlan_proto, vlan_tci)
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_vlan_pop(skb)
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_get_tunnel_key(skb, key, size, flags)
 * int bpf_skb_set_tunnel_key(skb, key, size, flags)
 *     retrieve or populate tunnel metadata
 *     @skb: pointer to skb
 *     @key: pointer to 'struct bpf_tunnel_key'
 *     @size: size of 'struct bpf_tunnel_key'
 *     @flags: room for future extensions
 *     Return: 0 on success or negative error
 *
 * u64 bpf_perf_event_read(map, flags)
 *     read perf event counter value
 *     @map: pointer to perf_event_array map
 *     @flags: index of event in the map or bitmask flags
 *     Return: value of perf event counter read or error code
 *
 * int bpf_redirect(ifindex, flags)
 *     redirect to another netdev
 *     @ifindex: ifindex of the net device
 *     @flags: bit 0 - if set, redirect to ingress instead of egress
 *             other bits - reserved
 *     Return: TC_ACT_REDIRECT
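 *
 *     Example (illustrative sketch, not part of the UAPI): a SCHED_CLS
 *     program that redirects every packet to the net device with ifindex 2.
 *     The helper declaration via a cast of the BPF_FUNC_* id follows the
 *     samples/bpf style; the SEC() macro is assumed to be supplied by the
 *     program author.
 *
 *         static int (*bpf_redirect)(int ifindex, int flags) =
 *                 (void *) BPF_FUNC_redirect;
 *
 *         SEC("classifier")
 *         int redirect_prog(struct __sk_buff *skb)
 *         {
 *                 return bpf_redirect(2, 0);   // egress on ifindex 2
 *         }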
 *
 * u32 bpf_get_route_realm(skb)
 *     retrieve a dst's tclassid
 *     @skb: pointer to skb
 *     Return: realm if != 0
 *
 * int bpf_perf_event_output(ctx, map, flags, data, size)
 *     output perf raw sample
 *     @ctx: struct pt_regs*
 *     @map: pointer to perf_event_array map
 *     @flags: index of event in the map or bitmask flags
 *     @data: data on stack to be output as raw data
 *     @size: size of data
 *     Return: 0 on success or negative error
 *
 * int bpf_get_stackid(ctx, map, flags)
 *     walk user or kernel stack and return id
 *     @ctx: struct pt_regs*
 *     @map: pointer to stack_trace map
 *     @flags: bits 0-7 - number of stack frames to skip
 *             bit 8 - collect user stack instead of kernel
 *             bit 9 - compare stacks by hash only
 *             bit 10 - if two different stacks hash into the same stackid
 *                      discard old
 *             other bits - reserved
 *     Return: >= 0 stackid on success or negative error
 *
 * s64 bpf_csum_diff(from, from_size, to, to_size, seed)
 *     calculate csum diff
 *     @from: raw from buffer
 *     @from_size: length of from buffer
 *     @to: raw to buffer
 *     @to_size: length of to buffer
 *     @seed: optional seed
 *     Return: csum result or negative error code
 *
 * int bpf_skb_get_tunnel_opt(skb, opt, size)
 *     retrieve tunnel options metadata
 *     @skb: pointer to skb
 *     @opt: pointer to raw tunnel option data
 *     @size: size of @opt
 *     Return: option size
 *
 * int bpf_skb_set_tunnel_opt(skb, opt, size)
 *     populate tunnel options metadata
 *     @skb: pointer to skb
 *     @opt: pointer to raw tunnel option data
 *     @size: size of @opt
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_change_proto(skb, proto, flags)
 *     Change protocol of the skb. Currently supported are v4 -> v6 and
 *     v6 -> v4 transitions. The helper will also resize the skb. The eBPF
 *     program is expected to fill the new headers via skb_store_bytes
 *     and lX_csum_replace.
 *     @skb: pointer to skb
 *     @proto: new skb->protocol type
 *     @flags: reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_change_type(skb, type)
 *     Change packet type of skb.
 *     @skb: pointer to skb
 *     @type: new skb->pkt_type type
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_under_cgroup(skb, map, index)
 *     Check cgroup2 membership of skb
 *     @skb: pointer to skb
 *     @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
 *     @index: index of the cgroup in the bpf_map
 *     Return:
 *       == 0 skb failed the cgroup2 descendant test
 *       == 1 skb succeeded the cgroup2 descendant test
 *        < 0 error
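 *
 *     Example (illustrative sketch, not part of the UAPI): a kprobe program
 *     pushing a fixed-size record to user space through a
 *     BPF_MAP_TYPE_PERF_EVENT_ARRAY map. The helper declarations follow the
 *     samples/bpf cast-the-id style; 'events' stands for a perf event array
 *     map and SEC() for a section macro, both assumed to be defined by the
 *     program author. BPF_F_CURRENT_CPU is defined further below.
 *
 *         struct event { __u32 tgid; __u64 ts; };
 *
 *         static __u64 (*bpf_ktime_get_ns)(void) =
 *                 (void *) BPF_FUNC_ktime_get_ns;
 *         static __u64 (*bpf_get_current_pid_tgid)(void) =
 *                 (void *) BPF_FUNC_get_current_pid_tgid;
 *         static int (*bpf_perf_event_output)(void *ctx, void *map, __u64 flags,
 *                                             void *data, __u64 size) =
 *                 (void *) BPF_FUNC_perf_event_output;
 *
 *         SEC("kprobe/sys_execve")
 *         int trace_exec(struct pt_regs *ctx)
 *         {
 *                 struct event e;
 *
 *                 e.tgid = bpf_get_current_pid_tgid() >> 32;
 *                 e.ts   = bpf_ktime_get_ns();
 *                 // BPF_F_CURRENT_CPU selects the perf event of the current CPU
 *                 bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *                                       &e, sizeof(e));
 *                 return 0;
 *         }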
 *
 * u32 bpf_get_hash_recalc(skb)
 *     Retrieve and possibly recalculate skb->hash.
 *     @skb: pointer to skb
 *     Return: hash
 *
 * u64 bpf_get_current_task(void)
 *     Returns current task_struct
 *     Return: current
 *
 * int bpf_probe_write_user(void *dst, void *src, int len)
 *     safely attempt to write to a location
 *     @dst: destination address in userspace
 *     @src: source address on stack
 *     @len: number of bytes to copy
 *     Return: 0 on success or negative error
 *
 * int bpf_current_task_under_cgroup(map, index)
 *     Check cgroup2 membership of current task
 *     @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
 *     @index: index of the cgroup in the bpf_map
 *     Return:
 *       == 0 current failed the cgroup2 descendant test
 *       == 1 current succeeded the cgroup2 descendant test
 *        < 0 error
 *
 * int bpf_skb_change_tail(skb, len, flags)
 *     The helper will resize the skb to the given new size, to be used e.g.
 *     with control messages.
 *     @skb: pointer to skb
 *     @len: new skb length
 *     @flags: reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_pull_data(skb, len)
 *     The helper will pull in non-linear data in case the skb is non-linear
 *     and not all of len is part of the linear section. Only needed for
 *     read/write with direct packet access.
 *     @skb: pointer to skb
 *     @len: length to make read/writable
 *     Return: 0 on success or negative error
 *
 * s64 bpf_csum_update(skb, csum)
 *     Adds csum into skb->csum in case of CHECKSUM_COMPLETE.
 *     @skb: pointer to skb
 *     @csum: csum to add
 *     Return: csum on success or negative error
 *
 * void bpf_set_hash_invalid(skb)
 *     Invalidate current skb->hash.
 *     @skb: pointer to skb
 *
 * int bpf_get_numa_node_id()
 *     Return: ID of current NUMA node.
 *
 * int bpf_skb_change_head(skb, len, flags)
 *     Grows headroom of skb and adjusts MAC header offset accordingly.
 *     Will extend/reallocate as required automatically.
 *     May change skb data pointer and will thus invalidate any check
 *     performed for direct packet access.
 *     @skb: pointer to skb
 *     @len: length of header to be pushed in front
 *     @flags: Flags (unused for now)
 *     Return: 0 on success or negative error
 *
 * int bpf_xdp_adjust_head(xdp_md, delta)
 *     Adjust the xdp_md.data by delta
 *     @xdp_md: pointer to xdp_md
 *     @delta: A positive/negative integer to be added to xdp_md.data
 *     Return: 0 on success or negative on error
 *
 * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr)
 *     Copy a NUL terminated string from unsafe address. In case the string
 *     length is smaller than size, the target is not padded with further NUL
 *     bytes. In case the string length is larger than size, just size - 1
 *     bytes are copied and the last byte is set to NUL.
 *     @dst: destination address
 *     @size: maximum number of bytes to copy, including the trailing NUL
 *     @unsafe_ptr: unsafe address
 *     Return:
 *       > 0 length of the string including the trailing NUL on success
 *       < 0 error
 *
 * u64 bpf_get_socket_cookie(skb)
 *     Get the cookie for the socket stored inside sk_buff.
 *     @skb: pointer to skb
 *     Return: 8 Bytes non-decreasing number on success or 0 if the socket
 *     field is missing inside sk_buff
 *
 * u32 bpf_get_socket_uid(skb)
 *     Get the owner uid of the socket stored inside sk_buff.
 *     @skb: pointer to skb
 *     Return: uid of the socket owner on success or overflowuid if failed.
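 *
 *     Example (illustrative sketch, not part of the UAPI): copying the first
 *     argument of sys_execve (the filename pointer) into a stack buffer from
 *     a kprobe program. PT_REGS_PARM1() and SEC() stand in for the usual
 *     architecture and section macros and are assumptions of this sketch.
 *
 *         static int (*bpf_probe_read_str)(void *dst, int size,
 *                                          const void *unsafe_ptr) =
 *                 (void *) BPF_FUNC_probe_read_str;
 *
 *         SEC("kprobe/sys_execve")
 *         int trace_execve(struct pt_regs *ctx)
 *         {
 *                 char fname[64];
 *                 int len;
 *
 *                 len = bpf_probe_read_str(fname, sizeof(fname),
 *                                          (void *) PT_REGS_PARM1(ctx));
 *                 if (len < 0)    // copy failed
 *                         return 0;
 *                 // 'len' includes the trailing NUL on success
 *                 return 0;
 *         }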
 *
 * u32 bpf_set_hash(skb, hash)
 *     Set full skb->hash.
 *     @skb: pointer to skb
 *     @hash: hash to set
 */
#define __BPF_FUNC_MAPPER(FN)           \
        FN(unspec),                     \
        FN(map_lookup_elem),            \
        FN(map_update_elem),            \
        FN(map_delete_elem),            \
        FN(probe_read),                 \
        FN(ktime_get_ns),               \
        FN(trace_printk),               \
        FN(get_prandom_u32),            \
        FN(get_smp_processor_id),       \
        FN(skb_store_bytes),            \
        FN(l3_csum_replace),            \
        FN(l4_csum_replace),            \
        FN(tail_call),                  \
        FN(clone_redirect),             \
        FN(get_current_pid_tgid),       \
        FN(get_current_uid_gid),        \
        FN(get_current_comm),           \
        FN(get_cgroup_classid),         \
        FN(skb_vlan_push),              \
        FN(skb_vlan_pop),               \
        FN(skb_get_tunnel_key),         \
        FN(skb_set_tunnel_key),         \
        FN(perf_event_read),            \
        FN(redirect),                   \
        FN(get_route_realm),            \
        FN(perf_event_output),          \
        FN(skb_load_bytes),             \
        FN(get_stackid),                \
        FN(csum_diff),                  \
        FN(skb_get_tunnel_opt),         \
        FN(skb_set_tunnel_opt),         \
        FN(skb_change_proto),           \
        FN(skb_change_type),            \
        FN(skb_under_cgroup),           \
        FN(get_hash_recalc),            \
        FN(get_current_task),           \
        FN(probe_write_user),           \
        FN(current_task_under_cgroup),  \
        FN(skb_change_tail),            \
        FN(skb_pull_data),              \
        FN(csum_update),                \
        FN(set_hash_invalid),           \
        FN(get_numa_node_id),           \
        FN(skb_change_head),            \
        FN(xdp_adjust_head),            \
        FN(probe_read_str),             \
        FN(get_socket_cookie),          \
        FN(get_socket_uid),             \
        FN(set_hash),

/* integer value in 'imm' field of BPF_CALL instruction selects which helper
 * function eBPF program intends to call
 */
#define __BPF_ENUM_FN(x) BPF_FUNC_ ## x
enum bpf_func_id {
        __BPF_FUNC_MAPPER(__BPF_ENUM_FN)
        __BPF_FUNC_MAX_ID,
};
#undef __BPF_ENUM_FN

/* All flags used by eBPF helper functions, placed here. */

/* BPF_FUNC_skb_store_bytes flags. */
#define BPF_F_RECOMPUTE_CSUM            (1ULL << 0)
#define BPF_F_INVALIDATE_HASH           (1ULL << 1)

/* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
 * First 4 bits are for passing the header field size.
 */
#define BPF_F_HDR_FIELD_MASK            0xfULL

/* BPF_FUNC_l4_csum_replace flags. */
#define BPF_F_PSEUDO_HDR                (1ULL << 4)
#define BPF_F_MARK_MANGLED_0            (1ULL << 5)
#define BPF_F_MARK_ENFORCE              (1ULL << 6)

/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
#define BPF_F_INGRESS                   (1ULL << 0)

/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
#define BPF_F_TUNINFO_IPV6              (1ULL << 0)

/* BPF_FUNC_get_stackid flags. */
#define BPF_F_SKIP_FIELD_MASK           0xffULL
#define BPF_F_USER_STACK                (1ULL << 8)
#define BPF_F_FAST_STACK_CMP            (1ULL << 9)
#define BPF_F_REUSE_STACKID             (1ULL << 10)

/* BPF_FUNC_skb_set_tunnel_key flags. */
#define BPF_F_ZERO_CSUM_TX              (1ULL << 1)
#define BPF_F_DONT_FRAGMENT             (1ULL << 2)

/* BPF_FUNC_perf_event_output and BPF_FUNC_perf_event_read flags. */
#define BPF_F_INDEX_MASK                0xffffffffULL
#define BPF_F_CURRENT_CPU               BPF_F_INDEX_MASK
/* BPF_FUNC_perf_event_output for sk_buff input context. */
#define BPF_F_CTXLEN_MASK               (0xfffffULL << 32)
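
/* Example (illustrative sketch, not part of the UAPI): when a SCHED_CLS
 * program rewrites the destination address of a TCP/IPv4 packet, the lower
 * four bits of the checksum-replace flags carry the field size and
 * BPF_F_PSEUDO_HDR marks the L4 update as a pseudo-header change. 'skb' is
 * the program's struct __sk_buff pointer, IP_DST_OFF/IP_CSUM_OFF/TCP_CSUM_OFF
 * are hypothetical offsets computed by the program author, and the helpers
 * are assumed to be declared in the usual cast-the-BPF_FUNC_id style.
 *
 *      bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_ip, new_ip,
 *                          BPF_F_PSEUDO_HDR | sizeof(new_ip));
 *      bpf_l3_csum_replace(skb, IP_CSUM_OFF, old_ip, new_ip, sizeof(new_ip));
 *      bpf_skb_store_bytes(skb, IP_DST_OFF, &new_ip, sizeof(new_ip), 0);
 */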

/* user accessible mirror of in-kernel sk_buff.
 * new fields can only be added to the end of this structure
 */
struct __sk_buff {
        __u32 len;
        __u32 pkt_type;
        __u32 mark;
        __u32 queue_mapping;
        __u32 protocol;
        __u32 vlan_present;
        __u32 vlan_tci;
        __u32 vlan_proto;
        __u32 priority;
        __u32 ingress_ifindex;
        __u32 ifindex;
        __u32 tc_index;
        __u32 cb[5];
        __u32 hash;
        __u32 tc_classid;
        __u32 data;
        __u32 data_end;
        __u32 napi_id;
};

struct bpf_tunnel_key {
        __u32 tunnel_id;
        union {
                __u32 remote_ipv4;
                __u32 remote_ipv6[4];
        };
        __u8 tunnel_tos;
        __u8 tunnel_ttl;
        __u16 tunnel_ext;
        __u32 tunnel_label;
};

/* Generic BPF return codes which all BPF program types may support.
 * The values are binary compatible with their TC_ACT_* counterpart to
 * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT
 * programs.
 *
 * XDP is handled separately, see XDP_*.
 */
enum bpf_ret_code {
        BPF_OK = 0,
        /* 1 reserved */
        BPF_DROP = 2,
        /* 3-6 reserved */
        BPF_REDIRECT = 7,
        /* >127 are reserved for prog type specific return codes */
};

struct bpf_sock {
        __u32 bound_dev_if;
        __u32 family;
        __u32 type;
        __u32 protocol;
};

#define XDP_PACKET_HEADROOM 256

/* User return codes for XDP prog type.
 * A valid XDP program must return one of these defined values. All other
 * return codes are reserved for future use. Unknown return codes will result
 * in packet drop.
 */
enum xdp_action {
        XDP_ABORTED = 0,
        XDP_DROP,
        XDP_PASS,
        XDP_TX,
};

/* user accessible metadata for XDP packet hook
 * new fields must be added to the end of this structure
 */
struct xdp_md {
        __u32 data;
        __u32 data_end;
};

#define BPF_TAG_SIZE 8

struct bpf_prog_info {
        __u32 type;
        __u32 id;
        __u8  tag[BPF_TAG_SIZE];
        __u32 jited_prog_len;
        __u32 xlated_prog_len;
        __aligned_u64 jited_prog_insns;
        __aligned_u64 xlated_prog_insns;
} __attribute__((aligned(8)));

struct bpf_map_info {
        __u32 type;
        __u32 id;
        __u32 key_size;
        __u32 value_size;
        __u32 max_entries;
        __u32 map_flags;
} __attribute__((aligned(8)));

#endif /* _UAPI__LINUX_BPF_H__ */
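
/* Example (illustrative sketch, not part of the UAPI): querying an already
 * loaded program through BPF_OBJ_GET_INFO_BY_FD. 'prog_fd' is a hypothetical
 * fd returned by BPF_PROG_LOAD and ptr_to_u64() is the caller's own helper.
 *
 *      struct bpf_prog_info info;
 *      union bpf_attr attr;
 *
 *      memset(&info, 0, sizeof(info));
 *      memset(&attr, 0, sizeof(attr));
 *      attr.info.bpf_fd   = prog_fd;
 *      attr.info.info_len = sizeof(info);
 *      attr.info.info     = ptr_to_u64(&info);
 *      if (syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)) == 0) {
 *              // info.id, info.tag and the program lengths are now filled in
 *      }
 */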