/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#ifndef _UAPI__LINUX_BPF_H__
#define _UAPI__LINUX_BPF_H__

#include <linux/types.h>
#include <linux/bpf_common.h>

/* Extended instruction set based on top of classic BPF */

/* instruction classes */
#define BPF_ALU64	0x07	/* alu mode in double word width */

/* ld/ldx fields */
#define BPF_DW		0x18	/* double word */
#define BPF_XADD	0xc0	/* exclusive add */

/* alu/jmp fields */
#define BPF_MOV		0xb0	/* mov reg to reg */
#define BPF_ARSH	0xc0	/* sign extending arithmetic shift right */

/* change endianness of a register */
#define BPF_END		0xd0	/* flags for endianness conversion: */
#define BPF_TO_LE	0x00	/* convert to little-endian */
#define BPF_TO_BE	0x08	/* convert to big-endian */
#define BPF_FROM_LE	BPF_TO_LE
#define BPF_FROM_BE	BPF_TO_BE

#define BPF_JNE		0x50	/* jump != */
#define BPF_JSGT	0x60	/* SGT is signed '>', GT in x86 */
#define BPF_JSGE	0x70	/* SGE is signed '>=', GE in x86 */
#define BPF_CALL	0x80	/* function call */
#define BPF_EXIT	0x90	/* function return */

/* Register numbers */
enum {
	BPF_REG_0 = 0,
	BPF_REG_1,
	BPF_REG_2,
	BPF_REG_3,
	BPF_REG_4,
	BPF_REG_5,
	BPF_REG_6,
	BPF_REG_7,
	BPF_REG_8,
	BPF_REG_9,
	BPF_REG_10,
	__MAX_BPF_REG,
};

/* BPF has 10 general-purpose 64-bit registers and a stack frame. */
#define MAX_BPF_REG	__MAX_BPF_REG

struct bpf_insn {
	__u8	code;		/* opcode */
	__u8	dst_reg:4;	/* dest register */
	__u8	src_reg:4;	/* source register */
	__s16	off;		/* signed offset */
	__s32	imm;		/* signed immediate constant */
};
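
/* Illustrative sketch (not part of the UAPI): a hand-assembled
 * "return 0" program encoded with struct bpf_insn, using positional
 * initializers for code, dst_reg, src_reg, off, imm. BPF_ALU64 |
 * BPF_MOV | BPF_K (0xb7) moves the immediate 0 into R0, the return
 * value register, and BPF_JMP | BPF_EXIT (0x95) leaves the program.
 * BPF_JMP and BPF_K come from <linux/bpf_common.h>.
 *
 *	struct bpf_insn prog[] = {
 *		{ BPF_ALU64 | BPF_MOV | BPF_K, BPF_REG_0, 0, 0, 0 },
 *		{ BPF_JMP | BPF_EXIT, 0, 0, 0, 0 },
 *	};
 */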

/* BPF syscall commands, see bpf(2) man-page for details. */
enum bpf_cmd {
	BPF_MAP_CREATE,
	BPF_MAP_LOOKUP_ELEM,
	BPF_MAP_UPDATE_ELEM,
	BPF_MAP_DELETE_ELEM,
	BPF_MAP_GET_NEXT_KEY,
	BPF_PROG_LOAD,
	BPF_OBJ_PIN,
	BPF_OBJ_GET,
	BPF_PROG_ATTACH,
	BPF_PROG_DETACH,
};

enum bpf_map_type {
	BPF_MAP_TYPE_UNSPEC,
	BPF_MAP_TYPE_HASH,
	BPF_MAP_TYPE_ARRAY,
	BPF_MAP_TYPE_PROG_ARRAY,
	BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	BPF_MAP_TYPE_PERCPU_HASH,
	BPF_MAP_TYPE_PERCPU_ARRAY,
	BPF_MAP_TYPE_STACK_TRACE,
	BPF_MAP_TYPE_CGROUP_ARRAY,
	BPF_MAP_TYPE_LRU_HASH,
	BPF_MAP_TYPE_LRU_PERCPU_HASH,
};

enum bpf_prog_type {
	BPF_PROG_TYPE_UNSPEC,
	BPF_PROG_TYPE_SOCKET_FILTER,
	BPF_PROG_TYPE_KPROBE,
	BPF_PROG_TYPE_SCHED_CLS,
	BPF_PROG_TYPE_SCHED_ACT,
	BPF_PROG_TYPE_TRACEPOINT,
	BPF_PROG_TYPE_XDP,
	BPF_PROG_TYPE_PERF_EVENT,
	BPF_PROG_TYPE_CGROUP_SKB,
	BPF_PROG_TYPE_CGROUP_SOCK,
	BPF_PROG_TYPE_LWT_IN,
	BPF_PROG_TYPE_LWT_OUT,
	BPF_PROG_TYPE_LWT_XMIT,
};

enum bpf_attach_type {
	BPF_CGROUP_INET_INGRESS,
	BPF_CGROUP_INET_EGRESS,
	BPF_CGROUP_INET_SOCK_CREATE,
	__MAX_BPF_ATTACH_TYPE
};

#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE

#define BPF_PSEUDO_MAP_FD	1

/* flags for BPF_MAP_UPDATE_ELEM command */
#define BPF_ANY		0 /* create new element or update existing */
#define BPF_NOEXIST	1 /* create new element only if it didn't exist */
#define BPF_EXIST	2 /* update existing element */

#define BPF_F_NO_PREALLOC	(1U << 0)
/* Instead of having one common LRU list in the
 * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
 * which can scale and perform better.
 * Note, the LRU nodes (including free nodes) cannot be moved
 * across different LRU lists.
 */
#define BPF_F_NO_COMMON_LRU	(1U << 1)

union bpf_attr {
	struct { /* anonymous struct used by BPF_MAP_CREATE command */
		__u32	map_type;	/* one of enum bpf_map_type */
		__u32	key_size;	/* size of key in bytes */
		__u32	value_size;	/* size of value in bytes */
		__u32	max_entries;	/* max number of entries in a map */
		__u32	map_flags;	/* prealloc or not */
	};

	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
		__u32		map_fd;
		__aligned_u64	key;
		union {
			__aligned_u64 value;
			__aligned_u64 next_key;
		};
		__u64		flags;
	};

	struct { /* anonymous struct used by BPF_PROG_LOAD command */
		__u32		prog_type;	/* one of enum bpf_prog_type */
		__u32		insn_cnt;
		__aligned_u64	insns;
		__aligned_u64	license;
		__u32		log_level;	/* verbosity level of verifier */
		__u32		log_size;	/* size of user buffer */
		__aligned_u64	log_buf;	/* user supplied buffer */
		__u32		kern_version;	/* checked when prog_type=kprobe */
	};

	struct { /* anonymous struct used by BPF_OBJ_* commands */
		__aligned_u64	pathname;
		__u32		bpf_fd;
	};

	struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
		__u32		target_fd;	/* container object to attach to */
		__u32		attach_bpf_fd;	/* eBPF program to attach */
		__u32		attach_type;
	};
} __attribute__((aligned(8)));
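
/* Illustrative sketch (not part of the UAPI): creating a hash map with
 * the BPF_MAP_CREATE command and the first anonymous struct of union
 * bpf_attr. Glibc has no wrapper for bpf(2), so the call goes through
 * syscall(2); error handling is elided.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	int create_map(void)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));	// unused fields must be zero
 *		attr.map_type	 = BPF_MAP_TYPE_HASH;
 *		attr.key_size	 = sizeof(__u32);
 *		attr.value_size	 = sizeof(__u64);
 *		attr.max_entries = 1024;
 *
 *		// on success the new map's file descriptor is returned
 *		return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *	}
 */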

/* BPF helper function descriptions:
 *
 * void *bpf_map_lookup_elem(&map, &key)
 *     Return: Map value or NULL
 *
 * int bpf_map_update_elem(&map, &key, &value, flags)
 *     Return: 0 on success or negative error
 *
 * int bpf_map_delete_elem(&map, &key)
 *     Return: 0 on success or negative error
 *
 * int bpf_probe_read(void *dst, int size, void *src)
 *     Return: 0 on success or negative error
 *
 * u64 bpf_ktime_get_ns(void)
 *     Return: current ktime
 *
 * int bpf_trace_printk(const char *fmt, int fmt_size, ...)
 *     Return: length of buffer written or negative error
 *
 * u32 bpf_get_prandom_u32(void)
 *     Return: random value
 *
 * u32 bpf_get_smp_processor_id(void)
 *     Return: SMP processor ID
 *
 * int bpf_skb_store_bytes(skb, offset, from, len, flags)
 *     store bytes into packet
 *     @skb: pointer to skb
 *     @offset: offset within packet from skb->mac_header
 *     @from: pointer where to copy bytes from
 *     @len: number of bytes to store into packet
 *     @flags: bit 0 - if true, recompute skb->csum
 *             other bits - reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_l3_csum_replace(skb, offset, from, to, flags)
 *     recompute IP checksum
 *     @skb: pointer to skb
 *     @offset: offset within packet where IP checksum is located
 *     @from: old value of header field
 *     @to: new value of header field
 *     @flags: bits 0-3 - size of header field
 *             other bits - reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_l4_csum_replace(skb, offset, from, to, flags)
 *     recompute TCP/UDP checksum
 *     @skb: pointer to skb
 *     @offset: offset within packet where TCP/UDP checksum is located
 *     @from: old value of header field
 *     @to: new value of header field
 *     @flags: bits 0-3 - size of header field
 *             bit 4 - is pseudo header
 *             other bits - reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_tail_call(ctx, prog_array_map, index)
 *     jump into another BPF program
 *     @ctx: context pointer passed to next program
 *     @prog_array_map: pointer to map of type BPF_MAP_TYPE_PROG_ARRAY
 *     @index: index inside array that selects specific program to run
 *     Return: 0 on success or negative error
 *
 * int bpf_clone_redirect(skb, ifindex, flags)
 *     redirect to another netdev
 *     @skb: pointer to skb
 *     @ifindex: ifindex of the net device
 *     @flags: bit 0 - if set, redirect to ingress instead of egress
 *             other bits - reserved
 *     Return: 0 on success or negative error
 *
 * u64 bpf_get_current_pid_tgid(void)
 *     Return: current->tgid << 32 | current->pid
 *
 * u64 bpf_get_current_uid_gid(void)
 *     Return: current_gid << 32 | current_uid
 *
 * int bpf_get_current_comm(char *buf, int size_of_buf)
 *     stores current->comm into buf
 *     Return: 0 on success or negative error
 *
 * u32 bpf_get_cgroup_classid(skb)
 *     retrieve a proc's classid
 *     @skb: pointer to skb
 *     Return: classid if != 0
 *
 * int bpf_skb_vlan_push(skb, vlan_proto, vlan_tci)
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_vlan_pop(skb)
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_get_tunnel_key(skb, key, size, flags)
 * int bpf_skb_set_tunnel_key(skb, key, size, flags)
 *     retrieve or populate tunnel metadata
 *     @skb: pointer to skb
 *     @key: pointer to 'struct bpf_tunnel_key'
 *     @size: size of 'struct bpf_tunnel_key'
 *     @flags: room for future extensions
 *     Return: 0 on success or negative error
 *
 * u64 bpf_perf_event_read(&map, index)
 *     Return: Number of events read or error code
 *
 * int bpf_redirect(ifindex, flags)
 *     redirect to another netdev
 *     @ifindex: ifindex of the net device
 *     @flags: bit 0 - if set, redirect to ingress instead of egress
 *             other bits - reserved
 *     Return: TC_ACT_REDIRECT
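 *
 *     Illustrative sketch (not part of the UAPI): rewriting a 16-bit TCP
 *     destination port with the two helpers above. port_off, csum_off,
 *     old_port and new_port are assumed to be computed elsewhere in the
 *     program. The last argument packs the size of the changed field
 *     (2 bytes) into flags bits 0-3; BPF_F_PSEUDO_HDR is not set because
 *     ports are not part of the pseudo header.
 *
 *         bpf_skb_store_bytes(skb, port_off, &new_port, 2, 0);
 *         bpf_l4_csum_replace(skb, csum_off, old_port, new_port, 2);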
 *
 * u32 bpf_get_route_realm(skb)
 *     retrieve a dst's tclassid
 *     @skb: pointer to skb
 *     Return: realm if != 0
 *
 * int bpf_perf_event_output(ctx, map, index, data, size)
 *     output perf raw sample
 *     @ctx: struct pt_regs*
 *     @map: pointer to perf_event_array map
 *     @index: index of event in the map
 *     @data: data on stack to be output as raw data
 *     @size: size of data
 *     Return: 0 on success or negative error
 *
 * int bpf_get_stackid(ctx, map, flags)
 *     walk user or kernel stack and return id
 *     @ctx: struct pt_regs*
 *     @map: pointer to stack_trace map
 *     @flags: bits 0-7 - number of stack frames to skip
 *             bit 8 - collect user stack instead of kernel
 *             bit 9 - compare stacks by hash only
 *             bit 10 - if two different stacks hash into the same stackid
 *                      discard old
 *             other bits - reserved
 *     Return: >= 0 stackid on success or negative error
 *
 * s64 bpf_csum_diff(from, from_size, to, to_size, seed)
 *     calculate csum diff
 *     @from: raw from buffer
 *     @from_size: length of from buffer
 *     @to: raw to buffer
 *     @to_size: length of to buffer
 *     @seed: optional seed
 *     Return: csum result or negative error code
 *
 * int bpf_skb_get_tunnel_opt(skb, opt, size)
 *     retrieve tunnel options metadata
 *     @skb: pointer to skb
 *     @opt: pointer to raw tunnel option data
 *     @size: size of @opt
 *     Return: option size
 *
 * int bpf_skb_set_tunnel_opt(skb, opt, size)
 *     populate tunnel options metadata
 *     @skb: pointer to skb
 *     @opt: pointer to raw tunnel option data
 *     @size: size of @opt
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_change_proto(skb, proto, flags)
 *     Change protocol of the skb. Currently supported are v4 -> v6 and
 *     v6 -> v4 transitions. The helper will also resize the skb. The
 *     eBPF program is expected to fill the new headers via
 *     skb_store_bytes and lX_csum_replace.
 *     @skb: pointer to skb
 *     @proto: new skb->protocol type
 *     @flags: reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_change_type(skb, type)
 *     Change packet type of skb.
 *     @skb: pointer to skb
 *     @type: new skb->pkt_type type
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_under_cgroup(skb, map, index)
 *     Check cgroup2 membership of skb
 *     @skb: pointer to skb
 *     @map: pointer to bpf_map of type BPF_MAP_TYPE_CGROUP_ARRAY
 *     @index: index of the cgroup in the bpf_map
 *     Return:
 *         == 0 skb failed the cgroup2 descendant test
 *         == 1 skb succeeded the cgroup2 descendant test
 *         < 0 error
 *
 * u32 bpf_get_hash_recalc(skb)
 *     Retrieve and possibly recalculate skb->hash.
 *     @skb: pointer to skb
 *     Return: hash
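 *
 *     Illustrative sketch (not part of the UAPI): pushing a sample to
 *     user space with bpf_perf_event_output() above from a kprobe
 *     program. events is an assumed map of type
 *     BPF_MAP_TYPE_PERF_EVENT_ARRAY; BPF_F_CURRENT_CPU (defined later
 *     in this file) selects the perf ring buffer of the current CPU.
 *
 *         struct event { __u32 pid; __u64 ts; } e = {
 *             .pid = bpf_get_current_pid_tgid() >> 32,
 *             .ts = bpf_ktime_get_ns(),
 *         };
 *
 *         bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *                               &e, sizeof(e));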
 *
 * u64 bpf_get_current_task(void)
 *     Returns current task_struct
 *     Return: current
 *
 * int bpf_probe_write_user(void *dst, void *src, int len)
 *     safely attempt to write to a location
 *     @dst: destination address in userspace
 *     @src: source address on stack
 *     @len: number of bytes to copy
 *     Return: 0 on success or negative error
 *
 * int bpf_current_task_under_cgroup(map, index)
 *     Check cgroup2 membership of current task
 *     @map: pointer to bpf_map of type BPF_MAP_TYPE_CGROUP_ARRAY
 *     @index: index of the cgroup in the bpf_map
 *     Return:
 *         == 0 current failed the cgroup2 descendant test
 *         == 1 current succeeded the cgroup2 descendant test
 *         < 0 error
 *
 * int bpf_skb_change_tail(skb, len, flags)
 *     The helper will resize the skb to the given new size, to be used
 *     e.g. with control messages.
 *     @skb: pointer to skb
 *     @len: new skb length
 *     @flags: reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_pull_data(skb, len)
 *     The helper will pull in non-linear data in case the skb is
 *     non-linear and not all of len is part of the linear section.
 *     Only needed for read/write with direct packet access.
 *     @skb: pointer to skb
 *     @len: length to make read/writable
 *     Return: 0 on success or negative error
 *
 * s64 bpf_csum_update(skb, csum)
 *     Adds csum into skb->csum in case of CHECKSUM_COMPLETE.
 *     @skb: pointer to skb
 *     @csum: csum to add
 *     Return: csum on success or negative error
 *
 * void bpf_set_hash_invalid(skb)
 *     Invalidate current skb->hash.
 *     @skb: pointer to skb
 *
 * int bpf_get_numa_node_id(void)
 *     Return: ID of current NUMA node.
 *
 * int bpf_skb_change_head(skb, len, flags)
 *     Grows headroom of skb and adjusts MAC header offset accordingly.
 *     Will extend/reallocate as required automatically.
 *     May change skb data pointer and will thus invalidate any check
 *     performed for direct packet access.
 *     @skb: pointer to skb
 *     @len: length of header to be pushed in front
 *     @flags: Flags (unused for now)
 *     Return: 0 on success or negative error
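 *
 *     Illustrative sketch (not part of the UAPI): the usual direct packet
 *     access pattern around bpf_skb_pull_data() above, from a SCHED_CLS
 *     program over struct __sk_buff (defined later in this file);
 *     TC_ACT_SHOT is assumed from <linux/pkt_cls.h>. Offsets must be
 *     re-checked against data_end after the call because the helper may
 *     change skb->data.
 *
 *         void *data = (void *)(long)skb->data;
 *         void *data_end = (void *)(long)skb->data_end;
 *
 *         if (data + 14 > data_end) {            // 14 == Ethernet header
 *             if (bpf_skb_pull_data(skb, 14) < 0)
 *                 return TC_ACT_SHOT;
 *             data = (void *)(long)skb->data;    // reload after pull
 *             data_end = (void *)(long)skb->data_end;
 *             if (data + 14 > data_end)
 *                 return TC_ACT_SHOT;
 *         }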
 *
 * int bpf_xdp_adjust_head(xdp_md, delta)
 *     Adjust the xdp_md.data by delta
 *     @xdp_md: pointer to xdp_md
 *     @delta: a positive or negative integer to be added to xdp_md.data
 *     Return: 0 on success or negative on error
 */
#define __BPF_FUNC_MAPPER(FN)	\
	FN(unspec),		\
	FN(map_lookup_elem),	\
	FN(map_update_elem),	\
	FN(map_delete_elem),	\
	FN(probe_read),		\
	FN(ktime_get_ns),	\
	FN(trace_printk),	\
	FN(get_prandom_u32),	\
	FN(get_smp_processor_id),	\
	FN(skb_store_bytes),	\
	FN(l3_csum_replace),	\
	FN(l4_csum_replace),	\
	FN(tail_call),		\
	FN(clone_redirect),	\
	FN(get_current_pid_tgid),	\
	FN(get_current_uid_gid),	\
	FN(get_current_comm),	\
	FN(get_cgroup_classid),	\
	FN(skb_vlan_push),	\
	FN(skb_vlan_pop),	\
	FN(skb_get_tunnel_key),	\
	FN(skb_set_tunnel_key),	\
	FN(perf_event_read),	\
	FN(redirect),		\
	FN(get_route_realm),	\
	FN(perf_event_output),	\
	FN(skb_load_bytes),	\
	FN(get_stackid),	\
	FN(csum_diff),		\
	FN(skb_get_tunnel_opt),	\
	FN(skb_set_tunnel_opt),	\
	FN(skb_change_proto),	\
	FN(skb_change_type),	\
	FN(skb_under_cgroup),	\
	FN(get_hash_recalc),	\
	FN(get_current_task),	\
	FN(probe_write_user),	\
	FN(current_task_under_cgroup),	\
	FN(skb_change_tail),	\
	FN(skb_pull_data),	\
	FN(csum_update),	\
	FN(set_hash_invalid),	\
	FN(get_numa_node_id),	\
	FN(skb_change_head),	\
	FN(xdp_adjust_head),

/* integer value in 'imm' field of BPF_CALL instruction selects which helper
 * function eBPF program intends to call
 */
#define __BPF_ENUM_FN(x) BPF_FUNC_ ## x
enum bpf_func_id {
	__BPF_FUNC_MAPPER(__BPF_ENUM_FN)
	__BPF_FUNC_MAX_ID,
};
#undef __BPF_ENUM_FN
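
/* Illustrative sketch (not part of the UAPI): applying __BPF_ENUM_FN to
 * the X-macro above expands the enum to
 *
 *	enum bpf_func_id {
 *		BPF_FUNC_unspec,		// = 0
 *		BPF_FUNC_map_lookup_elem,	// = 1
 *		BPF_FUNC_map_update_elem,	// = 2
 *		...
 *		BPF_FUNC_xdp_adjust_head,	// = 44
 *		__BPF_FUNC_MAX_ID,
 *	};
 *
 * so a BPF_CALL instruction with, say, imm == 1 invokes map_lookup_elem.
 * Keeping the helper list in one place lets other users of the mapper
 * (e.g. a name table built with a stringifying FN) stay in sync with the
 * enum.
 */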

/* All flags used by eBPF helper functions, placed here. */

/* BPF_FUNC_skb_store_bytes flags. */
#define BPF_F_RECOMPUTE_CSUM	(1ULL << 0)
#define BPF_F_INVALIDATE_HASH	(1ULL << 1)

/* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
 * First 4 bits are for passing the header field size.
 */
#define BPF_F_HDR_FIELD_MASK	0xfULL

/* BPF_FUNC_l4_csum_replace flags. */
#define BPF_F_PSEUDO_HDR	(1ULL << 4)
#define BPF_F_MARK_MANGLED_0	(1ULL << 5)

/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
#define BPF_F_INGRESS		(1ULL << 0)

/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
#define BPF_F_TUNINFO_IPV6	(1ULL << 0)

/* BPF_FUNC_get_stackid flags. */
#define BPF_F_SKIP_FIELD_MASK	0xffULL
#define BPF_F_USER_STACK	(1ULL << 8)
#define BPF_F_FAST_STACK_CMP	(1ULL << 9)
#define BPF_F_REUSE_STACKID	(1ULL << 10)

/* BPF_FUNC_skb_set_tunnel_key flags. */
#define BPF_F_ZERO_CSUM_TX	(1ULL << 1)
#define BPF_F_DONT_FRAGMENT	(1ULL << 2)

/* BPF_FUNC_perf_event_output and BPF_FUNC_perf_event_read flags. */
#define BPF_F_INDEX_MASK	0xffffffffULL
#define BPF_F_CURRENT_CPU	BPF_F_INDEX_MASK
/* BPF_FUNC_perf_event_output for sk_buff input context. */
#define BPF_F_CTXLEN_MASK	(0xfffffULL << 32)

/* user accessible mirror of in-kernel sk_buff.
 * new fields can only be added to the end of this structure
 */
struct __sk_buff {
	__u32 len;
	__u32 pkt_type;
	__u32 mark;
	__u32 queue_mapping;
	__u32 protocol;
	__u32 vlan_present;
	__u32 vlan_tci;
	__u32 vlan_proto;
	__u32 priority;
	__u32 ingress_ifindex;
	__u32 ifindex;
	__u32 tc_index;
	__u32 cb[5];
	__u32 hash;
	__u32 tc_classid;
	__u32 data;
	__u32 data_end;
};

struct bpf_tunnel_key {
	__u32 tunnel_id;
	union {
		__u32 remote_ipv4;
		__u32 remote_ipv6[4];
	};
	__u8 tunnel_tos;
	__u8 tunnel_ttl;
	__u16 tunnel_ext;
	__u32 tunnel_label;
};

/* Generic BPF return codes which all BPF program types may support.
 * The values are binary compatible with their TC_ACT_* counter-part to
 * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT
 * programs.
 *
 * XDP is handled separately, see XDP_*.
 */
enum bpf_ret_code {
	BPF_OK = 0,
	/* 1 reserved */
	BPF_DROP = 2,
	/* 3-6 reserved */
	BPF_REDIRECT = 7,
	/* >127 are reserved for prog type specific return codes */
};

struct bpf_sock {
	__u32 bound_dev_if;
	__u32 family;
	__u32 type;
	__u32 protocol;
};

#define XDP_PACKET_HEADROOM 256

/* User return codes for XDP prog type.
 * A valid XDP program must return one of these defined values. All other
 * return codes are reserved for future use. Unknown return codes will result
 * in packet drop.
 */
enum xdp_action {
	XDP_ABORTED = 0,
	XDP_DROP,
	XDP_PASS,
	XDP_TX,
};

/* user accessible metadata for XDP packet hook
 * new fields must be added to the end of this structure
 */
struct xdp_md {
	__u32 data;
	__u32 data_end;
};

#endif /* _UAPI__LINUX_BPF_H__ */
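
/* Illustrative sketch (not part of the UAPI): a minimal XDP program over
 * struct xdp_md above, as it would appear in a BPF C program built with
 * clang -target bpf. SEC() and bpf_htons() are assumed from libbpf's
 * bpf_helpers.h/bpf_endian.h, struct ethhdr and ETH_P_IP from
 * <linux/if_ether.h>. The data/data_end comparison is the bounds check
 * the verifier requires before any direct packet access.
 *
 *	SEC("xdp")
 *	int xdp_drop_non_ip(struct xdp_md *ctx)
 *	{
 *		void *data = (void *)(long)ctx->data;
 *		void *data_end = (void *)(long)ctx->data_end;
 *		struct ethhdr *eth = data;
 *
 *		if ((void *)(eth + 1) > data_end)
 *			return XDP_DROP;	// truncated frame
 *		if (eth->h_proto != bpf_htons(ETH_P_IP))
 *			return XDP_DROP;	// drop everything but IPv4
 *		return XDP_PASS;		// hand IPv4 to the stack
 *	}
 */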