/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1

#include <uapi/linux/bpf.h>

#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/wait.h>
#include <linux/u64_stats_sync.h>

struct bpf_verifier_env;
struct perf_event;
struct bpf_prog;
struct bpf_map;
struct sock;
struct seq_file;
struct btf;
struct btf_type;

/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
	/* funcs callable from userspace (via syscall) */
	int (*map_alloc_check)(union bpf_attr *attr);
	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
	void (*map_release)(struct bpf_map *map, struct file *map_file);
	void (*map_free)(struct bpf_map *map);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
	void (*map_release_uref)(struct bpf_map *map);
	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);

	/* funcs callable from userspace and from eBPF programs */
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_map *map, void *key);
	int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
	int (*map_pop_elem)(struct bpf_map *map, void *value);
	int (*map_peek_elem)(struct bpf_map *map, void *value);

	/* funcs called by prog_array and perf_event_array map */
	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
				int fd);
	void (*map_fd_put_ptr)(void *ptr);
	u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
	u32 (*map_fd_sys_lookup_elem)(void *ptr);
	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
				  struct seq_file *m);
	int (*map_check_btf)(const struct bpf_map *map,
			     const struct btf *btf,
			     const struct btf_type *key_type,
			     const struct btf_type *value_type);

	/* Direct value access helpers. */
	int (*map_direct_value_addr)(const struct bpf_map *map,
				     u64 *imm, u32 off);
	int (*map_direct_value_meta)(const struct bpf_map *map,
				     u64 imm, u32 *off);
};
struct bpf_map_memory {
	u32 pages;
	struct user_struct *user;
};

struct bpf_map {
	/* The first two cachelines with read-mostly members of which some
	 * are also accessed in fast-path (e.g. ops, max_entries).
	 */
	const struct bpf_map_ops *ops ____cacheline_aligned;
	struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	enum bpf_map_type map_type;
	u32 key_size;
	u32 value_size;
	u32 max_entries;
	u32 map_flags;
	int spin_lock_off; /* >=0 valid offset, <0 error */
	u32 id;
	int numa_node;
	u32 btf_key_type_id;
	u32 btf_value_type_id;
	struct btf *btf;
	struct bpf_map_memory memory;
	bool unpriv_array;
	bool frozen; /* write-once */
	/* 48 bytes hole */

	/* The 3rd and 4th cacheline with misc members to avoid false sharing
	 * particularly with refcounting.
	 */
	atomic_t refcnt ____cacheline_aligned;
	atomic_t usercnt;
	struct work_struct work;
	char name[BPF_OBJ_NAME_LEN];
};

static inline bool map_value_has_spin_lock(const struct bpf_map *map)
{
	return map->spin_lock_off >= 0;
}

static inline void check_and_init_map_lock(struct bpf_map *map, void *dst)
{
	if (likely(!map_value_has_spin_lock(map)))
		return;
	*(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
		(struct bpf_spin_lock){};
}

/* copy everything but bpf_spin_lock */
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
	if (unlikely(map_value_has_spin_lock(map))) {
		u32 off = map->spin_lock_off;

		memcpy(dst, src, off);
		memcpy(dst + off + sizeof(struct bpf_spin_lock),
		       src + off + sizeof(struct bpf_spin_lock),
		       map->value_size - off - sizeof(struct bpf_spin_lock));
	} else {
		memcpy(dst, src, map->value_size);
	}
}
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src);
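
/* Illustrative sketch (not part of the kernel API): how a map
 * implementation is expected to use the helpers above when the value
 * type embeds a struct bpf_spin_lock.
 *
 *	// on element creation: start with an unlocked bpf_spin_lock
 *	check_and_init_map_lock(map, new_elem_value);
 *
 *	// on update of a live element: preserve the lock word and
 *	// copy everything around it
 *	copy_map_value(map, elem_value, user_value);
 *
 * copy_map_value_locked() (kernel/bpf/helpers.c) performs the same
 * copy while holding the embedded lock of either dst or src,
 * depending on lock_src.
 */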

struct bpf_offload_dev;
struct bpf_offloaded_map;

struct bpf_map_dev_ops {
	int (*map_get_next_key)(struct bpf_offloaded_map *map,
				void *key, void *next_key);
	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value);
	int (*map_update_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
};

struct bpf_offloaded_map {
	struct bpf_map map;
	struct net_device *netdev;
	const struct bpf_map_dev_ops *dev_ops;
	void *dev_priv;
	struct list_head offloads;
};

static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
{
	return container_of(map, struct bpf_offloaded_map, map);
}

static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
	return map->btf && map->ops->map_seq_show_elem;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type);

extern const struct bpf_map_ops bpf_map_offload_ops;

/* function argument constraints */
enum bpf_arg_type {
	ARG_DONTCARE = 0,	/* unused argument in helper function */

	/* the following constraints used to prototype
	 * bpf_map_lookup/update/delete_elem() functions
	 */
	ARG_CONST_MAP_PTR,	/* const argument used as pointer to bpf_map */
	ARG_PTR_TO_MAP_KEY,	/* pointer to stack used as map key */
	ARG_PTR_TO_MAP_VALUE,	/* pointer to stack used as map value */
	ARG_PTR_TO_UNINIT_MAP_VALUE,	/* pointer to valid memory used to store a map value */
	ARG_PTR_TO_MAP_VALUE_OR_NULL,	/* pointer to stack used as map value or NULL */

	/* the following constraints used to prototype bpf_memcmp() and other
	 * functions that access data on eBPF program stack
	 */
	ARG_PTR_TO_MEM,		/* pointer to valid memory (stack, packet, map value) */
	ARG_PTR_TO_MEM_OR_NULL,	/* pointer to valid memory or NULL */
	ARG_PTR_TO_UNINIT_MEM,	/* pointer to memory does not need to be initialized,
				 * helper function must fill all bytes or clear
				 * them in error case.
				 */

	ARG_CONST_SIZE,		/* number of bytes accessed from memory */
	ARG_CONST_SIZE_OR_ZERO,	/* number of bytes accessed from memory or 0 */

	ARG_PTR_TO_CTX,		/* pointer to context */
	ARG_ANYTHING,		/* any (initialized) argument is ok */
	ARG_PTR_TO_SPIN_LOCK,	/* pointer to bpf_spin_lock */
	ARG_PTR_TO_SOCK_COMMON,	/* pointer to sock_common */
	ARG_PTR_TO_INT,		/* pointer to int */
	ARG_PTR_TO_LONG,	/* pointer to long */
	ARG_PTR_TO_SOCKET,	/* pointer to bpf_sock (fullsock) */
};

/* type of values returned from helper functions */
enum bpf_return_type {
	RET_INTEGER,			/* function returns integer */
	RET_VOID,			/* function doesn't return anything */
	RET_PTR_TO_MAP_VALUE,		/* returns a pointer to map elem value */
	RET_PTR_TO_MAP_VALUE_OR_NULL,	/* returns a pointer to map elem value or NULL */
	RET_PTR_TO_SOCKET_OR_NULL,	/* returns a pointer to a socket or NULL */
	RET_PTR_TO_TCP_SOCK_OR_NULL,	/* returns a pointer to a tcp_sock or NULL */
	RET_PTR_TO_SOCK_COMMON_OR_NULL,	/* returns a pointer to a sock_common or NULL */
};

/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
 * instructions after verifying
 */
struct bpf_func_proto {
	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
	bool gpl_only;
	bool pkt_access;
	enum bpf_return_type ret_type;
	enum bpf_arg_type arg1_type;
	enum bpf_arg_type arg2_type;
	enum bpf_arg_type arg3_type;
	enum bpf_arg_type arg4_type;
	enum bpf_arg_type arg5_type;
};
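
/* Illustrative sketch: how a helper is described with the types above
 * so the verifier can type-check BPF_CALLs to it.  This mirrors the
 * shape of bpf_map_lookup_elem_proto (kernel/bpf/helpers.c); the names
 * here are examples only, and BPF_CALL_2() comes from <linux/filter.h>:
 *
 *	BPF_CALL_2(example_lookup, struct bpf_map *, map, void *, key)
 *	{
 *		return (unsigned long)map->ops->map_lookup_elem(map, key);
 *	}
 *
 *	static const struct bpf_func_proto example_lookup_proto = {
 *		.func		= example_lookup,
 *		.gpl_only	= false,
 *		.pkt_access	= true,
 *		.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_KEY,
 *	};
 */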

/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
 * the first argument to eBPF programs.
 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
 */
struct bpf_context;

enum bpf_access_type {
	BPF_READ = 1,
	BPF_WRITE = 2
};

/* types of values stored in eBPF registers */
/* Pointer types represent:
 * pointer
 * pointer + imm
 * pointer + (u16) var
 * pointer + (u16) var + imm
 * if (range > 0) then [ptr, ptr + range - off) is safe to access
 * if (id > 0) means that some 'var' was added
 * if (off > 0) means that 'imm' was added
 */
enum bpf_reg_type {
	NOT_INIT = 0,		 /* nothing was written into register */
	SCALAR_VALUE,		 /* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		 /* reg points to bpf_context */
	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
	PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
	PTR_TO_STACK,		 /* reg == frame_pointer + offset */
	PTR_TO_PACKET_META,	 /* skb->data - meta_len */
	PTR_TO_PACKET,		 /* reg points to skb->data */
	PTR_TO_PACKET_END,	 /* skb->data + headlen */
	PTR_TO_FLOW_KEYS,	 /* reg points to bpf_flow_keys */
	PTR_TO_SOCKET,		 /* reg points to struct bpf_sock */
	PTR_TO_SOCKET_OR_NULL,	 /* reg points to struct bpf_sock or NULL */
	PTR_TO_SOCK_COMMON,	 /* reg points to sock_common */
	PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */
	PTR_TO_TCP_SOCK,	 /* reg points to struct tcp_sock */
	PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
	PTR_TO_TP_BUFFER,	 /* reg points to a writable raw tp's buffer */
};

/* The information passed from prog-specific *_is_valid_access
 * back to the verifier.
 */
struct bpf_insn_access_aux {
	enum bpf_reg_type reg_type;
	int ctx_field_size;
};

static inline void
bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
{
	aux->ctx_field_size = size;
}

struct bpf_prog_ops {
	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
			union bpf_attr __user *uattr);
};

struct bpf_verifier_ops {
	/* return eBPF function prototype for verification */
	const struct bpf_func_proto *
	(*get_func_proto)(enum bpf_func_id func_id,
			  const struct bpf_prog *prog);

	/* return true if 'size' wide access at offset 'off' within bpf_context
	 * with 'type' (read or write) is allowed
	 */
	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info);
	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
			    const struct bpf_prog *prog);
	int (*gen_ld_abs)(const struct bpf_insn *orig,
			  struct bpf_insn *insn_buf);
	u32 (*convert_ctx_access)(enum bpf_access_type type,
				  const struct bpf_insn *src,
				  struct bpf_insn *dst,
				  struct bpf_prog *prog, u32 *target_size);
};
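
/* Illustrative sketch: the general shape of an is_valid_access()
 * callback.  A program type bounds-checks the offset, rejects writes
 * to read-only context fields, and can record the true field width
 * with bpf_ctx_record_field_size() so that narrow loads of a wide
 * field are permitted (bpf_ctx_narrow_access_ok() is declared in
 * <linux/filter.h>).  The context struct and field are hypothetical:
 *
 *	static bool example_is_valid_access(int off, int size,
 *					    enum bpf_access_type type,
 *					    const struct bpf_prog *prog,
 *					    struct bpf_insn_access_aux *info)
 *	{
 *		if (off < 0 || off >= sizeof(struct example_ctx))
 *			return false;
 *		if (type == BPF_WRITE)
 *			return false;	// context is read-only here
 *		if (off == offsetof(struct example_ctx, wide_field)) {
 *			bpf_ctx_record_field_size(info, 8);
 *			return bpf_ctx_narrow_access_ok(off, size, 8);
 *		}
 *		return size == sizeof(__u32);
 *	}
 */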

struct bpf_prog_offload_ops {
	/* verifier basic callbacks */
	int (*insn_hook)(struct bpf_verifier_env *env,
			 int insn_idx, int prev_insn_idx);
	int (*finalize)(struct bpf_verifier_env *env);
	/* verifier optimization callbacks (called after .finalize) */
	int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
			    struct bpf_insn *insn);
	int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
	/* program management callbacks */
	int (*prepare)(struct bpf_prog *prog);
	int (*translate)(struct bpf_prog *prog);
	void (*destroy)(struct bpf_prog *prog);
};

struct bpf_prog_offload {
	struct bpf_prog *prog;
	struct net_device *netdev;
	struct bpf_offload_dev *offdev;
	void *dev_priv;
	struct list_head offloads;
	bool dev_state;
	bool opt_failed;
	void *jited_image;
	u32 jited_len;
};

enum bpf_cgroup_storage_type {
	BPF_CGROUP_STORAGE_SHARED,
	BPF_CGROUP_STORAGE_PERCPU,
	__BPF_CGROUP_STORAGE_MAX
};

#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX

struct bpf_prog_stats {
	u64 cnt;
	u64 nsecs;
	struct u64_stats_sync syncp;
};

struct bpf_prog_aux {
	atomic_t refcnt;
	u32 used_map_cnt;
	u32 max_ctx_offset;
	u32 max_pkt_offset;
	u32 max_tp_access;
	u32 stack_depth;
	u32 id;
	u32 func_cnt; /* used by non-func prog as the number of func progs */
	u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
	bool verifier_zext; /* Zero extensions have been inserted by verifier. */
	bool offload_requested;
	struct bpf_prog **func;
	void *jit_data; /* JIT specific data. arch dependent */
	struct latch_tree_node ksym_tnode;
	struct list_head ksym_lnode;
	const struct bpf_prog_ops *ops;
	struct bpf_map **used_maps;
	struct bpf_prog *prog;
	struct user_struct *user;
	u64 load_time; /* ns since boottime */
	struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
	void *security;
#endif
	struct bpf_prog_offload *offload;
	struct btf *btf;
	struct bpf_func_info *func_info;
	/* bpf_line_info loaded from userspace. linfo->insn_off
	 * has the xlated insn offset.
	 * Both the main and sub prog share the same linfo.
	 * The subprog can access its first linfo by
	 * using the linfo_idx.
	 */
	struct bpf_line_info *linfo;
	/* jited_linfo is the jited addr of the linfo. It has a
	 * one to one mapping to linfo:
	 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
	 * Both the main and sub prog share the same jited_linfo.
	 * The subprog can access its first jited_linfo by
	 * using the linfo_idx.
	 */
	void **jited_linfo;
	u32 func_info_cnt;
	u32 nr_linfo;
	/* subprog can use linfo_idx to access its first linfo and
	 * jited_linfo.
	 * main prog always has linfo_idx == 0
	 */
	u32 linfo_idx;
	struct bpf_prog_stats __percpu *stats;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};

struct bpf_array {
	struct bpf_map map;
	u32 elem_size;
	u32 index_mask;
	/* 'ownership' of prog_array is claimed by the first program that
	 * is going to use this map or by the first program whose FD is
	 * stored in the map, to make sure that all callers and callees have
	 * the same prog_type and JITed flag.
	 */
	enum bpf_prog_type owner_prog_type;
	bool owner_jited;
	union {
		char value[0] __aligned(8);
		void *ptrs[0] __aligned(8);
		void __percpu *pptrs[0] __aligned(8);
	};
};

#define BPF_COMPLEXITY_LIMIT_INSNS	1000000 /* yes. 1M insns */
#define MAX_TAIL_CALL_CNT 32

#define BPF_F_ACCESS_MASK	(BPF_F_RDONLY |		\
				 BPF_F_RDONLY_PROG |	\
				 BPF_F_WRONLY |		\
				 BPF_F_WRONLY_PROG)

#define BPF_MAP_CAN_READ  BIT(0)
#define BPF_MAP_CAN_WRITE BIT(1)

static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
{
	u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);

	/* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
	 * not possible.
	 */
	if (access_flags & BPF_F_RDONLY_PROG)
		return BPF_MAP_CAN_READ;
	else if (access_flags & BPF_F_WRONLY_PROG)
		return BPF_MAP_CAN_WRITE;
	else
		return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
}

static inline bool bpf_map_flags_access_ok(u32 access_flags)
{
	return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
	       (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
}
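
/* Illustrative sketch (hypothetical map-type code): the two helpers
 * above work together.  bpf_map_flags_access_ok() rejects the
 * contradictory BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG combination at
 * map-creation time, and bpf_map_flags_to_cap() later tells the
 * verifier which direction of access a program may perform:
 *
 *	static int example_alloc_check(union bpf_attr *attr)
 *	{
 *		if (!bpf_map_flags_access_ok(attr->map_flags))
 *			return -EINVAL;
 *		return 0;
 *	}
 *
 *	// verifier-side check before allowing a helper to write:
 *	if (!(bpf_map_flags_to_cap(map) & BPF_MAP_CAN_WRITE))
 *		return -EACCES;
 */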

struct bpf_event_entry {
	struct perf_event *event;
	struct file *perf_file;
	struct file *map_file;
	struct rcu_head rcu;
};

bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);

const struct bpf_func_proto *bpf_get_trace_printk_proto(void);

typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
					unsigned long off, unsigned long len);
typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
					const struct bpf_insn *src,
					struct bpf_insn *dst,
					struct bpf_prog *prog,
					u32 *target_size);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);

/* an array of programs to be executed under the RCU read lock.
 *
 * Typical usage:
 * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN);
 *
 * the structure returned by bpf_prog_array_alloc() should be populated
 * with program pointers and the last pointer must be NULL.
 * The user has to keep refcnt on the program and make sure the program
 * is removed from the array before bpf_prog_put().
 * The 'struct bpf_prog_array *' should only be replaced with xchg()
 * since other cpus are walking the array of pointers in parallel.
 */
struct bpf_prog_array_item {
	struct bpf_prog *prog;
	struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array {
	struct rcu_head rcu;
	struct bpf_prog_array_item items[0];
};

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array *progs);
int bpf_prog_array_length(struct bpf_prog_array *progs);
int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
				__u32 __user *prog_ids, u32 cnt);

void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
				struct bpf_prog *old_prog);
int bpf_prog_array_copy_info(struct bpf_prog_array *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			struct bpf_prog_array **new_array);

#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null)	\
	({						\
		struct bpf_prog_array_item *_item;	\
		struct bpf_prog *_prog;			\
		struct bpf_prog_array *_array;		\
		u32 _ret = 1;				\
		preempt_disable();			\
		rcu_read_lock();			\
		_array = rcu_dereference(array);	\
		if (unlikely(check_non_null && !_array))\
			goto _out;			\
		_item = &_array->items[0];		\
		while ((_prog = READ_ONCE(_item->prog))) {		\
			bpf_cgroup_storage_set(_item->cgroup_storage);	\
			_ret &= func(_prog, ctx);	\
			_item++;			\
		}					\
_out:							\
		rcu_read_unlock();			\
		preempt_enable();			\
		_ret;					\
	 })

/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs
 * so BPF programs can request cwr for TCP packets.
 *
 * Current cgroup skb programs can only return 0 or 1 (0 to drop the
 * packet, 1 to keep it). This macro changes the behavior so the low
 * order bit indicates whether the packet should be dropped (0) or not (1)
 * and the next bit is a congestion notification bit. This could be
 * used by TCP to call tcp_enter_cwr().
 *
 * Hence, new allowed return values of CGROUP EGRESS BPF programs are:
 *   0: drop packet
 *   1: keep packet
 *   2: drop packet and cn
 *   3: keep packet and cn
 *
 * This macro then converts it to one of the NET_XMIT or an error
 * code that is then interpreted as drop packet (and no cn):
 *   0: NET_XMIT_SUCCESS  skb should be transmitted
 *   1: NET_XMIT_DROP     skb should be dropped and cn
 *   2: NET_XMIT_CN       skb should be transmitted and cn
 *   3: -EPERM            skb should be dropped
 */
#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func)	\
	({						\
		struct bpf_prog_array_item *_item;	\
		struct bpf_prog *_prog;			\
		struct bpf_prog_array *_array;		\
		u32 ret;				\
		u32 _ret = 1;				\
		u32 _cn = 0;				\
		preempt_disable();			\
		rcu_read_lock();			\
		_array = rcu_dereference(array);	\
		_item = &_array->items[0];		\
		while ((_prog = READ_ONCE(_item->prog))) {		\
			bpf_cgroup_storage_set(_item->cgroup_storage);	\
			ret = func(_prog, ctx);		\
			_ret &= (ret & 1);		\
			_cn |= (ret & 2);		\
			_item++;			\
		}					\
		rcu_read_unlock();			\
		preempt_enable();			\
		if (_ret)				\
			_ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS);	\
		else					\
			_ret = (_cn ? NET_XMIT_DROP : -EPERM);		\
		_ret;					\
	})

#define BPF_PROG_RUN_ARRAY(array, ctx, func)		\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, false)

#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func)	\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, true)
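
/* Illustrative sketch: how an attach point consumes the result.  All
 * programs in the array run under a single rcu_read_lock()/
 * preempt_disable() pair, and the return codes are AND-ed together,
 * so any program returning 0 vetoes the operation.  The names below
 * are hypothetical; cgroup attach points use this pattern:
 *
 *	u32 ret;
 *
 *	ret = BPF_PROG_RUN_ARRAY(attach_point->effective_array, skb,
 *				 BPF_PROG_RUN);
 *	return ret == 1 ? 0 : -EPERM;
 *
 * BPF_PROG_RUN_ARRAY_CHECK() additionally tolerates a NULL array
 * pointer and evaluates to 1 ("allow") in that case.
 */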

#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);

extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;

#define BPF_PROG_TYPE(_id, _name)				\
	extern const struct bpf_prog_ops _name ## _prog_ops;	\
	extern const struct bpf_verifier_ops _name ## _verifier_ops;
#define BPF_MAP_TYPE(_id, _ops) \
	extern const struct bpf_map_ops _ops;
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE

extern const struct bpf_prog_ops bpf_offload_prog_ops;
extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
extern const struct bpf_verifier_ops xdp_analyzer_ops;

struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv);
struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
int __bpf_prog_charge(struct user_struct *user, u32 pages);
void __bpf_prog_uncharge(struct user_struct *user, u32 pages);

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);

struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size);
void bpf_map_charge_finish(struct bpf_map_memory *mem);
void bpf_map_charge_move(struct bpf_map_memory *dst,
			 struct bpf_map_memory *src);
void *bpf_map_area_alloc(size_t size, int numa_node);
void bpf_map_area_free(void *base);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);

extern int sysctl_unprivileged_bpf_disabled;
extern int sysctl_bpf_stats_enabled;

int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);

int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
int bpf_obj_get_user(const char __user *pathname, int flags);

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 flags);
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 flags);

int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);

int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags);
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags);
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);

int bpf_get_file_flag(int flags);
int bpf_check_uarg_tail_zero(void __user *uaddr, size_t expected_size,
			     size_t actual_size);

/* memcpy that is used with 8-byte aligned pointers, a size that is a
 * multiple of 8, and forced to use 'long' read/writes to try to
 * atomically copy long counters. Best-effort only. No barriers here,
 * since it _will_ race with concurrent updates from BPF programs.
 * Called from the bpf syscall and mostly used with size 8 or 16 bytes,
 * so ask the compiler to inline it.
 */
static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
{
	const long *lsrc = src;
	long *ldst = dst;

	size /= sizeof(long);
	while (size--)
		*ldst++ = *lsrc++;
}
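
/* Illustrative sketch (hypothetical caller, modeled on the per-CPU map
 * copy paths in kernel/bpf/): syscall-side code snapshots per-CPU
 * values with bpf_long_memcpy() so each long-sized counter is read
 * tear-free, even though the snapshot as a whole still races with
 * running programs:
 *
 *	u32 size = round_up(map->value_size, 8);
 *	void __percpu *pptr = ...;	// per-CPU value storage
 *	u32 off = 0;
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu) {
 *		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
 *		off += size;
 *	}
 */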

/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr,
	      union bpf_attr __user *uattr);
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);

/* Map specifics */
struct xdp_buff;
struct sk_buff;

struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
void __dev_map_insert_ctx(struct bpf_map *map, u32 index);
void __dev_map_flush(struct bpf_map *map);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog);

struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
void __cpu_map_insert_ctx(struct bpf_map *map, u32 index);
void __cpu_map_flush(struct bpf_map *map);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
		    struct net_device *dev_rx);

/* Return map's numa specified by userspace */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
{
	return (attr->map_flags & BPF_F_NUMA_NODE) ?
		attr->numa_node : NUMA_NO_NODE;
}

struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
int array_map_alloc_check(union bpf_attr *attr);

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr);
#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
						     enum bpf_prog_type type,
						     bool attach_drv)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog,
							  int i)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_put(struct bpf_prog *prog)
{
}

static inline struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog *__must_check
bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	return 0;
}

static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
}

static inline int bpf_obj_get_user(const char __user *pathname, int flags)
{
	return -EOPNOTSUPP;
}

static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map,
						       u32 key)
{
	return NULL;
}

static inline void __dev_map_insert_ctx(struct bpf_map *map, u32 index)
{
}

static inline void __dev_map_flush(struct bpf_map *map)
{
}

struct xdp_buff;
struct bpf_dtab_netdev;

static inline
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return 0;
}

struct sk_buff;

static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
					   struct sk_buff *skb,
					   struct bpf_prog *xdp_prog)
{
	return 0;
}

static inline
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
	return NULL;
}

static inline void __cpu_map_insert_ctx(struct bpf_map *map, u32 index)
{
}

static inline void __cpu_map_flush(struct bpf_map *map)
{
}

static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
				  struct xdp_buff *xdp,
				  struct net_device *dev_rx)
{
	return 0;
}

static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
						      enum bpf_prog_type type)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
						   const union bpf_attr *kattr,
						   union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */

static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
						 enum bpf_prog_type type)
{
	return bpf_prog_get_type_dev(ufd, type, false);
}

bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);

int bpf_prog_offload_compile(struct bpf_prog *prog);
void bpf_prog_offload_destroy(struct bpf_prog *prog);
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog);

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags);
int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
int bpf_map_offload_get_next_key(struct bpf_map *map,
				 void *key, void *next_key);

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);

struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev);
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev);
bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);

#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);

static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
	return aux->offload_requested;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return unlikely(map->ops == &bpf_map_offload_ops);
}

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
void bpf_map_offload_map_free(struct bpf_map *map);
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
					union bpf_attr *attr)
{
	return -EOPNOTSUPP;
}

static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
{
	return false;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return false;
}

static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_map_offload_map_free(struct bpf_map *map)
{
}
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */

#if defined(CONFIG_BPF_STREAM_PARSER)
int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which);
int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
#else
static inline int sock_map_prog_update(struct bpf_map *map,
				       struct bpf_prog *prog, u32 which)
{
	return -EOPNOTSUPP;
}

static inline int sock_map_get_from_fd(const union bpf_attr *attr,
				       struct bpf_prog *prog)
{
	return -EINVAL;
}
#endif

#if defined(CONFIG_XDP_SOCKETS)
struct xdp_sock;
struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, u32 key);
int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
		       struct xdp_sock *xs);
void __xsk_map_flush(struct bpf_map *map);
#else
struct xdp_sock;
static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
						     u32 key)
{
	return NULL;
}

static inline int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
				     struct xdp_sock *xs)
{
	return -EOPNOTSUPP;
}

static inline void __xsk_map_flush(struct bpf_map *map)
{
}
#endif

#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags);
#else
static inline void bpf_sk_reuseport_detach(struct sock *sk)
{
}

#ifdef CONFIG_BPF_SYSCALL
static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
						     void *key, void *value)
{
	return -EOPNOTSUPP;
}

static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
						     void *key, void *value,
						     u64 map_flags)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */

/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_map_push_elem_proto;
extern const struct bpf_func_proto bpf_map_pop_elem_proto;
extern const struct bpf_func_proto bpf_map_peek_elem_proto;

extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_get_stack_proto;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
extern const struct bpf_func_proto bpf_spin_lock_proto;
extern const struct bpf_func_proto bpf_spin_unlock_proto;
extern const struct bpf_func_proto bpf_get_local_storage_proto;
extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;

/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

#if defined(CONFIG_NET)
bool bpf_sock_common_is_valid_access(int off, int size,
				     enum bpf_access_type type,
				     struct bpf_insn_access_aux *info);
bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
			      struct bpf_insn_access_aux *info);
u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
				const struct bpf_insn *si,
				struct bpf_insn *insn_buf,
				struct bpf_prog *prog,
				u32 *target_size);
#else
static inline bool bpf_sock_common_is_valid_access(int off, int size,
						   enum bpf_access_type type,
						   struct bpf_insn_access_aux *info)
{
	return false;
}
static inline bool bpf_sock_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    struct bpf_insn_access_aux *info)
{
	return false;
}
static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
					      const struct bpf_insn *si,
					      struct bpf_insn *insn_buf,
					      struct bpf_prog *prog,
					      u32 *target_size)
{
	return 0;
}
#endif

#ifdef CONFIG_INET
bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);
#else
static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
#endif /* CONFIG_INET */

#endif /* _LINUX_BPF_H */