/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1

#include <uapi/linux/bpf.h>

#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/mm_types.h>
#include <linux/wait.h>
#include <linux/u64_stats_sync.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/kallsyms.h>

struct bpf_verifier_env;
struct bpf_verifier_log;
struct perf_event;
struct bpf_prog;
struct bpf_prog_aux;
struct bpf_map;
struct sock;
struct seq_file;
struct btf;
struct btf_type;
struct exception_table_entry;

extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;

/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
	/* funcs callable from userspace (via syscall) */
	int (*map_alloc_check)(union bpf_attr *attr);
	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
	void (*map_release)(struct bpf_map *map, struct file *map_file);
	void (*map_free)(struct bpf_map *map);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
	void (*map_release_uref)(struct bpf_map *map);
	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
	int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_lookup_and_delete_batch)(struct bpf_map *map,
					   const union bpf_attr *attr,
					   union bpf_attr __user *uattr);
	int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);

	/* funcs callable from userspace and from eBPF programs */
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_map *map, void *key);
	int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
	int (*map_pop_elem)(struct bpf_map *map, void *value);
	int (*map_peek_elem)(struct bpf_map *map, void *value);

	/* funcs called by prog_array and perf_event_array map */
	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
				int fd);
	void (*map_fd_put_ptr)(void *ptr);
	u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
	u32 (*map_fd_sys_lookup_elem)(void *ptr);
	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
				  struct seq_file *m);
	int (*map_check_btf)(const struct bpf_map *map,
			     const struct btf *btf,
			     const struct btf_type *key_type,
			     const struct btf_type *value_type);

	/* Prog poke tracking helpers. */
	int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
			     struct bpf_prog *new);

	/* Direct value access helpers. */
	int (*map_direct_value_addr)(const struct bpf_map *map,
				     u64 *imm, u32 off);
	int (*map_direct_value_meta)(const struct bpf_map *map,
				     u64 imm, u32 *off);
	int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
};
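
/* Illustrative sketch (not in-tree code): a map type implements a subset of
 * the ops above and registers them via a static table; names with "my_" are
 * hypothetical, a real example is array_map_ops in kernel/bpf/arraymap.c:
 *
 *	static const struct bpf_map_ops my_map_ops = {
 *		.map_alloc_check  = my_map_alloc_check,
 *		.map_alloc        = my_map_alloc,
 *		.map_free         = my_map_free,
 *		.map_get_next_key = my_map_get_next_key,
 *		.map_lookup_elem  = my_map_lookup_elem,
 *		.map_update_elem  = my_map_update_elem,
 *		.map_delete_elem  = my_map_delete_elem,
 *	};
 */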

struct bpf_map_memory {
	u32 pages;
	struct user_struct *user;
};

struct bpf_map {
	/* The first two cachelines with read-mostly members of which some
	 * are also accessed in fast-path (e.g. ops, max_entries).
	 */
	const struct bpf_map_ops *ops ____cacheline_aligned;
	struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	enum bpf_map_type map_type;
	u32 key_size;
	u32 value_size;
	u32 max_entries;
	u32 map_flags;
	int spin_lock_off; /* >=0 valid offset, <0 error */
	u32 id;
	int numa_node;
	u32 btf_key_type_id;
	u32 btf_value_type_id;
	struct btf *btf;
	struct bpf_map_memory memory;
	char name[BPF_OBJ_NAME_LEN];
	u32 btf_vmlinux_value_type_id;
	bool unpriv_array;
	bool frozen; /* write-once; write-protected by freeze_mutex */
	/* 22 bytes hole */

	/* The 3rd and 4th cacheline with misc members to avoid false sharing
	 * particularly with refcounting.
	 */
	atomic64_t refcnt ____cacheline_aligned;
	atomic64_t usercnt;
	struct work_struct work;
	struct mutex freeze_mutex;
	u64 writecnt; /* writable mmap cnt; protected by freeze_mutex */
};

static inline bool map_value_has_spin_lock(const struct bpf_map *map)
{
	return map->spin_lock_off >= 0;
}

static inline void check_and_init_map_lock(struct bpf_map *map, void *dst)
{
	if (likely(!map_value_has_spin_lock(map)))
		return;
	*(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
		(struct bpf_spin_lock){};
}

/* copy everything but bpf_spin_lock */
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
	if (unlikely(map_value_has_spin_lock(map))) {
		u32 off = map->spin_lock_off;

		memcpy(dst, src, off);
		memcpy(dst + off + sizeof(struct bpf_spin_lock),
		       src + off + sizeof(struct bpf_spin_lock),
		       map->value_size - off - sizeof(struct bpf_spin_lock));
	} else {
		memcpy(dst, src, map->value_size);
	}
}
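
/* Illustration (hypothetical value layout, assuming spin_lock_off points
 * at 'lock'):
 *
 *	struct my_value {
 *		u64 hits;
 *		struct bpf_spin_lock lock;
 *		u64 bytes;
 *	};
 *
 * copy_map_value() then copies 'hits' and 'bytes' around the lock word, so
 * a concurrent bpf_spin_lock() holder never sees its lock overwritten by
 * the copy.
 */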
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src);
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);

struct bpf_offload_dev;
struct bpf_offloaded_map;

struct bpf_map_dev_ops {
	int (*map_get_next_key)(struct bpf_offloaded_map *map,
				void *key, void *next_key);
	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value);
	int (*map_update_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
};

struct bpf_offloaded_map {
	struct bpf_map map;
	struct net_device *netdev;
	const struct bpf_map_dev_ops *dev_ops;
	void *dev_priv;
	struct list_head offloads;
};

static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
{
	return container_of(map, struct bpf_offloaded_map, map);
}

static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
	return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
		map->ops->map_seq_show_elem;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type);

extern const struct bpf_map_ops bpf_map_offload_ops;

/* function argument constraints */
enum bpf_arg_type {
	ARG_DONTCARE = 0,	/* unused argument in helper function */

	/* the following constraints are used to prototype
	 * bpf_map_lookup/update/delete_elem() functions
	 */
	ARG_CONST_MAP_PTR,	/* const argument used as pointer to bpf_map */
	ARG_PTR_TO_MAP_KEY,	/* pointer to stack used as map key */
	ARG_PTR_TO_MAP_VALUE,	/* pointer to stack used as map value */
	ARG_PTR_TO_UNINIT_MAP_VALUE,	/* pointer to valid memory used to store a map value */
	ARG_PTR_TO_MAP_VALUE_OR_NULL,	/* pointer to stack used as map value or NULL */

	/* the following constraints are used to prototype bpf_memcmp() and
	 * other functions that access data on eBPF program stack
	 */
	ARG_PTR_TO_MEM,		/* pointer to valid memory (stack, packet, map value) */
	ARG_PTR_TO_MEM_OR_NULL,	/* pointer to valid memory or NULL */
	ARG_PTR_TO_UNINIT_MEM,	/* pointer to memory does not need to be initialized,
				 * helper function must fill all bytes or clear
				 * them in error case.
				 */

	ARG_CONST_SIZE,		/* number of bytes accessed from memory */
	ARG_CONST_SIZE_OR_ZERO,	/* number of bytes accessed from memory or 0 */

	ARG_PTR_TO_CTX,		/* pointer to context */
	ARG_PTR_TO_CTX_OR_NULL,	/* pointer to context or NULL */
	ARG_ANYTHING,		/* any (initialized) argument is ok */
	ARG_PTR_TO_SPIN_LOCK,	/* pointer to bpf_spin_lock */
	ARG_PTR_TO_SOCK_COMMON,	/* pointer to sock_common */
	ARG_PTR_TO_INT,		/* pointer to int */
	ARG_PTR_TO_LONG,	/* pointer to long */
	ARG_PTR_TO_SOCKET,	/* pointer to bpf_sock (fullsock) */
	ARG_PTR_TO_BTF_ID,	/* pointer to in-kernel struct */
};

/* type of values returned from helper functions */
enum bpf_return_type {
	RET_INTEGER,			/* function returns integer */
	RET_VOID,			/* function doesn't return anything */
	RET_PTR_TO_MAP_VALUE,		/* returns a pointer to map elem value */
	RET_PTR_TO_MAP_VALUE_OR_NULL,	/* returns a pointer to map elem value or NULL */
	RET_PTR_TO_SOCKET_OR_NULL,	/* returns a pointer to a socket or NULL */
	RET_PTR_TO_TCP_SOCK_OR_NULL,	/* returns a pointer to a tcp_sock or NULL */
	RET_PTR_TO_SOCK_COMMON_OR_NULL,	/* returns a pointer to a sock_common or NULL */
};

/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
 * instructions after verifying
 */
struct bpf_func_proto {
	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
	bool gpl_only;
	bool pkt_access;
	enum bpf_return_type ret_type;
	union {
		struct {
			enum bpf_arg_type arg1_type;
			enum bpf_arg_type arg2_type;
			enum bpf_arg_type arg3_type;
			enum bpf_arg_type arg4_type;
			enum bpf_arg_type arg5_type;
		};
		enum bpf_arg_type arg_type[5];
	};
	int *btf_id; /* BTF ids of arguments */
};
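
/* For example, the proto of the bpf_map_lookup_elem() helper looks roughly
 * like this (see kernel/bpf/helpers.c for the authoritative definition):
 *
 *	const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 *		.func		= bpf_map_lookup_elem,
 *		.gpl_only	= false,
 *		.pkt_access	= true,
 *		.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_KEY,
 *	};
 */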

/* bpf_context is an intentionally undefined structure. Pointer to bpf_context
 * is the first argument to eBPF programs.
 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
 */
struct bpf_context;

enum bpf_access_type {
	BPF_READ = 1,
	BPF_WRITE = 2
};

/* types of values stored in eBPF registers */
/* Pointer types represent:
 * pointer
 * pointer + imm
 * pointer + (u16) var
 * pointer + (u16) var + imm
 * if (range > 0) then [ptr, ptr + range - off) is safe to access
 * if (id > 0) means that some 'var' was added
 * if (off > 0) means that 'imm' was added
 */
enum bpf_reg_type {
	NOT_INIT = 0,		 /* nothing was written into register */
	SCALAR_VALUE,		 /* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		 /* reg points to bpf_context */
	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
	PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
	PTR_TO_STACK,		 /* reg == frame_pointer + offset */
	PTR_TO_PACKET_META,	 /* skb->data - meta_len */
	PTR_TO_PACKET,		 /* reg points to skb->data */
	PTR_TO_PACKET_END,	 /* skb->data + headlen */
	PTR_TO_FLOW_KEYS,	 /* reg points to bpf_flow_keys */
	PTR_TO_SOCKET,		 /* reg points to struct bpf_sock */
	PTR_TO_SOCKET_OR_NULL,	 /* reg points to struct bpf_sock or NULL */
	PTR_TO_SOCK_COMMON,	 /* reg points to sock_common */
	PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */
	PTR_TO_TCP_SOCK,	 /* reg points to struct tcp_sock */
	PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
	PTR_TO_TP_BUFFER,	 /* reg points to a writable raw tp's buffer */
	PTR_TO_XDP_SOCK,	 /* reg points to struct xdp_sock */
	PTR_TO_BTF_ID,		 /* reg points to kernel struct */
};
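
/* Sketch of the classic direct packet access pattern these types support
 * (BPF program side, XDP flavor; illustrative only):
 *
 *	void *data     = (void *)(long)ctx->data;	// PTR_TO_PACKET
 *	void *data_end = (void *)(long)ctx->data_end;	// PTR_TO_PACKET_END
 *	struct ethhdr *eth = data;
 *
 *	if (data + sizeof(*eth) > data_end)
 *		return XDP_DROP;
 *	// accesses to eth-> fields below are provably in bounds
 */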

/* The information passed from prog-specific *_is_valid_access
 * back to the verifier.
 */
struct bpf_insn_access_aux {
	enum bpf_reg_type reg_type;
	union {
		int ctx_field_size;
		u32 btf_id;
	};
	struct bpf_verifier_log *log; /* for verbose logs */
};

static inline void
bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
{
	aux->ctx_field_size = size;
}

struct bpf_prog_ops {
	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
			union bpf_attr __user *uattr);
};

struct bpf_verifier_ops {
	/* return eBPF function prototype for verification */
	const struct bpf_func_proto *
	(*get_func_proto)(enum bpf_func_id func_id,
			  const struct bpf_prog *prog);

	/* return true if 'size' wide access at offset 'off' within bpf_context
	 * with 'type' (read or write) is allowed
	 */
	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info);
	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
			    const struct bpf_prog *prog);
	int (*gen_ld_abs)(const struct bpf_insn *orig,
			  struct bpf_insn *insn_buf);
	u32 (*convert_ctx_access)(enum bpf_access_type type,
				  const struct bpf_insn *src,
				  struct bpf_insn *dst,
				  struct bpf_prog *prog, u32 *target_size);
	int (*btf_struct_access)(struct bpf_verifier_log *log,
				 const struct btf_type *t, int off, int size,
				 enum bpf_access_type atype,
				 u32 *next_btf_id);
};

struct bpf_prog_offload_ops {
	/* verifier basic callbacks */
	int (*insn_hook)(struct bpf_verifier_env *env,
			 int insn_idx, int prev_insn_idx);
	int (*finalize)(struct bpf_verifier_env *env);
	/* verifier optimization callbacks (called after .finalize) */
	int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
			    struct bpf_insn *insn);
	int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
	/* program management callbacks */
	int (*prepare)(struct bpf_prog *prog);
	int (*translate)(struct bpf_prog *prog);
	void (*destroy)(struct bpf_prog *prog);
};

struct bpf_prog_offload {
	struct bpf_prog *prog;
	struct net_device *netdev;
	struct bpf_offload_dev *offdev;
	void *dev_priv;
	struct list_head offloads;
	bool dev_state;
	bool opt_failed;
	void *jited_image;
	u32 jited_len;
};

enum bpf_cgroup_storage_type {
	BPF_CGROUP_STORAGE_SHARED,
	BPF_CGROUP_STORAGE_PERCPU,
	__BPF_CGROUP_STORAGE_MAX
};

#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX

/* The longest tracepoint has 12 args.
 * See include/trace/bpf_probe.h
 */
#define MAX_BPF_FUNC_ARGS 12

struct bpf_prog_stats {
	u64 cnt;
	u64 nsecs;
	struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));

struct btf_func_model {
	u8 ret_size;
	u8 nr_args;
	u8 arg_size[MAX_BPF_FUNC_ARGS];
};
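
/* Worked example: on a 64-bit kernel, a function like
 *
 *	int foo(struct sk_buff *skb, u32 len);
 *
 * would be modeled roughly as .ret_size = 4, .nr_args = 2 and
 * .arg_size = { 8, 4 } (pointer, then u32).
 */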

/* Restore arguments before returning from trampoline to let original function
 * continue executing. This flag is used for fentry progs when there are no
 * fexit progs.
 */
#define BPF_TRAMP_F_RESTORE_REGS	BIT(0)
/* Call original function after fentry progs, but before fexit progs.
 * Makes sense for fentry/fexit, normal calls and indirect calls.
 */
#define BPF_TRAMP_F_CALL_ORIG		BIT(1)
/* Skip current frame and return to parent. Makes sense for fentry/fexit
 * programs only. Should not be used with normal calls and indirect calls.
 */
#define BPF_TRAMP_F_SKIP_FRAME		BIT(2)

/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
 * bytes on x86. Pick a number to fit into BPF_IMAGE_SIZE / 2
 */
#define BPF_MAX_TRAMP_PROGS 40

struct bpf_tramp_progs {
	struct bpf_prog *progs[BPF_MAX_TRAMP_PROGS];
	int nr_progs;
};

/* Different use cases for BPF trampoline:
 * 1. replace nop at the function entry (kprobe equivalent)
 *    flags = BPF_TRAMP_F_RESTORE_REGS
 *    fentry = a set of programs to run before returning from trampoline
 *
 * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
 *    flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
 *    orig_call = fentry_ip + MCOUNT_INSN_SIZE
 *    fentry = a set of programs to run before calling original function
 *    fexit = a set of programs to run after original function
 *
 * 3. replace direct call instruction anywhere in the function body
 *    or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid)
 *    With flags = 0
 *      fentry = a set of programs to run before returning from trampoline
 *    With flags = BPF_TRAMP_F_CALL_ORIG
 *      orig_call = original callback addr or direct function addr
 *      fentry = a set of programs to run before calling original function
 *      fexit = a set of programs to run after original function
 */
int arch_prepare_bpf_trampoline(void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_progs *tprogs,
				void *orig_call);
/* these two functions are called from generated trampoline */
u64 notrace __bpf_prog_enter(void);
void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
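
/* Rough shape of what arch_prepare_bpf_trampoline() emits for use case 2
 * above (arch specific in reality; a sketch, not the generated code):
 *
 *	save function arguments on the stack
 *	for each fentry prog:
 *		start = __bpf_prog_enter();
 *		prog->bpf_func(args);
 *		__bpf_prog_exit(prog, start);
 *	call orig_call and save its return value	// BPF_TRAMP_F_CALL_ORIG
 *	run the fexit progs the same way
 *	pop own frame, return to the caller's caller	// BPF_TRAMP_F_SKIP_FRAME
 */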

struct bpf_ksym {
	unsigned long start;
	unsigned long end;
	char name[KSYM_NAME_LEN];
	struct list_head lnode;
	struct latch_tree_node tnode;
	bool prog;
};

enum bpf_tramp_prog_type {
	BPF_TRAMP_FENTRY,
	BPF_TRAMP_FEXIT,
	BPF_TRAMP_MODIFY_RETURN,
	BPF_TRAMP_MAX,
	BPF_TRAMP_REPLACE, /* more than MAX */
};

struct bpf_trampoline {
	/* hlist for trampoline_table */
	struct hlist_node hlist;
	/* serializes access to fields of this trampoline */
	struct mutex mutex;
	refcount_t refcnt;
	u64 key;
	struct {
		struct btf_func_model model;
		void *addr;
		bool ftrace_managed;
	} func;
	/* if !NULL this is a BPF_PROG_TYPE_EXT program that extends another
	 * BPF program by replacing one of its functions. func.addr is the
	 * address of the function it replaced.
	 */
	struct bpf_prog *extension_prog;
	/* list of BPF programs using this trampoline */
	struct hlist_head progs_hlist[BPF_TRAMP_MAX];
	/* Number of attached programs. A counter per kind. */
	int progs_cnt[BPF_TRAMP_MAX];
	/* Executable image of trampoline */
	void *image;
	u64 selector;
	struct bpf_ksym ksym;
};

#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */

struct bpf_dispatcher_prog {
	struct bpf_prog *prog;
	refcount_t users;
};

struct bpf_dispatcher {
	/* dispatcher mutex */
	struct mutex mutex;
	void *func;
	struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
	int num_progs;
	void *image;
	u32 image_off;
	struct bpf_ksym ksym;
};

static __always_inline unsigned int bpf_dispatcher_nop_func(
	const void *ctx,
	const struct bpf_insn *insnsi,
	unsigned int (*bpf_func)(const void *,
				 const struct bpf_insn *))
{
	return bpf_func(ctx, insnsi);
}
#ifdef CONFIG_BPF_JIT
struct bpf_trampoline *bpf_trampoline_lookup(u64 key);
int bpf_trampoline_link_prog(struct bpf_prog *prog);
int bpf_trampoline_unlink_prog(struct bpf_prog *prog);
void bpf_trampoline_put(struct bpf_trampoline *tr);
#define BPF_DISPATCHER_INIT(_name) {				\
	.mutex = __MUTEX_INITIALIZER(_name.mutex),		\
	.func = &_name##_func,					\
	.progs = {},						\
	.num_progs = 0,						\
	.image = NULL,						\
	.image_off = 0,						\
	.ksym = {						\
		.name  = #_name,				\
		.lnode = LIST_HEAD_INIT(_name.ksym.lnode),	\
	},							\
}

#define DEFINE_BPF_DISPATCHER(name)					\
	noinline unsigned int bpf_dispatcher_##name##_func(		\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		unsigned int (*bpf_func)(const void *,			\
					 const struct bpf_insn *))	\
	{								\
		return bpf_func(ctx, insnsi);				\
	}								\
	EXPORT_SYMBOL(bpf_dispatcher_##name##_func);			\
	struct bpf_dispatcher bpf_dispatcher_##name =			\
		BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
#define DECLARE_BPF_DISPATCHER(name)					\
	unsigned int bpf_dispatcher_##name##_func(			\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		unsigned int (*bpf_func)(const void *,			\
					 const struct bpf_insn *));	\
	extern struct bpf_dispatcher bpf_dispatcher_##name;
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
				struct bpf_prog *to);
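
/* Usage sketch: a subsystem instantiates one dispatcher (XDP is the in-tree
 * user, see DEFINE_BPF_DISPATCHER(xdp) in net/core/filter.c), dispatches
 * through BPF_DISPATCHER_FUNC() on the fast path and re-JITs the dispatch
 * trampoline on attach/detach with something like:
 *
 *	bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp), prev_prog, prog);
 */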
/* Called only from JIT-enabled code, so there's no need for stubs. */
void *bpf_jit_alloc_exec_page(void);
void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym);
#else
static inline struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
	return NULL;
}
static inline int bpf_trampoline_link_prog(struct bpf_prog *prog)
{
	return -ENOTSUPP;
}
static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog)
{
	return -ENOTSUPP;
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
#define DECLARE_BPF_DISPATCHER(name)
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func
#define BPF_DISPATCHER_PTR(name) NULL
static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
					      struct bpf_prog *from,
					      struct bpf_prog *to) {}
static inline bool is_bpf_image_address(unsigned long address)
{
	return false;
}
#endif

struct bpf_func_info_aux {
	u16 linkage;
	bool unreliable;
};

enum bpf_jit_poke_reason {
	BPF_POKE_REASON_TAIL_CALL,
};

/* Descriptor of pokes pointing /into/ the JITed image. */
struct bpf_jit_poke_descriptor {
	void *ip;
	union {
		struct {
			struct bpf_map *map;
			u32 key;
		} tail_call;
	};
	bool ip_stable;
	u8 adj_off;
	u16 reason;
};
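
/* Simplified tail call patching flow: the JIT records the address of the
 * emitted jump in desc->ip; when slot 'key' of the prog array changes,
 * ->map_poke_run() retargets every tracked site in place, conceptually:
 *
 *	bpf_arch_text_poke(desc->ip, BPF_MOD_JUMP, old_addr, new_addr);
 */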

struct bpf_prog_aux {
	atomic64_t refcnt;
	u32 used_map_cnt;
	u32 max_ctx_offset;
	u32 max_pkt_offset;
	u32 max_tp_access;
	u32 stack_depth;
	u32 id;
	u32 func_cnt; /* used by non-func prog as the number of func progs */
	u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
	u32 attach_btf_id; /* in-kernel BTF type id to attach to */
	struct bpf_prog *linked_prog;
	bool verifier_zext; /* Zero extensions have been inserted by verifier. */
	bool offload_requested;
	bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
	bool func_proto_unreliable;
	enum bpf_tramp_prog_type trampoline_prog_type;
	struct bpf_trampoline *trampoline;
	struct hlist_node tramp_hlist;
	/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
	const struct btf_type *attach_func_proto;
	/* function name for valid attach_btf_id */
	const char *attach_func_name;
	struct bpf_prog **func;
	void *jit_data; /* JIT specific data. arch dependent */
	struct bpf_jit_poke_descriptor *poke_tab;
	u32 size_poke_tab;
	struct bpf_ksym ksym;
	const struct bpf_prog_ops *ops;
	struct bpf_map **used_maps;
	struct bpf_prog *prog;
	struct user_struct *user;
	u64 load_time; /* ns since boottime */
	struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
	void *security;
#endif
	struct bpf_prog_offload *offload;
	struct btf *btf;
	struct bpf_func_info *func_info;
	struct bpf_func_info_aux *func_info_aux;
	/* bpf_line_info loaded from userspace. linfo->insn_off
	 * has the xlated insn offset.
	 * Both the main and sub prog share the same linfo.
	 * The subprog can access its first linfo by
	 * using the linfo_idx.
	 */
	struct bpf_line_info *linfo;
	/* jited_linfo is the jited addr of the linfo. It has a
	 * one to one mapping to linfo:
	 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
	 * Both the main and sub prog share the same jited_linfo.
	 * The subprog can access its first jited_linfo by
	 * using the linfo_idx.
	 */
	void **jited_linfo;
	u32 func_info_cnt;
	u32 nr_linfo;
	/* subprog can use linfo_idx to access its first linfo and
	 * jited_linfo.
	 * main prog always has linfo_idx == 0
	 */
	u32 linfo_idx;
	u32 num_exentries;
	struct exception_table_entry *extable;
	struct bpf_prog_stats __percpu *stats;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};

struct bpf_array_aux {
	/* 'Ownership' of prog array is claimed by the first program that
	 * is going to use this map or by the first program whose FD is
	 * stored in the map to make sure that all callers and callees have
	 * the same prog type and JITed flag.
	 */
	enum bpf_prog_type type;
	bool jited;
	/* Programs with direct jumps into programs part of this array. */
	struct list_head poke_progs;
	struct bpf_map *map;
	struct mutex poke_mutex;
	struct work_struct work;
};

struct bpf_struct_ops_value;
struct btf_type;
struct btf_member;

#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
struct bpf_struct_ops {
	const struct bpf_verifier_ops *verifier_ops;
	int (*init)(struct btf *btf);
	int (*check_member)(const struct btf_type *t,
			    const struct btf_member *member);
	int (*init_member)(const struct btf_type *t,
			   const struct btf_member *member,
			   void *kdata, const void *udata);
	int (*reg)(void *kdata);
	void (*unreg)(void *kdata);
	const struct btf_type *type;
	const struct btf_type *value_type;
	const char *name;
	struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
	u32 type_id;
	u32 value_id;
};
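
/* Sketch of how a subsystem exposes an ops struct to BPF; names with "my_"
 * are hypothetical, the in-tree example is tcp_congestion_ops (see
 * net/ipv4/bpf_tcp_ca.c):
 *
 *	struct bpf_struct_ops bpf_my_ops = {
 *		.verifier_ops	= &bpf_my_verifier_ops,
 *		.init		= bpf_my_ops_init,
 *		.check_member	= bpf_my_ops_check_member,
 *		.init_member	= bpf_my_ops_init_member,
 *		.reg		= bpf_my_ops_reg,
 *		.unreg		= bpf_my_ops_unreg,
 *		.name		= "my_ops",
 *	};
 */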

#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log);
bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		return bpf_struct_ops_get(data);
	else
		return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		bpf_struct_ops_put(data);
	else
		module_put(owner);
}
#else
static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
	return NULL;
}
static inline void bpf_struct_ops_init(struct btf *btf,
				       struct bpf_verifier_log *log)
{
}
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	module_put(owner);
}
static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
						     void *key,
						     void *value)
{
	return -EINVAL;
}
#endif

struct bpf_array {
	struct bpf_map map;
	u32 elem_size;
	u32 index_mask;
	struct bpf_array_aux *aux;
	union {
		char value[0] __aligned(8);
		void *ptrs[0] __aligned(8);
		void __percpu *pptrs[0] __aligned(8);
	};
};
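
/* Element access sketch (what the array map lookup boils down to); the
 * AND with index_mask also bounds speculative out-of-bounds access when
 * unpriv_array is set:
 *
 *	return array->value + array->elem_size * (index & array->index_mask);
 */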
881 */ 882 struct bpf_prog_array_item { 883 struct bpf_prog *prog; 884 struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; 885 }; 886 887 struct bpf_prog_array { 888 struct rcu_head rcu; 889 struct bpf_prog_array_item items[]; 890 }; 891 892 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags); 893 void bpf_prog_array_free(struct bpf_prog_array *progs); 894 int bpf_prog_array_length(struct bpf_prog_array *progs); 895 bool bpf_prog_array_is_empty(struct bpf_prog_array *array); 896 int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs, 897 __u32 __user *prog_ids, u32 cnt); 898 899 void bpf_prog_array_delete_safe(struct bpf_prog_array *progs, 900 struct bpf_prog *old_prog); 901 int bpf_prog_array_copy_info(struct bpf_prog_array *array, 902 u32 *prog_ids, u32 request_cnt, 903 u32 *prog_cnt); 904 int bpf_prog_array_copy(struct bpf_prog_array *old_array, 905 struct bpf_prog *exclude_prog, 906 struct bpf_prog *include_prog, 907 struct bpf_prog_array **new_array); 908 909 #define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null) \ 910 ({ \ 911 struct bpf_prog_array_item *_item; \ 912 struct bpf_prog *_prog; \ 913 struct bpf_prog_array *_array; \ 914 u32 _ret = 1; \ 915 migrate_disable(); \ 916 rcu_read_lock(); \ 917 _array = rcu_dereference(array); \ 918 if (unlikely(check_non_null && !_array))\ 919 goto _out; \ 920 _item = &_array->items[0]; \ 921 while ((_prog = READ_ONCE(_item->prog))) { \ 922 bpf_cgroup_storage_set(_item->cgroup_storage); \ 923 _ret &= func(_prog, ctx); \ 924 _item++; \ 925 } \ 926 _out: \ 927 rcu_read_unlock(); \ 928 migrate_enable(); \ 929 _ret; \ 930 }) 931 932 /* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs 933 * so BPF programs can request cwr for TCP packets. 934 * 935 * Current cgroup skb programs can only return 0 or 1 (0 to drop the 936 * packet. This macro changes the behavior so the low order bit 937 * indicates whether the packet should be dropped (0) or not (1) 938 * and the next bit is a congestion notification bit. This could be 939 * used by TCP to call tcp_enter_cwr() 940 * 941 * Hence, new allowed return values of CGROUP EGRESS BPF programs are: 942 * 0: drop packet 943 * 1: keep packet 944 * 2: drop packet and cn 945 * 3: keep packet and cn 946 * 947 * This macro then converts it to one of the NET_XMIT or an error 948 * code that is then interpreted as drop packet (and no cn): 949 * 0: NET_XMIT_SUCCESS skb should be transmitted 950 * 1: NET_XMIT_DROP skb should be dropped and cn 951 * 2: NET_XMIT_CN skb should be transmitted and cn 952 * 3: -EPERM skb should be dropped 953 */ 954 #define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func) \ 955 ({ \ 956 struct bpf_prog_array_item *_item; \ 957 struct bpf_prog *_prog; \ 958 struct bpf_prog_array *_array; \ 959 u32 ret; \ 960 u32 _ret = 1; \ 961 u32 _cn = 0; \ 962 migrate_disable(); \ 963 rcu_read_lock(); \ 964 _array = rcu_dereference(array); \ 965 _item = &_array->items[0]; \ 966 while ((_prog = READ_ONCE(_item->prog))) { \ 967 bpf_cgroup_storage_set(_item->cgroup_storage); \ 968 ret = func(_prog, ctx); \ 969 _ret &= (ret & 1); \ 970 _cn |= (ret & 2); \ 971 _item++; \ 972 } \ 973 rcu_read_unlock(); \ 974 migrate_enable(); \ 975 if (_ret) \ 976 _ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \ 977 else \ 978 _ret = (_cn ? 

struct bpf_event_entry {
	struct perf_event *event;
	struct file *perf_file;
	struct file *map_file;
	struct rcu_head rcu;
};

bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);
const char *kernel_type_name(u32 btf_type_id);

const struct bpf_func_proto *bpf_get_trace_printk_proto(void);

typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
					unsigned long off, unsigned long len);
typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
					const struct bpf_insn *src,
					struct bpf_insn *dst,
					struct bpf_prog *prog,
					u32 *target_size);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);

/* an array of programs to be executed under rcu_lock.
 *
 * Typical usage:
 * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN);
 *
 * the structure returned by bpf_prog_array_alloc() should be populated
 * with program pointers and the last pointer must be NULL.
 * The user has to keep refcnt on the program and make sure the program
 * is removed from the array before bpf_prog_put().
 * The 'struct bpf_prog_array *' should only be replaced with xchg()
 * since other cpus are walking the array of pointers in parallel.
 */
struct bpf_prog_array_item {
	struct bpf_prog *prog;
	struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array {
	struct rcu_head rcu;
	struct bpf_prog_array_item items[];
};

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array *progs);
int bpf_prog_array_length(struct bpf_prog_array *progs);
bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
				__u32 __user *prog_ids, u32 cnt);

void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
				struct bpf_prog *old_prog);
int bpf_prog_array_copy_info(struct bpf_prog_array *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			struct bpf_prog_array **new_array);

#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null)	\
	({						\
		struct bpf_prog_array_item *_item;	\
		struct bpf_prog *_prog;			\
		struct bpf_prog_array *_array;		\
		u32 _ret = 1;				\
		migrate_disable();			\
		rcu_read_lock();			\
		_array = rcu_dereference(array);	\
		if (unlikely(check_non_null && !_array))\
			goto _out;			\
		_item = &_array->items[0];		\
		while ((_prog = READ_ONCE(_item->prog))) {		\
			bpf_cgroup_storage_set(_item->cgroup_storage);	\
			_ret &= func(_prog, ctx);	\
			_item++;			\
		}					\
_out:							\
		rcu_read_unlock();			\
		migrate_enable();			\
		_ret;					\
	 })

/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs
 * so BPF programs can request cwr for TCP packets.
 *
 * Current cgroup skb programs can only return 0 or 1 (0 to drop the
 * packet, 1 to keep it). This macro changes the behavior so the low order
 * bit indicates whether the packet should be dropped (0) or not (1)
 * and the next bit is a congestion notification bit. This could be
 * used by TCP to call tcp_enter_cwr()
 *
 * Hence, new allowed return values of CGROUP EGRESS BPF programs are:
 *   0: drop packet
 *   1: keep packet
 *   2: drop packet and cn
 *   3: keep packet and cn
 *
 * This macro then converts it to one of the NET_XMIT or an error
 * code that is then interpreted as drop packet (and no cn):
 *   0: NET_XMIT_SUCCESS  skb should be transmitted
 *   1: NET_XMIT_DROP     skb should be dropped and cn
 *   2: NET_XMIT_CN       skb should be transmitted and cn
 *   3: -EPERM            skb should be dropped
 */
#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func)		\
	({						\
		struct bpf_prog_array_item *_item;	\
		struct bpf_prog *_prog;			\
		struct bpf_prog_array *_array;		\
		u32 ret;				\
		u32 _ret = 1;				\
		u32 _cn = 0;				\
		migrate_disable();			\
		rcu_read_lock();			\
		_array = rcu_dereference(array);	\
		_item = &_array->items[0];		\
		while ((_prog = READ_ONCE(_item->prog))) {		\
			bpf_cgroup_storage_set(_item->cgroup_storage);	\
			ret = func(_prog, ctx);		\
			_ret &= (ret & 1);		\
			_cn |= (ret & 2);		\
			_item++;			\
		}					\
		rcu_read_unlock();			\
		migrate_enable();			\
		if (_ret)				\
			_ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS);	\
		else					\
			_ret = (_cn ? NET_XMIT_DROP : -EPERM);		\
		_ret;					\
	})
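
/* Worked example: a single egress prog returning 3 ("keep packet and cn")
 * yields _ret = 3 & 1 = 1 and _cn = 3 & 2 != 0, so the macro evaluates to
 * NET_XMIT_CN: the skb is transmitted and TCP may react with
 * tcp_enter_cwr().
 */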

#define BPF_PROG_RUN_ARRAY(array, ctx, func)		\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, false)

#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func)	\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, true)

#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
extern struct mutex bpf_stats_enabled_mutex;

/*
 * Block execution of BPF programs attached to instrumentation (perf,
 * kprobes, tracepoints) to prevent deadlocks on map operations as any of
 * these events can happen inside a region which holds a map bucket lock
 * and can deadlock on it.
 *
 * Use the preemption safe inc/dec variants on RT because migrate disable
 * is preemptible on RT and preemption in the middle of the RMW operation
 * might lead to inconsistent state. Use the raw variants for non RT
 * kernels as migrate_disable() maps to preempt_disable() so the slightly
 * more expensive save operation can be avoided.
 */
static inline void bpf_disable_instrumentation(void)
{
	migrate_disable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		this_cpu_inc(bpf_prog_active);
	else
		__this_cpu_inc(bpf_prog_active);
}

static inline void bpf_enable_instrumentation(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		this_cpu_dec(bpf_prog_active);
	else
		__this_cpu_dec(bpf_prog_active);
	migrate_enable();
}
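
/* Illustrative pattern around a map operation reachable from
 * instrumentation (simplified from the syscall path):
 *
 *	bpf_disable_instrumentation();
 *	err = map->ops->map_update_elem(map, key, value, flags);
 *	bpf_enable_instrumentation();
 */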

extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;

#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)	\
	extern const struct bpf_prog_ops _name ## _prog_ops;	\
	extern const struct bpf_verifier_ops _name ## _verifier_ops;
#define BPF_MAP_TYPE(_id, _ops) \
	extern const struct bpf_map_ops _ops;
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE

extern const struct bpf_prog_ops bpf_offload_prog_ops;
extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
extern const struct bpf_verifier_ops xdp_analyzer_ops;

struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv);
void bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
void bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
int __bpf_prog_charge(struct user_struct *user, u32 pages);
void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
void __bpf_free_used_maps(struct bpf_prog_aux *aux,
			  struct bpf_map **used_maps, u32 len);

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);

struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
void bpf_map_inc(struct bpf_map *map);
void bpf_map_inc_with_uref(struct bpf_map *map);
struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size);
void bpf_map_charge_finish(struct bpf_map_memory *mem);
void bpf_map_charge_move(struct bpf_map_memory *dst,
			 struct bpf_map_memory *src);
void *bpf_map_area_alloc(u64 size, int numa_node);
void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
void bpf_map_area_free(void *base);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
int generic_map_lookup_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr);
int generic_map_update_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr);
int generic_map_delete_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr);

extern int sysctl_unprivileged_bpf_disabled;

int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);

struct bpf_link {
	atomic64_t refcnt;
	u32 id;
	enum bpf_link_type type;
	const struct bpf_link_ops *ops;
	struct bpf_prog *prog;
	struct work_struct work;
};

struct bpf_link_primer {
	struct bpf_link *link;
	struct file *file;
	int fd;
	u32 id;
};

struct bpf_link_ops {
	void (*release)(struct bpf_link *link);
	void (*dealloc)(struct bpf_link *link);
	int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
			   struct bpf_prog *old_prog);
	void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
	int (*fill_link_info)(const struct bpf_link *link,
			      struct bpf_link_info *info);
};

void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
		   const struct bpf_link_ops *ops, struct bpf_prog *prog);
int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
int bpf_link_settle(struct bpf_link_primer *primer);
void bpf_link_cleanup(struct bpf_link_primer *primer);
void bpf_link_inc(struct bpf_link *link);
void bpf_link_put(struct bpf_link *link);
int bpf_link_new_fd(struct bpf_link *link);
struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd);
struct bpf_link *bpf_link_get_from_fd(u32 ufd);
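
/* Sketch of the expected link creation flow (names are illustrative, error
 * handling trimmed):
 *
 *	bpf_link_init(&mylink->link, type, &my_link_ops, prog);
 *	err = bpf_link_prime(&mylink->link, &primer);	// reserves fd + file
 *	if (err) { kfree(mylink); return err; }
 *	err = do_the_actual_attach(mylink);
 *	if (err) { bpf_link_cleanup(&primer); return err; }
 *	return bpf_link_settle(&primer);	// publishes fd to userspace
 */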

int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
int bpf_obj_get_user(const char __user *pathname, int flags);

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 flags);
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 flags);

int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);

int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags);
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags);
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);

int bpf_get_file_flag(int flags);
int bpf_check_uarg_tail_zero(void __user *uaddr, size_t expected_size,
			     size_t actual_size);

/* memcpy that is used with 8-byte aligned pointers, a size that is a
 * multiple of sizeof(long), and is forced to use 'long' read/writes to try
 * to atomically copy long counters. Best-effort only. No barriers here,
 * since it _will_ race with concurrent updates from BPF programs. Called
 * from bpf syscall and mostly used with size 8 or 16 bytes, so ask compiler
 * to inline it.
 */
static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
{
	const long *lsrc = src;
	long *ldst = dst;

	size /= sizeof(long);
	while (size--)
		*ldst++ = *lsrc++;
}
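
/* Typical (illustrative) use: copying per-cpu counters out in
 * bpf_percpu_hash_copy() and friends, with the value size rounded up to a
 * multiple of 8 first, e.g.:
 *
 *	bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu),
 *			roundup(map->value_size, 8));
 */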

/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr,
	      union bpf_attr __user *uattr);
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);

/* Map specifics */
struct xdp_buff;
struct sk_buff;

struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key);
void __dev_flush(void);
int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
		    struct net_device *dev_rx);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog);

struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
void __cpu_map_flush(void);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
		    struct net_device *dev_rx);
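
/* Illustrative XDP_REDIRECT flow through a devmap (simplified):
 *
 *	dst = __dev_map_lookup_elem(map, index);
 *	dev_map_enqueue(dst, xdp, dev_rx);	// lands in a per-cpu bulk queue
 *	...
 *	__dev_flush();				// kicked at the end of NAPI poll
 */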

/* Return map's numa specified by userspace */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
{
	return (attr->map_flags & BPF_F_NUMA_NODE) ?
		attr->numa_node : NUMA_NO_NODE;
}

struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
int array_map_alloc_check(union bpf_attr *attr);

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr);
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr);
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
		    const struct bpf_prog *prog,
		    struct bpf_insn_access_aux *info);
int btf_struct_access(struct bpf_verifier_log *log,
		      const struct btf_type *t, int off, int size,
		      enum bpf_access_type atype,
		      u32 *next_btf_id);
int btf_resolve_helper_id(struct bpf_verifier_log *log,
			  const struct bpf_func_proto *fn, int);

int btf_distill_func_proto(struct bpf_verifier_log *log,
			   struct btf *btf,
			   const struct btf_type *func_proto,
			   const char *func_name,
			   struct btf_func_model *m);

struct bpf_reg_state;
int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
			     struct bpf_reg_state *regs);
int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
			  struct bpf_reg_state *reg);
int btf_check_type_match(struct bpf_verifier_env *env, struct bpf_prog *prog,
			 struct btf *btf, const struct btf_type *t);

struct bpf_prog *bpf_prog_by_id(u32 id);

const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id);
#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
						     enum bpf_prog_type type,
						     bool attach_drv)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_prog_add(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_put(struct bpf_prog *prog)
{
}

static inline void bpf_prog_inc(struct bpf_prog *prog)
{
}

static inline struct bpf_prog *__must_check
bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	return 0;
}

static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
}

static inline int bpf_obj_get_user(const char __user *pathname, int flags)
{
	return -EOPNOTSUPP;
}

static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map,
						       u32 key)
{
	return NULL;
}

static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map,
							    u32 key)
{
	return NULL;
}

static inline void __dev_flush(void)
{
}

struct xdp_buff;
struct bpf_dtab_netdev;

static inline
int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return 0;
}

static inline
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return 0;
}

struct sk_buff;

static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
					   struct sk_buff *skb,
					   struct bpf_prog *xdp_prog)
{
	return 0;
}

static inline
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
	return NULL;
}

static inline void __cpu_map_flush(void)
{
}

static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
				  struct xdp_buff *xdp,
				  struct net_device *dev_rx)
{
	return 0;
}

static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
						      enum bpf_prog_type type)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog,
					    const union bpf_attr *kattr,
					    union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
						   const union bpf_attr *kattr,
						   union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline void bpf_map_put(struct bpf_map *map)
{
}

static inline struct bpf_prog *bpf_prog_by_id(u32 id)
{
	return ERR_PTR(-ENOTSUPP);
}

static inline const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
	return NULL;
}
#endif /* CONFIG_BPF_SYSCALL */

static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
						 enum bpf_prog_type type)
{
	return bpf_prog_get_type_dev(ufd, type, false);
}

bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);

int bpf_prog_offload_compile(struct bpf_prog *prog);
void bpf_prog_offload_destroy(struct bpf_prog *prog);
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog);

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags);
int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
int bpf_map_offload_get_next_key(struct bpf_map *map,
				 void *key, void *next_key);

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);

struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev);
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev);
bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);

#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);

static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
	return aux->offload_requested;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return unlikely(map->ops == &bpf_map_offload_ops);
}

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
void bpf_map_offload_map_free(struct bpf_map *map);
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
					union bpf_attr *attr)
{
	return -EOPNOTSUPP;
}

static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
{
	return false;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return false;
}

static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_map_offload_map_free(struct bpf_map *map)
{
}
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */

#if defined(CONFIG_BPF_STREAM_PARSER)
int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which);
int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
void sock_map_unhash(struct sock *sk);
void sock_map_close(struct sock *sk, long timeout);
#else
static inline int sock_map_prog_update(struct bpf_map *map,
				       struct bpf_prog *prog, u32 which)
{
	return -EOPNOTSUPP;
}

static inline int sock_map_get_from_fd(const union bpf_attr *attr,
				       struct bpf_prog *prog)
{
	return -EINVAL;
}
#endif /* CONFIG_BPF_STREAM_PARSER */

#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags);
#else
static inline void bpf_sk_reuseport_detach(struct sock *sk)
{
}

#ifdef CONFIG_BPF_SYSCALL
static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
						     void *key, void *value)
{
	return -EOPNOTSUPP;
}

static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
						     void *key, void *value,
						     u64 map_flags)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */

/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_map_push_elem_proto;
extern const struct bpf_func_proto bpf_map_pop_elem_proto;
extern const struct bpf_func_proto bpf_map_peek_elem_proto;

extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_get_stack_proto;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
extern const struct bpf_func_proto bpf_spin_lock_proto;
extern const struct bpf_func_proto bpf_spin_unlock_proto;
extern const struct bpf_func_proto bpf_get_local_storage_proto;
extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;
extern const struct bpf_func_proto bpf_tcp_sock_proto;
extern const struct bpf_func_proto bpf_jiffies64_proto;
extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_event_output_data_proto;

const struct bpf_func_proto *bpf_tracing_func_proto(
	enum bpf_func_id func_id, const struct bpf_prog *prog);

/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

#if defined(CONFIG_NET)
bool bpf_sock_common_is_valid_access(int off, int size,
				     enum bpf_access_type type,
				     struct bpf_insn_access_aux *info);
bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
			      struct bpf_insn_access_aux *info);
u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
				const struct bpf_insn *si,
				struct bpf_insn *insn_buf,
				struct bpf_prog *prog,
				u32 *target_size);
#else
static inline bool bpf_sock_common_is_valid_access(int off, int size,
						   enum bpf_access_type type,
						   struct bpf_insn_access_aux *info)
{
	return false;
}
static inline bool bpf_sock_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    struct bpf_insn_access_aux *info)
{
	return false;
}
static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
					      const struct bpf_insn *si,
					      struct bpf_insn *insn_buf,
					      struct bpf_prog *prog,
					      u32 *target_size)
{
	return 0;
}
#endif

#ifdef CONFIG_INET
struct sk_reuseport_kern {
	struct sk_buff *skb;
	struct sock *sk;
	struct sock *selected_sk;
	void *data_end;
	u32 hash;
	u32 reuseport_id;
	bool bind_inany;
};
bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);

bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);
#else
static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
#endif /* CONFIG_INET */

enum bpf_text_poke_type {
	BPF_MOD_CALL,
	BPF_MOD_JUMP,
};

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *addr1, void *addr2);

#endif /* _LINUX_BPF_H */