1 /* SPDX-License-Identifier: GPL-2.0-only */ 2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com 3 */ 4 #ifndef _LINUX_BPF_H 5 #define _LINUX_BPF_H 1 6 7 #include <uapi/linux/bpf.h> 8 9 #include <linux/workqueue.h> 10 #include <linux/file.h> 11 #include <linux/percpu.h> 12 #include <linux/err.h> 13 #include <linux/rbtree_latch.h> 14 #include <linux/numa.h> 15 #include <linux/mm_types.h> 16 #include <linux/wait.h> 17 #include <linux/u64_stats_sync.h> 18 #include <linux/refcount.h> 19 #include <linux/mutex.h> 20 #include <linux/module.h> 21 #include <linux/kallsyms.h> 22 #include <linux/capability.h> 23 24 struct bpf_verifier_env; 25 struct bpf_verifier_log; 26 struct perf_event; 27 struct bpf_prog; 28 struct bpf_prog_aux; 29 struct bpf_map; 30 struct sock; 31 struct seq_file; 32 struct btf; 33 struct btf_type; 34 struct exception_table_entry; 35 struct seq_operations; 36 struct bpf_iter_aux_info; 37 struct bpf_local_storage; 38 struct bpf_local_storage_map; 39 40 extern struct idr btf_idr; 41 extern spinlock_t btf_idr_lock; 42 43 typedef int (*bpf_iter_init_seq_priv_t)(void *private_data, 44 struct bpf_iter_aux_info *aux); 45 typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data); 46 struct bpf_iter_seq_info { 47 const struct seq_operations *seq_ops; 48 bpf_iter_init_seq_priv_t init_seq_private; 49 bpf_iter_fini_seq_priv_t fini_seq_private; 50 u32 seq_priv_size; 51 }; 52 53 /* map is generic key/value storage optionally accesible by eBPF programs */ 54 struct bpf_map_ops { 55 /* funcs callable from userspace (via syscall) */ 56 int (*map_alloc_check)(union bpf_attr *attr); 57 struct bpf_map *(*map_alloc)(union bpf_attr *attr); 58 void (*map_release)(struct bpf_map *map, struct file *map_file); 59 void (*map_free)(struct bpf_map *map); 60 int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key); 61 void (*map_release_uref)(struct bpf_map *map); 62 void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key); 63 int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr, 64 union bpf_attr __user *uattr); 65 int (*map_lookup_and_delete_batch)(struct bpf_map *map, 66 const union bpf_attr *attr, 67 union bpf_attr __user *uattr); 68 int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr, 69 union bpf_attr __user *uattr); 70 int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr, 71 union bpf_attr __user *uattr); 72 73 /* funcs callable from userspace and from eBPF programs */ 74 void *(*map_lookup_elem)(struct bpf_map *map, void *key); 75 int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags); 76 int (*map_delete_elem)(struct bpf_map *map, void *key); 77 int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags); 78 int (*map_pop_elem)(struct bpf_map *map, void *value); 79 int (*map_peek_elem)(struct bpf_map *map, void *value); 80 81 /* funcs called by prog_array and perf_event_array map */ 82 void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file, 83 int fd); 84 void (*map_fd_put_ptr)(void *ptr); 85 u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf); 86 u32 (*map_fd_sys_lookup_elem)(void *ptr); 87 void (*map_seq_show_elem)(struct bpf_map *map, void *key, 88 struct seq_file *m); 89 int (*map_check_btf)(const struct bpf_map *map, 90 const struct btf *btf, 91 const struct btf_type *key_type, 92 const struct btf_type *value_type); 93 94 /* Prog poke tracking helpers. */ 95 int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux); 96 void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux); 97 void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old, 98 struct bpf_prog *new); 99 100 /* Direct value access helpers. */ 101 int (*map_direct_value_addr)(const struct bpf_map *map, 102 u64 *imm, u32 off); 103 int (*map_direct_value_meta)(const struct bpf_map *map, 104 u64 imm, u32 *off); 105 int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma); 106 __poll_t (*map_poll)(struct bpf_map *map, struct file *filp, 107 struct poll_table_struct *pts); 108 109 /* Functions called by bpf_local_storage maps */ 110 int (*map_local_storage_charge)(struct bpf_local_storage_map *smap, 111 void *owner, u32 size); 112 void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap, 113 void *owner, u32 size); 114 struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner); 115 /* BTF name and id of struct allocated by map_alloc */ 116 const char * const map_btf_name; 117 int *map_btf_id; 118 119 /* bpf_iter info used to open a seq_file */ 120 const struct bpf_iter_seq_info *iter_seq_info; 121 }; 122 123 struct bpf_map_memory { 124 u32 pages; 125 struct user_struct *user; 126 }; 127 128 struct bpf_map { 129 /* The first two cachelines with read-mostly members of which some 130 * are also accessed in fast-path (e.g. ops, max_entries). 131 */ 132 const struct bpf_map_ops *ops ____cacheline_aligned; 133 struct bpf_map *inner_map_meta; 134 #ifdef CONFIG_SECURITY 135 void *security; 136 #endif 137 enum bpf_map_type map_type; 138 u32 key_size; 139 u32 value_size; 140 u32 max_entries; 141 u32 map_flags; 142 int spin_lock_off; /* >=0 valid offset, <0 error */ 143 u32 id; 144 int numa_node; 145 u32 btf_key_type_id; 146 u32 btf_value_type_id; 147 struct btf *btf; 148 struct bpf_map_memory memory; 149 char name[BPF_OBJ_NAME_LEN]; 150 u32 btf_vmlinux_value_type_id; 151 bool bypass_spec_v1; 152 bool frozen; /* write-once; write-protected by freeze_mutex */ 153 /* 22 bytes hole */ 154 155 /* The 3rd and 4th cacheline with misc members to avoid false sharing 156 * particularly with refcounting. 157 */ 158 atomic64_t refcnt ____cacheline_aligned; 159 atomic64_t usercnt; 160 struct work_struct work; 161 struct mutex freeze_mutex; 162 u64 writecnt; /* writable mmap cnt; protected by freeze_mutex */ 163 }; 164 165 static inline bool map_value_has_spin_lock(const struct bpf_map *map) 166 { 167 return map->spin_lock_off >= 0; 168 } 169 170 static inline void check_and_init_map_lock(struct bpf_map *map, void *dst) 171 { 172 if (likely(!map_value_has_spin_lock(map))) 173 return; 174 *(struct bpf_spin_lock *)(dst + map->spin_lock_off) = 175 (struct bpf_spin_lock){}; 176 } 177 178 /* copy everything but bpf_spin_lock */ 179 static inline void copy_map_value(struct bpf_map *map, void *dst, void *src) 180 { 181 if (unlikely(map_value_has_spin_lock(map))) { 182 u32 off = map->spin_lock_off; 183 184 memcpy(dst, src, off); 185 memcpy(dst + off + sizeof(struct bpf_spin_lock), 186 src + off + sizeof(struct bpf_spin_lock), 187 map->value_size - off - sizeof(struct bpf_spin_lock)); 188 } else { 189 memcpy(dst, src, map->value_size); 190 } 191 } 192 void copy_map_value_locked(struct bpf_map *map, void *dst, void *src, 193 bool lock_src); 194 int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size); 195 196 struct bpf_offload_dev; 197 struct bpf_offloaded_map; 198 199 struct bpf_map_dev_ops { 200 int (*map_get_next_key)(struct bpf_offloaded_map *map, 201 void *key, void *next_key); 202 int (*map_lookup_elem)(struct bpf_offloaded_map *map, 203 void *key, void *value); 204 int (*map_update_elem)(struct bpf_offloaded_map *map, 205 void *key, void *value, u64 flags); 206 int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key); 207 }; 208 209 struct bpf_offloaded_map { 210 struct bpf_map map; 211 struct net_device *netdev; 212 const struct bpf_map_dev_ops *dev_ops; 213 void *dev_priv; 214 struct list_head offloads; 215 }; 216 217 static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map) 218 { 219 return container_of(map, struct bpf_offloaded_map, map); 220 } 221 222 static inline bool bpf_map_offload_neutral(const struct bpf_map *map) 223 { 224 return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY; 225 } 226 227 static inline bool bpf_map_support_seq_show(const struct bpf_map *map) 228 { 229 return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) && 230 map->ops->map_seq_show_elem; 231 } 232 233 int map_check_no_btf(const struct bpf_map *map, 234 const struct btf *btf, 235 const struct btf_type *key_type, 236 const struct btf_type *value_type); 237 238 extern const struct bpf_map_ops bpf_map_offload_ops; 239 240 /* function argument constraints */ 241 enum bpf_arg_type { 242 ARG_DONTCARE = 0, /* unused argument in helper function */ 243 244 /* the following constraints used to prototype 245 * bpf_map_lookup/update/delete_elem() functions 246 */ 247 ARG_CONST_MAP_PTR, /* const argument used as pointer to bpf_map */ 248 ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */ 249 ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */ 250 ARG_PTR_TO_UNINIT_MAP_VALUE, /* pointer to valid memory used to store a map value */ 251 ARG_PTR_TO_MAP_VALUE_OR_NULL, /* pointer to stack used as map value or NULL */ 252 253 /* the following constraints used to prototype bpf_memcmp() and other 254 * functions that access data on eBPF program stack 255 */ 256 ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */ 257 ARG_PTR_TO_MEM_OR_NULL, /* pointer to valid memory or NULL */ 258 ARG_PTR_TO_UNINIT_MEM, /* pointer to memory does not need to be initialized, 259 * helper function must fill all bytes or clear 260 * them in error case. 261 */ 262 263 ARG_CONST_SIZE, /* number of bytes accessed from memory */ 264 ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */ 265 266 ARG_PTR_TO_CTX, /* pointer to context */ 267 ARG_PTR_TO_CTX_OR_NULL, /* pointer to context or NULL */ 268 ARG_ANYTHING, /* any (initialized) argument is ok */ 269 ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */ 270 ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */ 271 ARG_PTR_TO_INT, /* pointer to int */ 272 ARG_PTR_TO_LONG, /* pointer to long */ 273 ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */ 274 ARG_PTR_TO_SOCKET_OR_NULL, /* pointer to bpf_sock (fullsock) or NULL */ 275 ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */ 276 ARG_PTR_TO_ALLOC_MEM, /* pointer to dynamically allocated memory */ 277 ARG_PTR_TO_ALLOC_MEM_OR_NULL, /* pointer to dynamically allocated memory or NULL */ 278 ARG_CONST_ALLOC_SIZE_OR_ZERO, /* number of allocated bytes requested */ 279 }; 280 281 /* type of values returned from helper functions */ 282 enum bpf_return_type { 283 RET_INTEGER, /* function returns integer */ 284 RET_VOID, /* function doesn't return anything */ 285 RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */ 286 RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */ 287 RET_PTR_TO_SOCKET_OR_NULL, /* returns a pointer to a socket or NULL */ 288 RET_PTR_TO_TCP_SOCK_OR_NULL, /* returns a pointer to a tcp_sock or NULL */ 289 RET_PTR_TO_SOCK_COMMON_OR_NULL, /* returns a pointer to a sock_common or NULL */ 290 RET_PTR_TO_ALLOC_MEM_OR_NULL, /* returns a pointer to dynamically allocated memory or NULL */ 291 RET_PTR_TO_BTF_ID_OR_NULL, /* returns a pointer to a btf_id or NULL */ 292 }; 293 294 /* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs 295 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL 296 * instructions after verifying 297 */ 298 struct bpf_func_proto { 299 u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 300 bool gpl_only; 301 bool pkt_access; 302 enum bpf_return_type ret_type; 303 union { 304 struct { 305 enum bpf_arg_type arg1_type; 306 enum bpf_arg_type arg2_type; 307 enum bpf_arg_type arg3_type; 308 enum bpf_arg_type arg4_type; 309 enum bpf_arg_type arg5_type; 310 }; 311 enum bpf_arg_type arg_type[5]; 312 }; 313 int *btf_id; /* BTF ids of arguments */ 314 bool (*check_btf_id)(u32 btf_id, u32 arg); /* if the argument btf_id is 315 * valid. Often used if more 316 * than one btf id is permitted 317 * for this argument. 318 */ 319 int *ret_btf_id; /* return value btf_id */ 320 }; 321 322 /* bpf_context is intentionally undefined structure. Pointer to bpf_context is 323 * the first argument to eBPF programs. 324 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *' 325 */ 326 struct bpf_context; 327 328 enum bpf_access_type { 329 BPF_READ = 1, 330 BPF_WRITE = 2 331 }; 332 333 /* types of values stored in eBPF registers */ 334 /* Pointer types represent: 335 * pointer 336 * pointer + imm 337 * pointer + (u16) var 338 * pointer + (u16) var + imm 339 * if (range > 0) then [ptr, ptr + range - off) is safe to access 340 * if (id > 0) means that some 'var' was added 341 * if (off > 0) means that 'imm' was added 342 */ 343 enum bpf_reg_type { 344 NOT_INIT = 0, /* nothing was written into register */ 345 SCALAR_VALUE, /* reg doesn't contain a valid pointer */ 346 PTR_TO_CTX, /* reg points to bpf_context */ 347 CONST_PTR_TO_MAP, /* reg points to struct bpf_map */ 348 PTR_TO_MAP_VALUE, /* reg points to map element value */ 349 PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */ 350 PTR_TO_STACK, /* reg == frame_pointer + offset */ 351 PTR_TO_PACKET_META, /* skb->data - meta_len */ 352 PTR_TO_PACKET, /* reg points to skb->data */ 353 PTR_TO_PACKET_END, /* skb->data + headlen */ 354 PTR_TO_FLOW_KEYS, /* reg points to bpf_flow_keys */ 355 PTR_TO_SOCKET, /* reg points to struct bpf_sock */ 356 PTR_TO_SOCKET_OR_NULL, /* reg points to struct bpf_sock or NULL */ 357 PTR_TO_SOCK_COMMON, /* reg points to sock_common */ 358 PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */ 359 PTR_TO_TCP_SOCK, /* reg points to struct tcp_sock */ 360 PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */ 361 PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */ 362 PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */ 363 PTR_TO_BTF_ID, /* reg points to kernel struct */ 364 PTR_TO_BTF_ID_OR_NULL, /* reg points to kernel struct or NULL */ 365 PTR_TO_MEM, /* reg points to valid memory region */ 366 PTR_TO_MEM_OR_NULL, /* reg points to valid memory region or NULL */ 367 PTR_TO_RDONLY_BUF, /* reg points to a readonly buffer */ 368 PTR_TO_RDONLY_BUF_OR_NULL, /* reg points to a readonly buffer or NULL */ 369 PTR_TO_RDWR_BUF, /* reg points to a read/write buffer */ 370 PTR_TO_RDWR_BUF_OR_NULL, /* reg points to a read/write buffer or NULL */ 371 }; 372 373 /* The information passed from prog-specific *_is_valid_access 374 * back to the verifier. 375 */ 376 struct bpf_insn_access_aux { 377 enum bpf_reg_type reg_type; 378 union { 379 int ctx_field_size; 380 u32 btf_id; 381 }; 382 struct bpf_verifier_log *log; /* for verbose logs */ 383 }; 384 385 static inline void 386 bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size) 387 { 388 aux->ctx_field_size = size; 389 } 390 391 struct bpf_prog_ops { 392 int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr, 393 union bpf_attr __user *uattr); 394 }; 395 396 struct bpf_verifier_ops { 397 /* return eBPF function prototype for verification */ 398 const struct bpf_func_proto * 399 (*get_func_proto)(enum bpf_func_id func_id, 400 const struct bpf_prog *prog); 401 402 /* return true if 'size' wide access at offset 'off' within bpf_context 403 * with 'type' (read or write) is allowed 404 */ 405 bool (*is_valid_access)(int off, int size, enum bpf_access_type type, 406 const struct bpf_prog *prog, 407 struct bpf_insn_access_aux *info); 408 int (*gen_prologue)(struct bpf_insn *insn, bool direct_write, 409 const struct bpf_prog *prog); 410 int (*gen_ld_abs)(const struct bpf_insn *orig, 411 struct bpf_insn *insn_buf); 412 u32 (*convert_ctx_access)(enum bpf_access_type type, 413 const struct bpf_insn *src, 414 struct bpf_insn *dst, 415 struct bpf_prog *prog, u32 *target_size); 416 int (*btf_struct_access)(struct bpf_verifier_log *log, 417 const struct btf_type *t, int off, int size, 418 enum bpf_access_type atype, 419 u32 *next_btf_id); 420 }; 421 422 struct bpf_prog_offload_ops { 423 /* verifier basic callbacks */ 424 int (*insn_hook)(struct bpf_verifier_env *env, 425 int insn_idx, int prev_insn_idx); 426 int (*finalize)(struct bpf_verifier_env *env); 427 /* verifier optimization callbacks (called after .finalize) */ 428 int (*replace_insn)(struct bpf_verifier_env *env, u32 off, 429 struct bpf_insn *insn); 430 int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt); 431 /* program management callbacks */ 432 int (*prepare)(struct bpf_prog *prog); 433 int (*translate)(struct bpf_prog *prog); 434 void (*destroy)(struct bpf_prog *prog); 435 }; 436 437 struct bpf_prog_offload { 438 struct bpf_prog *prog; 439 struct net_device *netdev; 440 struct bpf_offload_dev *offdev; 441 void *dev_priv; 442 struct list_head offloads; 443 bool dev_state; 444 bool opt_failed; 445 void *jited_image; 446 u32 jited_len; 447 }; 448 449 enum bpf_cgroup_storage_type { 450 BPF_CGROUP_STORAGE_SHARED, 451 BPF_CGROUP_STORAGE_PERCPU, 452 __BPF_CGROUP_STORAGE_MAX 453 }; 454 455 #define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX 456 457 /* The longest tracepoint has 12 args. 458 * See include/trace/bpf_probe.h 459 */ 460 #define MAX_BPF_FUNC_ARGS 12 461 462 struct bpf_prog_stats { 463 u64 cnt; 464 u64 nsecs; 465 struct u64_stats_sync syncp; 466 } __aligned(2 * sizeof(u64)); 467 468 struct btf_func_model { 469 u8 ret_size; 470 u8 nr_args; 471 u8 arg_size[MAX_BPF_FUNC_ARGS]; 472 }; 473 474 /* Restore arguments before returning from trampoline to let original function 475 * continue executing. This flag is used for fentry progs when there are no 476 * fexit progs. 477 */ 478 #define BPF_TRAMP_F_RESTORE_REGS BIT(0) 479 /* Call original function after fentry progs, but before fexit progs. 480 * Makes sense for fentry/fexit, normal calls and indirect calls. 481 */ 482 #define BPF_TRAMP_F_CALL_ORIG BIT(1) 483 /* Skip current frame and return to parent. Makes sense for fentry/fexit 484 * programs only. Should not be used with normal calls and indirect calls. 485 */ 486 #define BPF_TRAMP_F_SKIP_FRAME BIT(2) 487 488 /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50 489 * bytes on x86. Pick a number to fit into BPF_IMAGE_SIZE / 2 490 */ 491 #define BPF_MAX_TRAMP_PROGS 40 492 493 struct bpf_tramp_progs { 494 struct bpf_prog *progs[BPF_MAX_TRAMP_PROGS]; 495 int nr_progs; 496 }; 497 498 /* Different use cases for BPF trampoline: 499 * 1. replace nop at the function entry (kprobe equivalent) 500 * flags = BPF_TRAMP_F_RESTORE_REGS 501 * fentry = a set of programs to run before returning from trampoline 502 * 503 * 2. replace nop at the function entry (kprobe + kretprobe equivalent) 504 * flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME 505 * orig_call = fentry_ip + MCOUNT_INSN_SIZE 506 * fentry = a set of program to run before calling original function 507 * fexit = a set of program to run after original function 508 * 509 * 3. replace direct call instruction anywhere in the function body 510 * or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid) 511 * With flags = 0 512 * fentry = a set of programs to run before returning from trampoline 513 * With flags = BPF_TRAMP_F_CALL_ORIG 514 * orig_call = original callback addr or direct function addr 515 * fentry = a set of program to run before calling original function 516 * fexit = a set of program to run after original function 517 */ 518 int arch_prepare_bpf_trampoline(void *image, void *image_end, 519 const struct btf_func_model *m, u32 flags, 520 struct bpf_tramp_progs *tprogs, 521 void *orig_call); 522 /* these two functions are called from generated trampoline */ 523 u64 notrace __bpf_prog_enter(void); 524 void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start); 525 526 struct bpf_ksym { 527 unsigned long start; 528 unsigned long end; 529 char name[KSYM_NAME_LEN]; 530 struct list_head lnode; 531 struct latch_tree_node tnode; 532 bool prog; 533 }; 534 535 enum bpf_tramp_prog_type { 536 BPF_TRAMP_FENTRY, 537 BPF_TRAMP_FEXIT, 538 BPF_TRAMP_MODIFY_RETURN, 539 BPF_TRAMP_MAX, 540 BPF_TRAMP_REPLACE, /* more than MAX */ 541 }; 542 543 struct bpf_trampoline { 544 /* hlist for trampoline_table */ 545 struct hlist_node hlist; 546 /* serializes access to fields of this trampoline */ 547 struct mutex mutex; 548 refcount_t refcnt; 549 u64 key; 550 struct { 551 struct btf_func_model model; 552 void *addr; 553 bool ftrace_managed; 554 } func; 555 /* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF 556 * program by replacing one of its functions. func.addr is the address 557 * of the function it replaced. 558 */ 559 struct bpf_prog *extension_prog; 560 /* list of BPF programs using this trampoline */ 561 struct hlist_head progs_hlist[BPF_TRAMP_MAX]; 562 /* Number of attached programs. A counter per kind. */ 563 int progs_cnt[BPF_TRAMP_MAX]; 564 /* Executable image of trampoline */ 565 void *image; 566 u64 selector; 567 struct bpf_ksym ksym; 568 }; 569 570 #define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */ 571 572 struct bpf_dispatcher_prog { 573 struct bpf_prog *prog; 574 refcount_t users; 575 }; 576 577 struct bpf_dispatcher { 578 /* dispatcher mutex */ 579 struct mutex mutex; 580 void *func; 581 struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX]; 582 int num_progs; 583 void *image; 584 u32 image_off; 585 struct bpf_ksym ksym; 586 }; 587 588 static __always_inline unsigned int bpf_dispatcher_nop_func( 589 const void *ctx, 590 const struct bpf_insn *insnsi, 591 unsigned int (*bpf_func)(const void *, 592 const struct bpf_insn *)) 593 { 594 return bpf_func(ctx, insnsi); 595 } 596 #ifdef CONFIG_BPF_JIT 597 struct bpf_trampoline *bpf_trampoline_lookup(u64 key); 598 int bpf_trampoline_link_prog(struct bpf_prog *prog); 599 int bpf_trampoline_unlink_prog(struct bpf_prog *prog); 600 void bpf_trampoline_put(struct bpf_trampoline *tr); 601 #define BPF_DISPATCHER_INIT(_name) { \ 602 .mutex = __MUTEX_INITIALIZER(_name.mutex), \ 603 .func = &_name##_func, \ 604 .progs = {}, \ 605 .num_progs = 0, \ 606 .image = NULL, \ 607 .image_off = 0, \ 608 .ksym = { \ 609 .name = #_name, \ 610 .lnode = LIST_HEAD_INIT(_name.ksym.lnode), \ 611 }, \ 612 } 613 614 #define DEFINE_BPF_DISPATCHER(name) \ 615 noinline unsigned int bpf_dispatcher_##name##_func( \ 616 const void *ctx, \ 617 const struct bpf_insn *insnsi, \ 618 unsigned int (*bpf_func)(const void *, \ 619 const struct bpf_insn *)) \ 620 { \ 621 return bpf_func(ctx, insnsi); \ 622 } \ 623 EXPORT_SYMBOL(bpf_dispatcher_##name##_func); \ 624 struct bpf_dispatcher bpf_dispatcher_##name = \ 625 BPF_DISPATCHER_INIT(bpf_dispatcher_##name); 626 #define DECLARE_BPF_DISPATCHER(name) \ 627 unsigned int bpf_dispatcher_##name##_func( \ 628 const void *ctx, \ 629 const struct bpf_insn *insnsi, \ 630 unsigned int (*bpf_func)(const void *, \ 631 const struct bpf_insn *)); \ 632 extern struct bpf_dispatcher bpf_dispatcher_##name; 633 #define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func 634 #define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name) 635 void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from, 636 struct bpf_prog *to); 637 /* Called only from JIT-enabled code, so there's no need for stubs. */ 638 void *bpf_jit_alloc_exec_page(void); 639 void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym); 640 void bpf_image_ksym_del(struct bpf_ksym *ksym); 641 void bpf_ksym_add(struct bpf_ksym *ksym); 642 void bpf_ksym_del(struct bpf_ksym *ksym); 643 #else 644 static inline struct bpf_trampoline *bpf_trampoline_lookup(u64 key) 645 { 646 return NULL; 647 } 648 static inline int bpf_trampoline_link_prog(struct bpf_prog *prog) 649 { 650 return -ENOTSUPP; 651 } 652 static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog) 653 { 654 return -ENOTSUPP; 655 } 656 static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {} 657 #define DEFINE_BPF_DISPATCHER(name) 658 #define DECLARE_BPF_DISPATCHER(name) 659 #define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func 660 #define BPF_DISPATCHER_PTR(name) NULL 661 static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, 662 struct bpf_prog *from, 663 struct bpf_prog *to) {} 664 static inline bool is_bpf_image_address(unsigned long address) 665 { 666 return false; 667 } 668 #endif 669 670 struct bpf_func_info_aux { 671 u16 linkage; 672 bool unreliable; 673 }; 674 675 enum bpf_jit_poke_reason { 676 BPF_POKE_REASON_TAIL_CALL, 677 }; 678 679 /* Descriptor of pokes pointing /into/ the JITed image. */ 680 struct bpf_jit_poke_descriptor { 681 void *ip; 682 union { 683 struct { 684 struct bpf_map *map; 685 u32 key; 686 } tail_call; 687 }; 688 bool ip_stable; 689 u8 adj_off; 690 u16 reason; 691 }; 692 693 /* reg_type info for ctx arguments */ 694 struct bpf_ctx_arg_aux { 695 u32 offset; 696 enum bpf_reg_type reg_type; 697 u32 btf_id; 698 }; 699 700 struct bpf_prog_aux { 701 atomic64_t refcnt; 702 u32 used_map_cnt; 703 u32 max_ctx_offset; 704 u32 max_pkt_offset; 705 u32 max_tp_access; 706 u32 stack_depth; 707 u32 id; 708 u32 func_cnt; /* used by non-func prog as the number of func progs */ 709 u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */ 710 u32 attach_btf_id; /* in-kernel BTF type id to attach to */ 711 u32 ctx_arg_info_size; 712 u32 max_rdonly_access; 713 u32 max_rdwr_access; 714 const struct bpf_ctx_arg_aux *ctx_arg_info; 715 struct bpf_prog *linked_prog; 716 bool verifier_zext; /* Zero extensions has been inserted by verifier. */ 717 bool offload_requested; 718 bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */ 719 bool func_proto_unreliable; 720 enum bpf_tramp_prog_type trampoline_prog_type; 721 struct bpf_trampoline *trampoline; 722 struct hlist_node tramp_hlist; 723 /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */ 724 const struct btf_type *attach_func_proto; 725 /* function name for valid attach_btf_id */ 726 const char *attach_func_name; 727 struct bpf_prog **func; 728 void *jit_data; /* JIT specific data. arch dependent */ 729 struct bpf_jit_poke_descriptor *poke_tab; 730 u32 size_poke_tab; 731 struct bpf_ksym ksym; 732 const struct bpf_prog_ops *ops; 733 struct bpf_map **used_maps; 734 struct bpf_prog *prog; 735 struct user_struct *user; 736 u64 load_time; /* ns since boottime */ 737 struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; 738 char name[BPF_OBJ_NAME_LEN]; 739 #ifdef CONFIG_SECURITY 740 void *security; 741 #endif 742 struct bpf_prog_offload *offload; 743 struct btf *btf; 744 struct bpf_func_info *func_info; 745 struct bpf_func_info_aux *func_info_aux; 746 /* bpf_line_info loaded from userspace. linfo->insn_off 747 * has the xlated insn offset. 748 * Both the main and sub prog share the same linfo. 749 * The subprog can access its first linfo by 750 * using the linfo_idx. 751 */ 752 struct bpf_line_info *linfo; 753 /* jited_linfo is the jited addr of the linfo. It has a 754 * one to one mapping to linfo: 755 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off. 756 * Both the main and sub prog share the same jited_linfo. 757 * The subprog can access its first jited_linfo by 758 * using the linfo_idx. 759 */ 760 void **jited_linfo; 761 u32 func_info_cnt; 762 u32 nr_linfo; 763 /* subprog can use linfo_idx to access its first linfo and 764 * jited_linfo. 765 * main prog always has linfo_idx == 0 766 */ 767 u32 linfo_idx; 768 u32 num_exentries; 769 struct exception_table_entry *extable; 770 struct bpf_prog_stats __percpu *stats; 771 union { 772 struct work_struct work; 773 struct rcu_head rcu; 774 }; 775 }; 776 777 struct bpf_array_aux { 778 /* 'Ownership' of prog array is claimed by the first program that 779 * is going to use this map or by the first program which FD is 780 * stored in the map to make sure that all callers and callees have 781 * the same prog type and JITed flag. 782 */ 783 enum bpf_prog_type type; 784 bool jited; 785 /* Programs with direct jumps into programs part of this array. */ 786 struct list_head poke_progs; 787 struct bpf_map *map; 788 struct mutex poke_mutex; 789 struct work_struct work; 790 }; 791 792 struct bpf_link { 793 atomic64_t refcnt; 794 u32 id; 795 enum bpf_link_type type; 796 const struct bpf_link_ops *ops; 797 struct bpf_prog *prog; 798 struct work_struct work; 799 }; 800 801 struct bpf_link_ops { 802 void (*release)(struct bpf_link *link); 803 void (*dealloc)(struct bpf_link *link); 804 int (*detach)(struct bpf_link *link); 805 int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog, 806 struct bpf_prog *old_prog); 807 void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq); 808 int (*fill_link_info)(const struct bpf_link *link, 809 struct bpf_link_info *info); 810 }; 811 812 struct bpf_link_primer { 813 struct bpf_link *link; 814 struct file *file; 815 int fd; 816 u32 id; 817 }; 818 819 struct bpf_struct_ops_value; 820 struct btf_type; 821 struct btf_member; 822 823 #define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64 824 struct bpf_struct_ops { 825 const struct bpf_verifier_ops *verifier_ops; 826 int (*init)(struct btf *btf); 827 int (*check_member)(const struct btf_type *t, 828 const struct btf_member *member); 829 int (*init_member)(const struct btf_type *t, 830 const struct btf_member *member, 831 void *kdata, const void *udata); 832 int (*reg)(void *kdata); 833 void (*unreg)(void *kdata); 834 const struct btf_type *type; 835 const struct btf_type *value_type; 836 const char *name; 837 struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS]; 838 u32 type_id; 839 u32 value_id; 840 }; 841 842 #if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL) 843 #define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA)) 844 const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id); 845 void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log); 846 bool bpf_struct_ops_get(const void *kdata); 847 void bpf_struct_ops_put(const void *kdata); 848 int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, 849 void *value); 850 static inline bool bpf_try_module_get(const void *data, struct module *owner) 851 { 852 if (owner == BPF_MODULE_OWNER) 853 return bpf_struct_ops_get(data); 854 else 855 return try_module_get(owner); 856 } 857 static inline void bpf_module_put(const void *data, struct module *owner) 858 { 859 if (owner == BPF_MODULE_OWNER) 860 bpf_struct_ops_put(data); 861 else 862 module_put(owner); 863 } 864 #else 865 static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id) 866 { 867 return NULL; 868 } 869 static inline void bpf_struct_ops_init(struct btf *btf, 870 struct bpf_verifier_log *log) 871 { 872 } 873 static inline bool bpf_try_module_get(const void *data, struct module *owner) 874 { 875 return try_module_get(owner); 876 } 877 static inline void bpf_module_put(const void *data, struct module *owner) 878 { 879 module_put(owner); 880 } 881 static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, 882 void *key, 883 void *value) 884 { 885 return -EINVAL; 886 } 887 #endif 888 889 struct bpf_array { 890 struct bpf_map map; 891 u32 elem_size; 892 u32 index_mask; 893 struct bpf_array_aux *aux; 894 union { 895 char value[0] __aligned(8); 896 void *ptrs[0] __aligned(8); 897 void __percpu *pptrs[0] __aligned(8); 898 }; 899 }; 900 901 #define BPF_COMPLEXITY_LIMIT_INSNS 1000000 /* yes. 1M insns */ 902 #define MAX_TAIL_CALL_CNT 32 903 904 #define BPF_F_ACCESS_MASK (BPF_F_RDONLY | \ 905 BPF_F_RDONLY_PROG | \ 906 BPF_F_WRONLY | \ 907 BPF_F_WRONLY_PROG) 908 909 #define BPF_MAP_CAN_READ BIT(0) 910 #define BPF_MAP_CAN_WRITE BIT(1) 911 912 static inline u32 bpf_map_flags_to_cap(struct bpf_map *map) 913 { 914 u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG); 915 916 /* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is 917 * not possible. 918 */ 919 if (access_flags & BPF_F_RDONLY_PROG) 920 return BPF_MAP_CAN_READ; 921 else if (access_flags & BPF_F_WRONLY_PROG) 922 return BPF_MAP_CAN_WRITE; 923 else 924 return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE; 925 } 926 927 static inline bool bpf_map_flags_access_ok(u32 access_flags) 928 { 929 return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) != 930 (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG); 931 } 932 933 struct bpf_event_entry { 934 struct perf_event *event; 935 struct file *perf_file; 936 struct file *map_file; 937 struct rcu_head rcu; 938 }; 939 940 bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp); 941 int bpf_prog_calc_tag(struct bpf_prog *fp); 942 const char *kernel_type_name(u32 btf_type_id); 943 944 const struct bpf_func_proto *bpf_get_trace_printk_proto(void); 945 946 typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src, 947 unsigned long off, unsigned long len); 948 typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type, 949 const struct bpf_insn *src, 950 struct bpf_insn *dst, 951 struct bpf_prog *prog, 952 u32 *target_size); 953 954 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, 955 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy); 956 957 /* an array of programs to be executed under rcu_lock. 958 * 959 * Typical usage: 960 * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN); 961 * 962 * the structure returned by bpf_prog_array_alloc() should be populated 963 * with program pointers and the last pointer must be NULL. 964 * The user has to keep refcnt on the program and make sure the program 965 * is removed from the array before bpf_prog_put(). 966 * The 'struct bpf_prog_array *' should only be replaced with xchg() 967 * since other cpus are walking the array of pointers in parallel. 968 */ 969 struct bpf_prog_array_item { 970 struct bpf_prog *prog; 971 struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; 972 }; 973 974 struct bpf_prog_array { 975 struct rcu_head rcu; 976 struct bpf_prog_array_item items[]; 977 }; 978 979 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags); 980 void bpf_prog_array_free(struct bpf_prog_array *progs); 981 int bpf_prog_array_length(struct bpf_prog_array *progs); 982 bool bpf_prog_array_is_empty(struct bpf_prog_array *array); 983 int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs, 984 __u32 __user *prog_ids, u32 cnt); 985 986 void bpf_prog_array_delete_safe(struct bpf_prog_array *progs, 987 struct bpf_prog *old_prog); 988 int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index); 989 int bpf_prog_array_update_at(struct bpf_prog_array *array, int index, 990 struct bpf_prog *prog); 991 int bpf_prog_array_copy_info(struct bpf_prog_array *array, 992 u32 *prog_ids, u32 request_cnt, 993 u32 *prog_cnt); 994 int bpf_prog_array_copy(struct bpf_prog_array *old_array, 995 struct bpf_prog *exclude_prog, 996 struct bpf_prog *include_prog, 997 struct bpf_prog_array **new_array); 998 999 #define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null) \ 1000 ({ \ 1001 struct bpf_prog_array_item *_item; \ 1002 struct bpf_prog *_prog; \ 1003 struct bpf_prog_array *_array; \ 1004 u32 _ret = 1; \ 1005 migrate_disable(); \ 1006 rcu_read_lock(); \ 1007 _array = rcu_dereference(array); \ 1008 if (unlikely(check_non_null && !_array))\ 1009 goto _out; \ 1010 _item = &_array->items[0]; \ 1011 while ((_prog = READ_ONCE(_item->prog))) { \ 1012 bpf_cgroup_storage_set(_item->cgroup_storage); \ 1013 _ret &= func(_prog, ctx); \ 1014 _item++; \ 1015 } \ 1016 _out: \ 1017 rcu_read_unlock(); \ 1018 migrate_enable(); \ 1019 _ret; \ 1020 }) 1021 1022 /* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs 1023 * so BPF programs can request cwr for TCP packets. 1024 * 1025 * Current cgroup skb programs can only return 0 or 1 (0 to drop the 1026 * packet. This macro changes the behavior so the low order bit 1027 * indicates whether the packet should be dropped (0) or not (1) 1028 * and the next bit is a congestion notification bit. This could be 1029 * used by TCP to call tcp_enter_cwr() 1030 * 1031 * Hence, new allowed return values of CGROUP EGRESS BPF programs are: 1032 * 0: drop packet 1033 * 1: keep packet 1034 * 2: drop packet and cn 1035 * 3: keep packet and cn 1036 * 1037 * This macro then converts it to one of the NET_XMIT or an error 1038 * code that is then interpreted as drop packet (and no cn): 1039 * 0: NET_XMIT_SUCCESS skb should be transmitted 1040 * 1: NET_XMIT_DROP skb should be dropped and cn 1041 * 2: NET_XMIT_CN skb should be transmitted and cn 1042 * 3: -EPERM skb should be dropped 1043 */ 1044 #define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func) \ 1045 ({ \ 1046 struct bpf_prog_array_item *_item; \ 1047 struct bpf_prog *_prog; \ 1048 struct bpf_prog_array *_array; \ 1049 u32 ret; \ 1050 u32 _ret = 1; \ 1051 u32 _cn = 0; \ 1052 migrate_disable(); \ 1053 rcu_read_lock(); \ 1054 _array = rcu_dereference(array); \ 1055 _item = &_array->items[0]; \ 1056 while ((_prog = READ_ONCE(_item->prog))) { \ 1057 bpf_cgroup_storage_set(_item->cgroup_storage); \ 1058 ret = func(_prog, ctx); \ 1059 _ret &= (ret & 1); \ 1060 _cn |= (ret & 2); \ 1061 _item++; \ 1062 } \ 1063 rcu_read_unlock(); \ 1064 migrate_enable(); \ 1065 if (_ret) \ 1066 _ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \ 1067 else \ 1068 _ret = (_cn ? NET_XMIT_DROP : -EPERM); \ 1069 _ret; \ 1070 }) 1071 1072 #define BPF_PROG_RUN_ARRAY(array, ctx, func) \ 1073 __BPF_PROG_RUN_ARRAY(array, ctx, func, false) 1074 1075 #define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func) \ 1076 __BPF_PROG_RUN_ARRAY(array, ctx, func, true) 1077 1078 #ifdef CONFIG_BPF_SYSCALL 1079 DECLARE_PER_CPU(int, bpf_prog_active); 1080 extern struct mutex bpf_stats_enabled_mutex; 1081 1082 /* 1083 * Block execution of BPF programs attached to instrumentation (perf, 1084 * kprobes, tracepoints) to prevent deadlocks on map operations as any of 1085 * these events can happen inside a region which holds a map bucket lock 1086 * and can deadlock on it. 1087 * 1088 * Use the preemption safe inc/dec variants on RT because migrate disable 1089 * is preemptible on RT and preemption in the middle of the RMW operation 1090 * might lead to inconsistent state. Use the raw variants for non RT 1091 * kernels as migrate_disable() maps to preempt_disable() so the slightly 1092 * more expensive save operation can be avoided. 1093 */ 1094 static inline void bpf_disable_instrumentation(void) 1095 { 1096 migrate_disable(); 1097 if (IS_ENABLED(CONFIG_PREEMPT_RT)) 1098 this_cpu_inc(bpf_prog_active); 1099 else 1100 __this_cpu_inc(bpf_prog_active); 1101 } 1102 1103 static inline void bpf_enable_instrumentation(void) 1104 { 1105 if (IS_ENABLED(CONFIG_PREEMPT_RT)) 1106 this_cpu_dec(bpf_prog_active); 1107 else 1108 __this_cpu_dec(bpf_prog_active); 1109 migrate_enable(); 1110 } 1111 1112 extern const struct file_operations bpf_map_fops; 1113 extern const struct file_operations bpf_prog_fops; 1114 extern const struct file_operations bpf_iter_fops; 1115 1116 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ 1117 extern const struct bpf_prog_ops _name ## _prog_ops; \ 1118 extern const struct bpf_verifier_ops _name ## _verifier_ops; 1119 #define BPF_MAP_TYPE(_id, _ops) \ 1120 extern const struct bpf_map_ops _ops; 1121 #define BPF_LINK_TYPE(_id, _name) 1122 #include <linux/bpf_types.h> 1123 #undef BPF_PROG_TYPE 1124 #undef BPF_MAP_TYPE 1125 #undef BPF_LINK_TYPE 1126 1127 extern const struct bpf_prog_ops bpf_offload_prog_ops; 1128 extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops; 1129 extern const struct bpf_verifier_ops xdp_analyzer_ops; 1130 1131 struct bpf_prog *bpf_prog_get(u32 ufd); 1132 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, 1133 bool attach_drv); 1134 void bpf_prog_add(struct bpf_prog *prog, int i); 1135 void bpf_prog_sub(struct bpf_prog *prog, int i); 1136 void bpf_prog_inc(struct bpf_prog *prog); 1137 struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog); 1138 void bpf_prog_put(struct bpf_prog *prog); 1139 int __bpf_prog_charge(struct user_struct *user, u32 pages); 1140 void __bpf_prog_uncharge(struct user_struct *user, u32 pages); 1141 void __bpf_free_used_maps(struct bpf_prog_aux *aux, 1142 struct bpf_map **used_maps, u32 len); 1143 1144 void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock); 1145 void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock); 1146 1147 struct bpf_map *bpf_map_get(u32 ufd); 1148 struct bpf_map *bpf_map_get_with_uref(u32 ufd); 1149 struct bpf_map *__bpf_map_get(struct fd f); 1150 void bpf_map_inc(struct bpf_map *map); 1151 void bpf_map_inc_with_uref(struct bpf_map *map); 1152 struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map); 1153 void bpf_map_put_with_uref(struct bpf_map *map); 1154 void bpf_map_put(struct bpf_map *map); 1155 int bpf_map_charge_memlock(struct bpf_map *map, u32 pages); 1156 void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages); 1157 int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size); 1158 void bpf_map_charge_finish(struct bpf_map_memory *mem); 1159 void bpf_map_charge_move(struct bpf_map_memory *dst, 1160 struct bpf_map_memory *src); 1161 void *bpf_map_area_alloc(u64 size, int numa_node); 1162 void *bpf_map_area_mmapable_alloc(u64 size, int numa_node); 1163 void bpf_map_area_free(void *base); 1164 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr); 1165 int generic_map_lookup_batch(struct bpf_map *map, 1166 const union bpf_attr *attr, 1167 union bpf_attr __user *uattr); 1168 int generic_map_update_batch(struct bpf_map *map, 1169 const union bpf_attr *attr, 1170 union bpf_attr __user *uattr); 1171 int generic_map_delete_batch(struct bpf_map *map, 1172 const union bpf_attr *attr, 1173 union bpf_attr __user *uattr); 1174 struct bpf_map *bpf_map_get_curr_or_next(u32 *id); 1175 struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id); 1176 1177 extern int sysctl_unprivileged_bpf_disabled; 1178 1179 static inline bool bpf_allow_ptr_leaks(void) 1180 { 1181 return perfmon_capable(); 1182 } 1183 1184 static inline bool bpf_allow_ptr_to_map_access(void) 1185 { 1186 return perfmon_capable(); 1187 } 1188 1189 static inline bool bpf_bypass_spec_v1(void) 1190 { 1191 return perfmon_capable(); 1192 } 1193 1194 static inline bool bpf_bypass_spec_v4(void) 1195 { 1196 return perfmon_capable(); 1197 } 1198 1199 int bpf_map_new_fd(struct bpf_map *map, int flags); 1200 int bpf_prog_new_fd(struct bpf_prog *prog); 1201 1202 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, 1203 const struct bpf_link_ops *ops, struct bpf_prog *prog); 1204 int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer); 1205 int bpf_link_settle(struct bpf_link_primer *primer); 1206 void bpf_link_cleanup(struct bpf_link_primer *primer); 1207 void bpf_link_inc(struct bpf_link *link); 1208 void bpf_link_put(struct bpf_link *link); 1209 int bpf_link_new_fd(struct bpf_link *link); 1210 struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd); 1211 struct bpf_link *bpf_link_get_from_fd(u32 ufd); 1212 1213 int bpf_obj_pin_user(u32 ufd, const char __user *pathname); 1214 int bpf_obj_get_user(const char __user *pathname, int flags); 1215 1216 #define BPF_ITER_FUNC_PREFIX "bpf_iter_" 1217 #define DEFINE_BPF_ITER_FUNC(target, args...) \ 1218 extern int bpf_iter_ ## target(args); \ 1219 int __init bpf_iter_ ## target(args) { return 0; } 1220 1221 struct bpf_iter_aux_info { 1222 struct bpf_map *map; 1223 }; 1224 1225 typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog, 1226 union bpf_iter_link_info *linfo, 1227 struct bpf_iter_aux_info *aux); 1228 typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux); 1229 typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux, 1230 struct seq_file *seq); 1231 typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux, 1232 struct bpf_link_info *info); 1233 1234 #define BPF_ITER_CTX_ARG_MAX 2 1235 struct bpf_iter_reg { 1236 const char *target; 1237 bpf_iter_attach_target_t attach_target; 1238 bpf_iter_detach_target_t detach_target; 1239 bpf_iter_show_fdinfo_t show_fdinfo; 1240 bpf_iter_fill_link_info_t fill_link_info; 1241 u32 ctx_arg_info_size; 1242 struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX]; 1243 const struct bpf_iter_seq_info *seq_info; 1244 }; 1245 1246 struct bpf_iter_meta { 1247 __bpf_md_ptr(struct seq_file *, seq); 1248 u64 session_id; 1249 u64 seq_num; 1250 }; 1251 1252 struct bpf_iter__bpf_map_elem { 1253 __bpf_md_ptr(struct bpf_iter_meta *, meta); 1254 __bpf_md_ptr(struct bpf_map *, map); 1255 __bpf_md_ptr(void *, key); 1256 __bpf_md_ptr(void *, value); 1257 }; 1258 1259 int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info); 1260 void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info); 1261 bool bpf_iter_prog_supported(struct bpf_prog *prog); 1262 int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); 1263 int bpf_iter_new_fd(struct bpf_link *link); 1264 bool bpf_link_is_iter(struct bpf_link *link); 1265 struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop); 1266 int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx); 1267 void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux, 1268 struct seq_file *seq); 1269 int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux, 1270 struct bpf_link_info *info); 1271 1272 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value); 1273 int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value); 1274 int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value, 1275 u64 flags); 1276 int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value, 1277 u64 flags); 1278 1279 int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value); 1280 1281 int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file, 1282 void *key, void *value, u64 map_flags); 1283 int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); 1284 int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file, 1285 void *key, void *value, u64 map_flags); 1286 int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); 1287 1288 int bpf_get_file_flag(int flags); 1289 int bpf_check_uarg_tail_zero(void __user *uaddr, size_t expected_size, 1290 size_t actual_size); 1291 1292 /* memcpy that is used with 8-byte aligned pointers, power-of-8 size and 1293 * forced to use 'long' read/writes to try to atomically copy long counters. 1294 * Best-effort only. No barriers here, since it _will_ race with concurrent 1295 * updates from BPF programs. Called from bpf syscall and mostly used with 1296 * size 8 or 16 bytes, so ask compiler to inline it. 1297 */ 1298 static inline void bpf_long_memcpy(void *dst, const void *src, u32 size) 1299 { 1300 const long *lsrc = src; 1301 long *ldst = dst; 1302 1303 size /= sizeof(long); 1304 while (size--) 1305 *ldst++ = *lsrc++; 1306 } 1307 1308 /* verify correctness of eBPF program */ 1309 int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, 1310 union bpf_attr __user *uattr); 1311 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth); 1312 1313 /* Map specifics */ 1314 struct xdp_buff; 1315 struct sk_buff; 1316 1317 struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key); 1318 struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key); 1319 void __dev_flush(void); 1320 int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp, 1321 struct net_device *dev_rx); 1322 int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, 1323 struct net_device *dev_rx); 1324 int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, 1325 struct bpf_prog *xdp_prog); 1326 bool dev_map_can_have_prog(struct bpf_map *map); 1327 1328 struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key); 1329 void __cpu_map_flush(void); 1330 int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp, 1331 struct net_device *dev_rx); 1332 bool cpu_map_prog_allowed(struct bpf_map *map); 1333 1334 /* Return map's numa specified by userspace */ 1335 static inline int bpf_map_attr_numa_node(const union bpf_attr *attr) 1336 { 1337 return (attr->map_flags & BPF_F_NUMA_NODE) ? 1338 attr->numa_node : NUMA_NO_NODE; 1339 } 1340 1341 struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type); 1342 int array_map_alloc_check(union bpf_attr *attr); 1343 1344 int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, 1345 union bpf_attr __user *uattr); 1346 int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, 1347 union bpf_attr __user *uattr); 1348 int bpf_prog_test_run_tracing(struct bpf_prog *prog, 1349 const union bpf_attr *kattr, 1350 union bpf_attr __user *uattr); 1351 int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, 1352 const union bpf_attr *kattr, 1353 union bpf_attr __user *uattr); 1354 bool btf_ctx_access(int off, int size, enum bpf_access_type type, 1355 const struct bpf_prog *prog, 1356 struct bpf_insn_access_aux *info); 1357 int btf_struct_access(struct bpf_verifier_log *log, 1358 const struct btf_type *t, int off, int size, 1359 enum bpf_access_type atype, 1360 u32 *next_btf_id); 1361 int btf_resolve_helper_id(struct bpf_verifier_log *log, 1362 const struct bpf_func_proto *fn, int); 1363 1364 int btf_distill_func_proto(struct bpf_verifier_log *log, 1365 struct btf *btf, 1366 const struct btf_type *func_proto, 1367 const char *func_name, 1368 struct btf_func_model *m); 1369 1370 struct bpf_reg_state; 1371 int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog, 1372 struct bpf_reg_state *regs); 1373 int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog, 1374 struct bpf_reg_state *reg); 1375 int btf_check_type_match(struct bpf_verifier_env *env, struct bpf_prog *prog, 1376 struct btf *btf, const struct btf_type *t); 1377 1378 struct bpf_prog *bpf_prog_by_id(u32 id); 1379 struct bpf_link *bpf_link_by_id(u32 id); 1380 1381 const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id); 1382 #else /* !CONFIG_BPF_SYSCALL */ 1383 static inline struct bpf_prog *bpf_prog_get(u32 ufd) 1384 { 1385 return ERR_PTR(-EOPNOTSUPP); 1386 } 1387 1388 static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, 1389 enum bpf_prog_type type, 1390 bool attach_drv) 1391 { 1392 return ERR_PTR(-EOPNOTSUPP); 1393 } 1394 1395 static inline void bpf_prog_add(struct bpf_prog *prog, int i) 1396 { 1397 } 1398 1399 static inline void bpf_prog_sub(struct bpf_prog *prog, int i) 1400 { 1401 } 1402 1403 static inline void bpf_prog_put(struct bpf_prog *prog) 1404 { 1405 } 1406 1407 static inline void bpf_prog_inc(struct bpf_prog *prog) 1408 { 1409 } 1410 1411 static inline struct bpf_prog *__must_check 1412 bpf_prog_inc_not_zero(struct bpf_prog *prog) 1413 { 1414 return ERR_PTR(-EOPNOTSUPP); 1415 } 1416 1417 static inline int __bpf_prog_charge(struct user_struct *user, u32 pages) 1418 { 1419 return 0; 1420 } 1421 1422 static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages) 1423 { 1424 } 1425 1426 static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, 1427 const struct bpf_link_ops *ops, 1428 struct bpf_prog *prog) 1429 { 1430 } 1431 1432 static inline int bpf_link_prime(struct bpf_link *link, 1433 struct bpf_link_primer *primer) 1434 { 1435 return -EOPNOTSUPP; 1436 } 1437 1438 static inline int bpf_link_settle(struct bpf_link_primer *primer) 1439 { 1440 return -EOPNOTSUPP; 1441 } 1442 1443 static inline void bpf_link_cleanup(struct bpf_link_primer *primer) 1444 { 1445 } 1446 1447 static inline void bpf_link_inc(struct bpf_link *link) 1448 { 1449 } 1450 1451 static inline void bpf_link_put(struct bpf_link *link) 1452 { 1453 } 1454 1455 static inline int bpf_obj_get_user(const char __user *pathname, int flags) 1456 { 1457 return -EOPNOTSUPP; 1458 } 1459 1460 static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map, 1461 u32 key) 1462 { 1463 return NULL; 1464 } 1465 1466 static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map, 1467 u32 key) 1468 { 1469 return NULL; 1470 } 1471 static inline bool dev_map_can_have_prog(struct bpf_map *map) 1472 { 1473 return false; 1474 } 1475 1476 static inline void __dev_flush(void) 1477 { 1478 } 1479 1480 struct xdp_buff; 1481 struct bpf_dtab_netdev; 1482 1483 static inline 1484 int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp, 1485 struct net_device *dev_rx) 1486 { 1487 return 0; 1488 } 1489 1490 static inline 1491 int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, 1492 struct net_device *dev_rx) 1493 { 1494 return 0; 1495 } 1496 1497 struct sk_buff; 1498 1499 static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, 1500 struct sk_buff *skb, 1501 struct bpf_prog *xdp_prog) 1502 { 1503 return 0; 1504 } 1505 1506 static inline 1507 struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key) 1508 { 1509 return NULL; 1510 } 1511 1512 static inline void __cpu_map_flush(void) 1513 { 1514 } 1515 1516 static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, 1517 struct xdp_buff *xdp, 1518 struct net_device *dev_rx) 1519 { 1520 return 0; 1521 } 1522 1523 static inline bool cpu_map_prog_allowed(struct bpf_map *map) 1524 { 1525 return false; 1526 } 1527 1528 static inline struct bpf_prog *bpf_prog_get_type_path(const char *name, 1529 enum bpf_prog_type type) 1530 { 1531 return ERR_PTR(-EOPNOTSUPP); 1532 } 1533 1534 static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog, 1535 const union bpf_attr *kattr, 1536 union bpf_attr __user *uattr) 1537 { 1538 return -ENOTSUPP; 1539 } 1540 1541 static inline int bpf_prog_test_run_skb(struct bpf_prog *prog, 1542 const union bpf_attr *kattr, 1543 union bpf_attr __user *uattr) 1544 { 1545 return -ENOTSUPP; 1546 } 1547 1548 static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog, 1549 const union bpf_attr *kattr, 1550 union bpf_attr __user *uattr) 1551 { 1552 return -ENOTSUPP; 1553 } 1554 1555 static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, 1556 const union bpf_attr *kattr, 1557 union bpf_attr __user *uattr) 1558 { 1559 return -ENOTSUPP; 1560 } 1561 1562 static inline void bpf_map_put(struct bpf_map *map) 1563 { 1564 } 1565 1566 static inline struct bpf_prog *bpf_prog_by_id(u32 id) 1567 { 1568 return ERR_PTR(-ENOTSUPP); 1569 } 1570 1571 static inline const struct bpf_func_proto * 1572 bpf_base_func_proto(enum bpf_func_id func_id) 1573 { 1574 return NULL; 1575 } 1576 #endif /* CONFIG_BPF_SYSCALL */ 1577 1578 static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, 1579 enum bpf_prog_type type) 1580 { 1581 return bpf_prog_get_type_dev(ufd, type, false); 1582 } 1583 1584 bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool); 1585 1586 int bpf_prog_offload_compile(struct bpf_prog *prog); 1587 void bpf_prog_offload_destroy(struct bpf_prog *prog); 1588 int bpf_prog_offload_info_fill(struct bpf_prog_info *info, 1589 struct bpf_prog *prog); 1590 1591 int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map); 1592 1593 int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value); 1594 int bpf_map_offload_update_elem(struct bpf_map *map, 1595 void *key, void *value, u64 flags); 1596 int bpf_map_offload_delete_elem(struct bpf_map *map, void *key); 1597 int bpf_map_offload_get_next_key(struct bpf_map *map, 1598 void *key, void *next_key); 1599 1600 bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map); 1601 1602 struct bpf_offload_dev * 1603 bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv); 1604 void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev); 1605 void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev); 1606 int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev, 1607 struct net_device *netdev); 1608 void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev, 1609 struct net_device *netdev); 1610 bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev); 1611 1612 #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) 1613 int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr); 1614 1615 static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux) 1616 { 1617 return aux->offload_requested; 1618 } 1619 1620 static inline bool bpf_map_is_dev_bound(struct bpf_map *map) 1621 { 1622 return unlikely(map->ops == &bpf_map_offload_ops); 1623 } 1624 1625 struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr); 1626 void bpf_map_offload_map_free(struct bpf_map *map); 1627 #else 1628 static inline int bpf_prog_offload_init(struct bpf_prog *prog, 1629 union bpf_attr *attr) 1630 { 1631 return -EOPNOTSUPP; 1632 } 1633 1634 static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux) 1635 { 1636 return false; 1637 } 1638 1639 static inline bool bpf_map_is_dev_bound(struct bpf_map *map) 1640 { 1641 return false; 1642 } 1643 1644 static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr) 1645 { 1646 return ERR_PTR(-EOPNOTSUPP); 1647 } 1648 1649 static inline void bpf_map_offload_map_free(struct bpf_map *map) 1650 { 1651 } 1652 #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ 1653 1654 #if defined(CONFIG_BPF_STREAM_PARSER) 1655 int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, 1656 struct bpf_prog *old, u32 which); 1657 int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog); 1658 int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype); 1659 int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags); 1660 void sock_map_unhash(struct sock *sk); 1661 void sock_map_close(struct sock *sk, long timeout); 1662 #else 1663 static inline int sock_map_prog_update(struct bpf_map *map, 1664 struct bpf_prog *prog, 1665 struct bpf_prog *old, u32 which) 1666 { 1667 return -EOPNOTSUPP; 1668 } 1669 1670 static inline int sock_map_get_from_fd(const union bpf_attr *attr, 1671 struct bpf_prog *prog) 1672 { 1673 return -EINVAL; 1674 } 1675 1676 static inline int sock_map_prog_detach(const union bpf_attr *attr, 1677 enum bpf_prog_type ptype) 1678 { 1679 return -EOPNOTSUPP; 1680 } 1681 1682 static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, 1683 u64 flags) 1684 { 1685 return -EOPNOTSUPP; 1686 } 1687 #endif /* CONFIG_BPF_STREAM_PARSER */ 1688 1689 #if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) 1690 void bpf_sk_reuseport_detach(struct sock *sk); 1691 int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, 1692 void *value); 1693 int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key, 1694 void *value, u64 map_flags); 1695 #else 1696 static inline void bpf_sk_reuseport_detach(struct sock *sk) 1697 { 1698 } 1699 1700 #ifdef CONFIG_BPF_SYSCALL 1701 static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, 1702 void *key, void *value) 1703 { 1704 return -EOPNOTSUPP; 1705 } 1706 1707 static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, 1708 void *key, void *value, 1709 u64 map_flags) 1710 { 1711 return -EOPNOTSUPP; 1712 } 1713 #endif /* CONFIG_BPF_SYSCALL */ 1714 #endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */ 1715 1716 /* verifier prototypes for helper functions called from eBPF programs */ 1717 extern const struct bpf_func_proto bpf_map_lookup_elem_proto; 1718 extern const struct bpf_func_proto bpf_map_update_elem_proto; 1719 extern const struct bpf_func_proto bpf_map_delete_elem_proto; 1720 extern const struct bpf_func_proto bpf_map_push_elem_proto; 1721 extern const struct bpf_func_proto bpf_map_pop_elem_proto; 1722 extern const struct bpf_func_proto bpf_map_peek_elem_proto; 1723 1724 extern const struct bpf_func_proto bpf_get_prandom_u32_proto; 1725 extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; 1726 extern const struct bpf_func_proto bpf_get_numa_node_id_proto; 1727 extern const struct bpf_func_proto bpf_tail_call_proto; 1728 extern const struct bpf_func_proto bpf_ktime_get_ns_proto; 1729 extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto; 1730 extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto; 1731 extern const struct bpf_func_proto bpf_get_current_uid_gid_proto; 1732 extern const struct bpf_func_proto bpf_get_current_comm_proto; 1733 extern const struct bpf_func_proto bpf_get_stackid_proto; 1734 extern const struct bpf_func_proto bpf_get_stack_proto; 1735 extern const struct bpf_func_proto bpf_get_task_stack_proto; 1736 extern const struct bpf_func_proto bpf_get_stackid_proto_pe; 1737 extern const struct bpf_func_proto bpf_get_stack_proto_pe; 1738 extern const struct bpf_func_proto bpf_sock_map_update_proto; 1739 extern const struct bpf_func_proto bpf_sock_hash_update_proto; 1740 extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto; 1741 extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto; 1742 extern const struct bpf_func_proto bpf_msg_redirect_hash_proto; 1743 extern const struct bpf_func_proto bpf_msg_redirect_map_proto; 1744 extern const struct bpf_func_proto bpf_sk_redirect_hash_proto; 1745 extern const struct bpf_func_proto bpf_sk_redirect_map_proto; 1746 extern const struct bpf_func_proto bpf_spin_lock_proto; 1747 extern const struct bpf_func_proto bpf_spin_unlock_proto; 1748 extern const struct bpf_func_proto bpf_get_local_storage_proto; 1749 extern const struct bpf_func_proto bpf_strtol_proto; 1750 extern const struct bpf_func_proto bpf_strtoul_proto; 1751 extern const struct bpf_func_proto bpf_tcp_sock_proto; 1752 extern const struct bpf_func_proto bpf_jiffies64_proto; 1753 extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto; 1754 extern const struct bpf_func_proto bpf_event_output_data_proto; 1755 extern const struct bpf_func_proto bpf_ringbuf_output_proto; 1756 extern const struct bpf_func_proto bpf_ringbuf_reserve_proto; 1757 extern const struct bpf_func_proto bpf_ringbuf_submit_proto; 1758 extern const struct bpf_func_proto bpf_ringbuf_discard_proto; 1759 extern const struct bpf_func_proto bpf_ringbuf_query_proto; 1760 extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto; 1761 extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto; 1762 extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto; 1763 extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto; 1764 extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto; 1765 1766 const struct bpf_func_proto *bpf_tracing_func_proto( 1767 enum bpf_func_id func_id, const struct bpf_prog *prog); 1768 1769 const struct bpf_func_proto *tracing_prog_func_proto( 1770 enum bpf_func_id func_id, const struct bpf_prog *prog); 1771 1772 /* Shared helpers among cBPF and eBPF. */ 1773 void bpf_user_rnd_init_once(void); 1774 u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 1775 u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 1776 1777 #if defined(CONFIG_NET) 1778 bool bpf_sock_common_is_valid_access(int off, int size, 1779 enum bpf_access_type type, 1780 struct bpf_insn_access_aux *info); 1781 bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type, 1782 struct bpf_insn_access_aux *info); 1783 u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, 1784 const struct bpf_insn *si, 1785 struct bpf_insn *insn_buf, 1786 struct bpf_prog *prog, 1787 u32 *target_size); 1788 #else 1789 static inline bool bpf_sock_common_is_valid_access(int off, int size, 1790 enum bpf_access_type type, 1791 struct bpf_insn_access_aux *info) 1792 { 1793 return false; 1794 } 1795 static inline bool bpf_sock_is_valid_access(int off, int size, 1796 enum bpf_access_type type, 1797 struct bpf_insn_access_aux *info) 1798 { 1799 return false; 1800 } 1801 static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, 1802 const struct bpf_insn *si, 1803 struct bpf_insn *insn_buf, 1804 struct bpf_prog *prog, 1805 u32 *target_size) 1806 { 1807 return 0; 1808 } 1809 #endif 1810 1811 #ifdef CONFIG_INET 1812 struct sk_reuseport_kern { 1813 struct sk_buff *skb; 1814 struct sock *sk; 1815 struct sock *selected_sk; 1816 void *data_end; 1817 u32 hash; 1818 u32 reuseport_id; 1819 bool bind_inany; 1820 }; 1821 bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type, 1822 struct bpf_insn_access_aux *info); 1823 1824 u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, 1825 const struct bpf_insn *si, 1826 struct bpf_insn *insn_buf, 1827 struct bpf_prog *prog, 1828 u32 *target_size); 1829 1830 bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type, 1831 struct bpf_insn_access_aux *info); 1832 1833 u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, 1834 const struct bpf_insn *si, 1835 struct bpf_insn *insn_buf, 1836 struct bpf_prog *prog, 1837 u32 *target_size); 1838 #else 1839 static inline bool bpf_tcp_sock_is_valid_access(int off, int size, 1840 enum bpf_access_type type, 1841 struct bpf_insn_access_aux *info) 1842 { 1843 return false; 1844 } 1845 1846 static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, 1847 const struct bpf_insn *si, 1848 struct bpf_insn *insn_buf, 1849 struct bpf_prog *prog, 1850 u32 *target_size) 1851 { 1852 return 0; 1853 } 1854 static inline bool bpf_xdp_sock_is_valid_access(int off, int size, 1855 enum bpf_access_type type, 1856 struct bpf_insn_access_aux *info) 1857 { 1858 return false; 1859 } 1860 1861 static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, 1862 const struct bpf_insn *si, 1863 struct bpf_insn *insn_buf, 1864 struct bpf_prog *prog, 1865 u32 *target_size) 1866 { 1867 return 0; 1868 } 1869 #endif /* CONFIG_INET */ 1870 1871 enum bpf_text_poke_type { 1872 BPF_MOD_CALL, 1873 BPF_MOD_JUMP, 1874 }; 1875 1876 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, 1877 void *addr1, void *addr2); 1878 1879 #endif /* _LINUX_BPF_H */ 1880