/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1

#include <uapi/linux/bpf.h>

#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/mm_types.h>
#include <linux/wait.h>
#include <linux/u64_stats_sync.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/module.h>

struct bpf_verifier_env;
struct bpf_verifier_log;
struct perf_event;
struct bpf_prog;
struct bpf_prog_aux;
struct bpf_map;
struct sock;
struct seq_file;
struct btf;
struct btf_type;
struct exception_table_entry;

extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;

/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
	/* funcs callable from userspace (via syscall) */
	int (*map_alloc_check)(union bpf_attr *attr);
	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
	void (*map_release)(struct bpf_map *map, struct file *map_file);
	void (*map_free)(struct bpf_map *map);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
	void (*map_release_uref)(struct bpf_map *map);
	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
	int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_lookup_and_delete_batch)(struct bpf_map *map,
					   const union bpf_attr *attr,
					   union bpf_attr __user *uattr);
	int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);

	/* funcs callable from userspace and from eBPF programs */
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_map *map, void *key);
	int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
	int (*map_pop_elem)(struct bpf_map *map, void *value);
	int (*map_peek_elem)(struct bpf_map *map, void *value);

	/* funcs called by prog_array and perf_event_array map */
	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
				int fd);
	void (*map_fd_put_ptr)(void *ptr);
	u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
	u32 (*map_fd_sys_lookup_elem)(void *ptr);
	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
				  struct seq_file *m);
	int (*map_check_btf)(const struct bpf_map *map,
			     const struct btf *btf,
			     const struct btf_type *key_type,
			     const struct btf_type *value_type);

	/* Prog poke tracking helpers. */
	int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
			     struct bpf_prog *new);

	/* Direct value access helpers. */
	int (*map_direct_value_addr)(const struct bpf_map *map,
				     u64 *imm, u32 off);
	int (*map_direct_value_meta)(const struct bpf_map *map,
				     u64 imm, u32 *off);
	int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
};

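/* Example (illustrative only, the "example_*" names are hypothetical): a
 * minimal map type that only supports syscall-side management and
 * program-side lookups could get by with a small subset of the callbacks
 * above, e.g.:
 *
 *	const struct bpf_map_ops example_map_ops = {
 *		.map_alloc_check	= example_map_alloc_check,
 *		.map_alloc		= example_map_alloc,
 *		.map_free		= example_map_free,
 *		.map_get_next_key	= example_map_get_next_key,
 *		.map_lookup_elem	= example_map_lookup_elem,
 *	};
 *
 * Real implementations live in kernel/bpf/ (e.g. arraymap.c, hashtab.c)
 * and are listed via BPF_MAP_TYPE() in <linux/bpf_types.h>.
 */
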
struct bpf_map_memory {
	u32 pages;
	struct user_struct *user;
};

struct bpf_map {
	/* The first two cachelines with read-mostly members of which some
	 * are also accessed in fast-path (e.g. ops, max_entries).
	 */
	const struct bpf_map_ops *ops ____cacheline_aligned;
	struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	enum bpf_map_type map_type;
	u32 key_size;
	u32 value_size;
	u32 max_entries;
	u32 map_flags;
	int spin_lock_off; /* >=0 valid offset, <0 error */
	u32 id;
	int numa_node;
	u32 btf_key_type_id;
	u32 btf_value_type_id;
	struct btf *btf;
	struct bpf_map_memory memory;
	char name[BPF_OBJ_NAME_LEN];
	u32 btf_vmlinux_value_type_id;
	bool unpriv_array;
	bool frozen; /* write-once; write-protected by freeze_mutex */
	/* 22 bytes hole */

	/* The 3rd and 4th cacheline with misc members to avoid false sharing
	 * particularly with refcounting.
	 */
	atomic64_t refcnt ____cacheline_aligned;
	atomic64_t usercnt;
	struct work_struct work;
	struct mutex freeze_mutex;
	u64 writecnt; /* writable mmap cnt; protected by freeze_mutex */
};

static inline bool map_value_has_spin_lock(const struct bpf_map *map)
{
	return map->spin_lock_off >= 0;
}

static inline void check_and_init_map_lock(struct bpf_map *map, void *dst)
{
	if (likely(!map_value_has_spin_lock(map)))
		return;
	*(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
		(struct bpf_spin_lock){};
}

/* copy everything but bpf_spin_lock */
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
	if (unlikely(map_value_has_spin_lock(map))) {
		u32 off = map->spin_lock_off;

		memcpy(dst, src, off);
		memcpy(dst + off + sizeof(struct bpf_spin_lock),
		       src + off + sizeof(struct bpf_spin_lock),
		       map->value_size - off - sizeof(struct bpf_spin_lock));
	} else {
		memcpy(dst, src, map->value_size);
	}
}
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src);

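/* Example: for a map value laid out as
 *
 *	struct example_val {			// hypothetical layout
 *		long cnt;
 *		struct bpf_spin_lock lock;	// spin_lock_off points here
 *		long stats;
 *	};
 *
 * copy_map_value() above copies 'cnt' and 'stats' but leaves the
 * destination's 'lock' word untouched, so an update does not stomp on a
 * lock that may currently be held by a program via bpf_spin_lock().
 */
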
struct bpf_offload_dev;
struct bpf_offloaded_map;

struct bpf_map_dev_ops {
	int (*map_get_next_key)(struct bpf_offloaded_map *map,
				void *key, void *next_key);
	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value);
	int (*map_update_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
};

struct bpf_offloaded_map {
	struct bpf_map map;
	struct net_device *netdev;
	const struct bpf_map_dev_ops *dev_ops;
	void *dev_priv;
	struct list_head offloads;
};

static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
{
	return container_of(map, struct bpf_offloaded_map, map);
}

static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
	return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
		map->ops->map_seq_show_elem;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type);

extern const struct bpf_map_ops bpf_map_offload_ops;

/* function argument constraints */
enum bpf_arg_type {
	ARG_DONTCARE = 0,	/* unused argument in helper function */

	/* the following constraints used to prototype
	 * bpf_map_lookup/update/delete_elem() functions
	 */
	ARG_CONST_MAP_PTR,	/* const argument used as pointer to bpf_map */
	ARG_PTR_TO_MAP_KEY,	/* pointer to stack used as map key */
	ARG_PTR_TO_MAP_VALUE,	/* pointer to stack used as map value */
	ARG_PTR_TO_UNINIT_MAP_VALUE,	/* pointer to valid memory used to store a map value */
	ARG_PTR_TO_MAP_VALUE_OR_NULL,	/* pointer to stack used as map value or NULL */

	/* the following constraints used to prototype bpf_memcmp() and other
	 * functions that access data on eBPF program stack
	 */
	ARG_PTR_TO_MEM,		/* pointer to valid memory (stack, packet, map value) */
	ARG_PTR_TO_MEM_OR_NULL,	/* pointer to valid memory or NULL */
	ARG_PTR_TO_UNINIT_MEM,	/* pointer to memory does not need to be initialized,
				 * helper function must fill all bytes or clear
				 * them in error case.
				 */

	ARG_CONST_SIZE,		/* number of bytes accessed from memory */
	ARG_CONST_SIZE_OR_ZERO,	/* number of bytes accessed from memory or 0 */

	ARG_PTR_TO_CTX,		/* pointer to context */
	ARG_ANYTHING,		/* any (initialized) argument is ok */
	ARG_PTR_TO_SPIN_LOCK,	/* pointer to bpf_spin_lock */
	ARG_PTR_TO_SOCK_COMMON,	/* pointer to sock_common */
	ARG_PTR_TO_INT,		/* pointer to int */
	ARG_PTR_TO_LONG,	/* pointer to long */
	ARG_PTR_TO_SOCKET,	/* pointer to bpf_sock (fullsock) */
	ARG_PTR_TO_BTF_ID,	/* pointer to in-kernel struct */
};

/* type of values returned from helper functions */
enum bpf_return_type {
	RET_INTEGER,			/* function returns integer */
	RET_VOID,			/* function doesn't return anything */
	RET_PTR_TO_MAP_VALUE,		/* returns a pointer to map elem value */
	RET_PTR_TO_MAP_VALUE_OR_NULL,	/* returns a pointer to map elem value or NULL */
	RET_PTR_TO_SOCKET_OR_NULL,	/* returns a pointer to a socket or NULL */
	RET_PTR_TO_TCP_SOCK_OR_NULL,	/* returns a pointer to a tcp_sock or NULL */
	RET_PTR_TO_SOCK_COMMON_OR_NULL,	/* returns a pointer to a sock_common or NULL */
};

/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
 * instructions after verifying
 */
struct bpf_func_proto {
	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
	bool gpl_only;
	bool pkt_access;
	enum bpf_return_type ret_type;
	union {
		struct {
			enum bpf_arg_type arg1_type;
			enum bpf_arg_type arg2_type;
			enum bpf_arg_type arg3_type;
			enum bpf_arg_type arg4_type;
			enum bpf_arg_type arg5_type;
		};
		enum bpf_arg_type arg_type[5];
	};
	int *btf_id; /* BTF ids of arguments */
};

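/* Example: the proto describing the bpf_map_lookup_elem() helper is
 * roughly (see kernel/bpf/helpers.c for the authoritative definition):
 *
 *	const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 *		.func		= bpf_map_lookup_elem,
 *		.gpl_only	= false,
 *		.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_KEY,
 *	};
 *
 * which is what lets the verifier type-check R1/R2 at the call site and
 * track that R0 may be NULL afterwards.
 */
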
/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
 * the first argument to eBPF programs.
 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
 */
struct bpf_context;

enum bpf_access_type {
	BPF_READ = 1,
	BPF_WRITE = 2
};

/* types of values stored in eBPF registers */
/* Pointer types represent:
 * pointer
 * pointer + imm
 * pointer + (u16) var
 * pointer + (u16) var + imm
 * if (range > 0) then [ptr, ptr + range - off) is safe to access
 * if (id > 0) means that some 'var' was added
 * if (off > 0) means that 'imm' was added
 */
enum bpf_reg_type {
	NOT_INIT = 0,		 /* nothing was written into register */
	SCALAR_VALUE,		 /* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		 /* reg points to bpf_context */
	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
	PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
	PTR_TO_STACK,		 /* reg == frame_pointer + offset */
	PTR_TO_PACKET_META,	 /* skb->data - meta_len */
	PTR_TO_PACKET,		 /* reg points to skb->data */
	PTR_TO_PACKET_END,	 /* skb->data + headlen */
	PTR_TO_FLOW_KEYS,	 /* reg points to bpf_flow_keys */
	PTR_TO_SOCKET,		 /* reg points to struct bpf_sock */
	PTR_TO_SOCKET_OR_NULL,	 /* reg points to struct bpf_sock or NULL */
	PTR_TO_SOCK_COMMON,	 /* reg points to sock_common */
	PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */
	PTR_TO_TCP_SOCK,	 /* reg points to struct tcp_sock */
	PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
	PTR_TO_TP_BUFFER,	 /* reg points to a writable raw tp's buffer */
	PTR_TO_XDP_SOCK,	 /* reg points to struct xdp_sock */
	PTR_TO_BTF_ID,		 /* reg points to kernel struct */
};

/* The information passed from prog-specific *_is_valid_access
 * back to the verifier.
 */
323 */ 324 struct bpf_insn_access_aux { 325 enum bpf_reg_type reg_type; 326 union { 327 int ctx_field_size; 328 u32 btf_id; 329 }; 330 struct bpf_verifier_log *log; /* for verbose logs */ 331 }; 332 333 static inline void 334 bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size) 335 { 336 aux->ctx_field_size = size; 337 } 338 339 struct bpf_prog_ops { 340 int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr, 341 union bpf_attr __user *uattr); 342 }; 343 344 struct bpf_verifier_ops { 345 /* return eBPF function prototype for verification */ 346 const struct bpf_func_proto * 347 (*get_func_proto)(enum bpf_func_id func_id, 348 const struct bpf_prog *prog); 349 350 /* return true if 'size' wide access at offset 'off' within bpf_context 351 * with 'type' (read or write) is allowed 352 */ 353 bool (*is_valid_access)(int off, int size, enum bpf_access_type type, 354 const struct bpf_prog *prog, 355 struct bpf_insn_access_aux *info); 356 int (*gen_prologue)(struct bpf_insn *insn, bool direct_write, 357 const struct bpf_prog *prog); 358 int (*gen_ld_abs)(const struct bpf_insn *orig, 359 struct bpf_insn *insn_buf); 360 u32 (*convert_ctx_access)(enum bpf_access_type type, 361 const struct bpf_insn *src, 362 struct bpf_insn *dst, 363 struct bpf_prog *prog, u32 *target_size); 364 int (*btf_struct_access)(struct bpf_verifier_log *log, 365 const struct btf_type *t, int off, int size, 366 enum bpf_access_type atype, 367 u32 *next_btf_id); 368 }; 369 370 struct bpf_prog_offload_ops { 371 /* verifier basic callbacks */ 372 int (*insn_hook)(struct bpf_verifier_env *env, 373 int insn_idx, int prev_insn_idx); 374 int (*finalize)(struct bpf_verifier_env *env); 375 /* verifier optimization callbacks (called after .finalize) */ 376 int (*replace_insn)(struct bpf_verifier_env *env, u32 off, 377 struct bpf_insn *insn); 378 int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt); 379 /* program management callbacks */ 380 int (*prepare)(struct bpf_prog *prog); 381 int (*translate)(struct bpf_prog *prog); 382 void (*destroy)(struct bpf_prog *prog); 383 }; 384 385 struct bpf_prog_offload { 386 struct bpf_prog *prog; 387 struct net_device *netdev; 388 struct bpf_offload_dev *offdev; 389 void *dev_priv; 390 struct list_head offloads; 391 bool dev_state; 392 bool opt_failed; 393 void *jited_image; 394 u32 jited_len; 395 }; 396 397 enum bpf_cgroup_storage_type { 398 BPF_CGROUP_STORAGE_SHARED, 399 BPF_CGROUP_STORAGE_PERCPU, 400 __BPF_CGROUP_STORAGE_MAX 401 }; 402 403 #define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX 404 405 /* The longest tracepoint has 12 args. 406 * See include/trace/bpf_probe.h 407 */ 408 #define MAX_BPF_FUNC_ARGS 12 409 410 struct bpf_prog_stats { 411 u64 cnt; 412 u64 nsecs; 413 struct u64_stats_sync syncp; 414 } __aligned(2 * sizeof(u64)); 415 416 struct btf_func_model { 417 u8 ret_size; 418 u8 nr_args; 419 u8 arg_size[MAX_BPF_FUNC_ARGS]; 420 }; 421 422 /* Restore arguments before returning from trampoline to let original function 423 * continue executing. This flag is used for fentry progs when there are no 424 * fexit progs. 425 */ 426 #define BPF_TRAMP_F_RESTORE_REGS BIT(0) 427 /* Call original function after fentry progs, but before fexit progs. 428 * Makes sense for fentry/fexit, normal calls and indirect calls. 429 */ 430 #define BPF_TRAMP_F_CALL_ORIG BIT(1) 431 /* Skip current frame and return to parent. Makes sense for fentry/fexit 432 * programs only. Should not be used with normal calls and indirect calls. 
433 */ 434 #define BPF_TRAMP_F_SKIP_FRAME BIT(2) 435 436 /* Different use cases for BPF trampoline: 437 * 1. replace nop at the function entry (kprobe equivalent) 438 * flags = BPF_TRAMP_F_RESTORE_REGS 439 * fentry = a set of programs to run before returning from trampoline 440 * 441 * 2. replace nop at the function entry (kprobe + kretprobe equivalent) 442 * flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME 443 * orig_call = fentry_ip + MCOUNT_INSN_SIZE 444 * fentry = a set of program to run before calling original function 445 * fexit = a set of program to run after original function 446 * 447 * 3. replace direct call instruction anywhere in the function body 448 * or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid) 449 * With flags = 0 450 * fentry = a set of programs to run before returning from trampoline 451 * With flags = BPF_TRAMP_F_CALL_ORIG 452 * orig_call = original callback addr or direct function addr 453 * fentry = a set of program to run before calling original function 454 * fexit = a set of program to run after original function 455 */ 456 int arch_prepare_bpf_trampoline(void *image, void *image_end, 457 const struct btf_func_model *m, u32 flags, 458 struct bpf_prog **fentry_progs, int fentry_cnt, 459 struct bpf_prog **fexit_progs, int fexit_cnt, 460 void *orig_call); 461 /* these two functions are called from generated trampoline */ 462 u64 notrace __bpf_prog_enter(void); 463 void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start); 464 465 enum bpf_tramp_prog_type { 466 BPF_TRAMP_FENTRY, 467 BPF_TRAMP_FEXIT, 468 BPF_TRAMP_MAX, 469 BPF_TRAMP_REPLACE, /* more than MAX */ 470 }; 471 472 struct bpf_trampoline { 473 /* hlist for trampoline_table */ 474 struct hlist_node hlist; 475 /* serializes access to fields of this trampoline */ 476 struct mutex mutex; 477 refcount_t refcnt; 478 u64 key; 479 struct { 480 struct btf_func_model model; 481 void *addr; 482 bool ftrace_managed; 483 } func; 484 /* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF 485 * program by replacing one of its functions. func.addr is the address 486 * of the function it replaced. 487 */ 488 struct bpf_prog *extension_prog; 489 /* list of BPF programs using this trampoline */ 490 struct hlist_head progs_hlist[BPF_TRAMP_MAX]; 491 /* Number of attached programs. A counter per kind. 
struct bpf_trampoline {
	/* hlist for trampoline_table */
	struct hlist_node hlist;
	/* serializes access to fields of this trampoline */
	struct mutex mutex;
	refcount_t refcnt;
	u64 key;
	struct {
		struct btf_func_model model;
		void *addr;
		bool ftrace_managed;
	} func;
	/* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF
	 * program by replacing one of its functions. func.addr is the address
	 * of the function it replaced.
	 */
	struct bpf_prog *extension_prog;
	/* list of BPF programs using this trampoline */
	struct hlist_head progs_hlist[BPF_TRAMP_MAX];
	/* Number of attached programs. A counter per kind. */
	int progs_cnt[BPF_TRAMP_MAX];
	/* Executable image of trampoline */
	void *image;
	u64 selector;
};

#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */

struct bpf_dispatcher_prog {
	struct bpf_prog *prog;
	refcount_t users;
};

struct bpf_dispatcher {
	/* dispatcher mutex */
	struct mutex mutex;
	void *func;
	struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
	int num_progs;
	void *image;
	u32 image_off;
};

static __always_inline unsigned int bpf_dispatcher_nopfunc(
	const void *ctx,
	const struct bpf_insn *insnsi,
	unsigned int (*bpf_func)(const void *,
				 const struct bpf_insn *))
{
	return bpf_func(ctx, insnsi);
}
#ifdef CONFIG_BPF_JIT
struct bpf_trampoline *bpf_trampoline_lookup(u64 key);
int bpf_trampoline_link_prog(struct bpf_prog *prog);
int bpf_trampoline_unlink_prog(struct bpf_prog *prog);
void bpf_trampoline_put(struct bpf_trampoline *tr);
#define BPF_DISPATCHER_INIT(name) {			\
	.mutex = __MUTEX_INITIALIZER(name.mutex),	\
	.func = &name##func,				\
	.progs = {},					\
	.num_progs = 0,					\
	.image = NULL,					\
	.image_off = 0					\
}

#define DEFINE_BPF_DISPATCHER(name)					\
	noinline unsigned int name##func(				\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		unsigned int (*bpf_func)(const void *,			\
					 const struct bpf_insn *))	\
	{								\
		return bpf_func(ctx, insnsi);				\
	}								\
	EXPORT_SYMBOL(name##func);					\
	struct bpf_dispatcher name = BPF_DISPATCHER_INIT(name);
#define DECLARE_BPF_DISPATCHER(name)					\
	unsigned int name##func(					\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		unsigned int (*bpf_func)(const void *,			\
					 const struct bpf_insn *));	\
	extern struct bpf_dispatcher name;
#define BPF_DISPATCHER_FUNC(name) name##func
#define BPF_DISPATCHER_PTR(name) (&name)
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
				struct bpf_prog *to);
struct bpf_image {
	struct latch_tree_node tnode;
	unsigned char data[];
};
#define BPF_IMAGE_SIZE (PAGE_SIZE - sizeof(struct bpf_image))
bool is_bpf_image_address(unsigned long address);
void *bpf_image_alloc(void);
#else
static inline struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
	return NULL;
}
static inline int bpf_trampoline_link_prog(struct bpf_prog *prog)
{
	return -ENOTSUPP;
}
static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog)
{
	return -ENOTSUPP;
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
#define DECLARE_BPF_DISPATCHER(name)
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nopfunc
#define BPF_DISPATCHER_PTR(name) NULL
static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
					      struct bpf_prog *from,
					      struct bpf_prog *to) {}
static inline bool is_bpf_image_address(unsigned long address)
{
	return false;
}
#endif

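/* Example usage of the dispatcher macros (the "my_disp" name is purely
 * illustrative; the XDP dispatcher in net/core/filter.c follows the same
 * pattern):
 *
 *	DEFINE_BPF_DISPATCHER(my_disp)
 *
 *	static unsigned int run_prog(const struct bpf_prog *prog, void *ctx)
 *	{
 *		return BPF_DISPATCHER_FUNC(my_disp)(ctx, prog->insnsi,
 *						    prog->bpf_func);
 *	}
 *
 *	// later, retarget the direct-jump image from 'old' to 'new':
 *	bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(my_disp), old, new);
 */
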
struct bpf_func_info_aux {
	u16 linkage;
	bool unreliable;
};

enum bpf_jit_poke_reason {
	BPF_POKE_REASON_TAIL_CALL,
};

/* Descriptor of pokes pointing /into/ the JITed image. */
struct bpf_jit_poke_descriptor {
	void *ip;
	union {
		struct {
			struct bpf_map *map;
			u32 key;
		} tail_call;
	};
	bool ip_stable;
	u8 adj_off;
	u16 reason;
};

struct bpf_prog_aux {
	atomic64_t refcnt;
	u32 used_map_cnt;
	u32 max_ctx_offset;
	u32 max_pkt_offset;
	u32 max_tp_access;
	u32 stack_depth;
	u32 id;
	u32 func_cnt; /* used by non-func prog as the number of func progs */
	u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
	u32 attach_btf_id; /* in-kernel BTF type id to attach to */
	struct bpf_prog *linked_prog;
	bool verifier_zext; /* Zero extensions have been inserted by verifier. */
	bool offload_requested;
	bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
	bool func_proto_unreliable;
	enum bpf_tramp_prog_type trampoline_prog_type;
	struct bpf_trampoline *trampoline;
	struct hlist_node tramp_hlist;
	/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
	const struct btf_type *attach_func_proto;
	/* function name for valid attach_btf_id */
	const char *attach_func_name;
	struct bpf_prog **func;
	void *jit_data; /* JIT specific data. arch dependent */
	struct bpf_jit_poke_descriptor *poke_tab;
	u32 size_poke_tab;
	struct latch_tree_node ksym_tnode;
	struct list_head ksym_lnode;
	const struct bpf_prog_ops *ops;
	struct bpf_map **used_maps;
	struct bpf_prog *prog;
	struct user_struct *user;
	u64 load_time; /* ns since boottime */
	struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
	void *security;
#endif
	struct bpf_prog_offload *offload;
	struct btf *btf;
	struct bpf_func_info *func_info;
	struct bpf_func_info_aux *func_info_aux;
	/* bpf_line_info loaded from userspace. linfo->insn_off
	 * has the xlated insn offset.
	 * Both the main and sub prog share the same linfo.
	 * The subprog can access its first linfo by
	 * using the linfo_idx.
	 */
	struct bpf_line_info *linfo;
	/* jited_linfo is the jited addr of the linfo. It has a
	 * one to one mapping to linfo:
	 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
	 * Both the main and sub prog share the same jited_linfo.
	 * The subprog can access its first jited_linfo by
	 * using the linfo_idx.
	 */
	void **jited_linfo;
	u32 func_info_cnt;
	u32 nr_linfo;
	/* subprog can use linfo_idx to access its first linfo and
	 * jited_linfo.
	 * main prog always has linfo_idx == 0
	 */
	u32 linfo_idx;
	u32 num_exentries;
	struct exception_table_entry *extable;
	struct bpf_prog_stats __percpu *stats;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};

struct bpf_array_aux {
	/* 'Ownership' of prog array is claimed by the first program that
	 * is going to use this map or by the first program whose FD is
	 * stored in the map to make sure that all callers and callees have
	 * the same prog type and JITed flag.
	 */
	enum bpf_prog_type type;
	bool jited;
	/* Programs with direct jumps into programs part of this array. */
	struct list_head poke_progs;
	struct bpf_map *map;
	struct mutex poke_mutex;
	struct work_struct work;
};

struct bpf_struct_ops_value;
struct btf_type;
struct btf_member;

#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
struct bpf_struct_ops {
	const struct bpf_verifier_ops *verifier_ops;
	int (*init)(struct btf *btf);
	int (*check_member)(const struct btf_type *t,
			    const struct btf_member *member);
	int (*init_member)(const struct btf_type *t,
			   const struct btf_member *member,
			   void *kdata, const void *udata);
	int (*reg)(void *kdata);
	void (*unreg)(void *kdata);
	const struct btf_type *type;
	const struct btf_type *value_type;
	const char *name;
	struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
	u32 type_id;
	u32 value_id;
};

#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
void bpf_struct_ops_init(struct btf *btf);
bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		return bpf_struct_ops_get(data);
	else
		return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		bpf_struct_ops_put(data);
	else
		module_put(owner);
}
#else
static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
	return NULL;
}
static inline void bpf_struct_ops_init(struct btf *btf) { }
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	module_put(owner);
}
static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
						     void *key,
						     void *value)
{
	return -EINVAL;
}
#endif

struct bpf_array {
	struct bpf_map map;
	u32 elem_size;
	u32 index_mask;
	struct bpf_array_aux *aux;
	union {
		char value[0] __aligned(8);
		void *ptrs[0] __aligned(8);
		void __percpu *pptrs[0] __aligned(8);
	};
};

#define BPF_COMPLEXITY_LIMIT_INSNS 1000000 /* yes. 1M insns */
#define MAX_TAIL_CALL_CNT 32

#define BPF_F_ACCESS_MASK	(BPF_F_RDONLY |		\
				 BPF_F_RDONLY_PROG |	\
				 BPF_F_WRONLY |		\
				 BPF_F_WRONLY_PROG)

#define BPF_MAP_CAN_READ  BIT(0)
#define BPF_MAP_CAN_WRITE BIT(1)

static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
{
	u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);

	/* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
	 * not possible.
	 */
801 */ 802 if (access_flags & BPF_F_RDONLY_PROG) 803 return BPF_MAP_CAN_READ; 804 else if (access_flags & BPF_F_WRONLY_PROG) 805 return BPF_MAP_CAN_WRITE; 806 else 807 return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE; 808 } 809 810 static inline bool bpf_map_flags_access_ok(u32 access_flags) 811 { 812 return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) != 813 (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG); 814 } 815 816 struct bpf_event_entry { 817 struct perf_event *event; 818 struct file *perf_file; 819 struct file *map_file; 820 struct rcu_head rcu; 821 }; 822 823 bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp); 824 int bpf_prog_calc_tag(struct bpf_prog *fp); 825 const char *kernel_type_name(u32 btf_type_id); 826 827 const struct bpf_func_proto *bpf_get_trace_printk_proto(void); 828 829 typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src, 830 unsigned long off, unsigned long len); 831 typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type, 832 const struct bpf_insn *src, 833 struct bpf_insn *dst, 834 struct bpf_prog *prog, 835 u32 *target_size); 836 837 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, 838 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy); 839 840 /* an array of programs to be executed under rcu_lock. 841 * 842 * Typical usage: 843 * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN); 844 * 845 * the structure returned by bpf_prog_array_alloc() should be populated 846 * with program pointers and the last pointer must be NULL. 847 * The user has to keep refcnt on the program and make sure the program 848 * is removed from the array before bpf_prog_put(). 849 * The 'struct bpf_prog_array *' should only be replaced with xchg() 850 * since other cpus are walking the array of pointers in parallel. 
851 */ 852 struct bpf_prog_array_item { 853 struct bpf_prog *prog; 854 struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; 855 }; 856 857 struct bpf_prog_array { 858 struct rcu_head rcu; 859 struct bpf_prog_array_item items[0]; 860 }; 861 862 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags); 863 void bpf_prog_array_free(struct bpf_prog_array *progs); 864 int bpf_prog_array_length(struct bpf_prog_array *progs); 865 bool bpf_prog_array_is_empty(struct bpf_prog_array *array); 866 int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs, 867 __u32 __user *prog_ids, u32 cnt); 868 869 void bpf_prog_array_delete_safe(struct bpf_prog_array *progs, 870 struct bpf_prog *old_prog); 871 int bpf_prog_array_copy_info(struct bpf_prog_array *array, 872 u32 *prog_ids, u32 request_cnt, 873 u32 *prog_cnt); 874 int bpf_prog_array_copy(struct bpf_prog_array *old_array, 875 struct bpf_prog *exclude_prog, 876 struct bpf_prog *include_prog, 877 struct bpf_prog_array **new_array); 878 879 #define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null) \ 880 ({ \ 881 struct bpf_prog_array_item *_item; \ 882 struct bpf_prog *_prog; \ 883 struct bpf_prog_array *_array; \ 884 u32 _ret = 1; \ 885 preempt_disable(); \ 886 rcu_read_lock(); \ 887 _array = rcu_dereference(array); \ 888 if (unlikely(check_non_null && !_array))\ 889 goto _out; \ 890 _item = &_array->items[0]; \ 891 while ((_prog = READ_ONCE(_item->prog))) { \ 892 bpf_cgroup_storage_set(_item->cgroup_storage); \ 893 _ret &= func(_prog, ctx); \ 894 _item++; \ 895 } \ 896 _out: \ 897 rcu_read_unlock(); \ 898 preempt_enable(); \ 899 _ret; \ 900 }) 901 902 /* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs 903 * so BPF programs can request cwr for TCP packets. 904 * 905 * Current cgroup skb programs can only return 0 or 1 (0 to drop the 906 * packet. This macro changes the behavior so the low order bit 907 * indicates whether the packet should be dropped (0) or not (1) 908 * and the next bit is a congestion notification bit. This could be 909 * used by TCP to call tcp_enter_cwr() 910 * 911 * Hence, new allowed return values of CGROUP EGRESS BPF programs are: 912 * 0: drop packet 913 * 1: keep packet 914 * 2: drop packet and cn 915 * 3: keep packet and cn 916 * 917 * This macro then converts it to one of the NET_XMIT or an error 918 * code that is then interpreted as drop packet (and no cn): 919 * 0: NET_XMIT_SUCCESS skb should be transmitted 920 * 1: NET_XMIT_DROP skb should be dropped and cn 921 * 2: NET_XMIT_CN skb should be transmitted and cn 922 * 3: -EPERM skb should be dropped 923 */ 924 #define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func) \ 925 ({ \ 926 struct bpf_prog_array_item *_item; \ 927 struct bpf_prog *_prog; \ 928 struct bpf_prog_array *_array; \ 929 u32 ret; \ 930 u32 _ret = 1; \ 931 u32 _cn = 0; \ 932 preempt_disable(); \ 933 rcu_read_lock(); \ 934 _array = rcu_dereference(array); \ 935 _item = &_array->items[0]; \ 936 while ((_prog = READ_ONCE(_item->prog))) { \ 937 bpf_cgroup_storage_set(_item->cgroup_storage); \ 938 ret = func(_prog, ctx); \ 939 _ret &= (ret & 1); \ 940 _cn |= (ret & 2); \ 941 _item++; \ 942 } \ 943 rcu_read_unlock(); \ 944 preempt_enable(); \ 945 if (_ret) \ 946 _ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \ 947 else \ 948 _ret = (_cn ? 
/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs
 * so BPF programs can request cwr for TCP packets.
 *
 * Current cgroup skb programs can only return 0 or 1 (0 to drop the
 * packet, 1 to keep it). This macro changes the behavior so the low order
 * bit indicates whether the packet should be dropped (0) or not (1)
 * and the next bit is a congestion notification bit. This could be
 * used by TCP to call tcp_enter_cwr()
 *
 * Hence, new allowed return values of CGROUP EGRESS BPF programs are:
 *   0: drop packet
 *   1: keep packet
 *   2: drop packet and cn
 *   3: keep packet and cn
 *
 * This macro then converts it to one of the NET_XMIT or an error
 * code that is then interpreted as drop packet (and no cn):
 *   0: NET_XMIT_SUCCESS  skb should be transmitted
 *   1: NET_XMIT_DROP     skb should be dropped and cn
 *   2: NET_XMIT_CN       skb should be transmitted and cn
 *   3: -EPERM            skb should be dropped
 */
#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func)		\
	({						\
		struct bpf_prog_array_item *_item;	\
		struct bpf_prog *_prog;			\
		struct bpf_prog_array *_array;		\
		u32 ret;				\
		u32 _ret = 1;				\
		u32 _cn = 0;				\
		preempt_disable();			\
		rcu_read_lock();			\
		_array = rcu_dereference(array);	\
		_item = &_array->items[0];		\
		while ((_prog = READ_ONCE(_item->prog))) {		\
			bpf_cgroup_storage_set(_item->cgroup_storage);	\
			ret = func(_prog, ctx);		\
			_ret &= (ret & 1);		\
			_cn |= (ret & 2);		\
			_item++;			\
		}					\
		rcu_read_unlock();			\
		preempt_enable();			\
		if (_ret)				\
			_ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS);	\
		else					\
			_ret = (_cn ? NET_XMIT_DROP : -EPERM);		\
		_ret;					\
	})

#define BPF_PROG_RUN_ARRAY(array, ctx, func)		\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, false)

#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func)	\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, true)

#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);

extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;

#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	extern const struct bpf_prog_ops _name ## _prog_ops; \
	extern const struct bpf_verifier_ops _name ## _verifier_ops;
#define BPF_MAP_TYPE(_id, _ops) \
	extern const struct bpf_map_ops _ops;
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE

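/* Example: together with an entry like
 *
 *	BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter,
 *		      struct __sk_buff, struct sk_buff)
 *
 * in <linux/bpf_types.h>, the include above expands to
 *
 *	extern const struct bpf_prog_ops sk_filter_prog_ops;
 *	extern const struct bpf_verifier_ops sk_filter_verifier_ops;
 *
 * so each program and map type automatically gets its ops declared here.
 */
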
extern const struct bpf_prog_ops bpf_offload_prog_ops;
extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
extern const struct bpf_verifier_ops xdp_analyzer_ops;

struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv);
void bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
void bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
int __bpf_prog_charge(struct user_struct *user, u32 pages);
void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
void __bpf_free_used_maps(struct bpf_prog_aux *aux,
			  struct bpf_map **used_maps, u32 len);

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);

struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
void bpf_map_inc(struct bpf_map *map);
void bpf_map_inc_with_uref(struct bpf_map *map);
struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size);
void bpf_map_charge_finish(struct bpf_map_memory *mem);
void bpf_map_charge_move(struct bpf_map_memory *dst,
			 struct bpf_map_memory *src);
void *bpf_map_area_alloc(u64 size, int numa_node);
void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
void bpf_map_area_free(void *base);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
int generic_map_lookup_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr);
int generic_map_update_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr);
int generic_map_delete_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr);

extern int sysctl_unprivileged_bpf_disabled;

int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);

int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
int bpf_obj_get_user(const char __user *pathname, int flags);

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 flags);
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 flags);

int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);

int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags);
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags);
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);

int bpf_get_file_flag(int flags);
int bpf_check_uarg_tail_zero(void __user *uaddr, size_t expected_size,
			     size_t actual_size);

/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
 * forced to use 'long' read/writes to try to atomically copy long counters.
 * Best-effort only. No barriers here, since it _will_ race with concurrent
 * updates from BPF programs. Called from bpf syscall and mostly used with
 * size 8 or 16 bytes, so ask compiler to inline it.
 */
static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
{
	const long *lsrc = src;
	long *ldst = dst;

	size /= sizeof(long);
	while (size--)
		*ldst++ = *lsrc++;
}

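/* Example: bpf_percpu_hash_copy() and friends use bpf_long_memcpy() so
 * that an 8-byte counter is copied with a single long-sized load/store on
 * 64-bit hosts, keeping each word untorn even though the copy may race
 * with a BPF program updating the value.
 */
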
/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr,
	      union bpf_attr __user *uattr);
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);

/* Map specifics */
struct xdp_buff;
struct sk_buff;

struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key);
void __dev_flush(void);
int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
		    struct net_device *dev_rx);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog);

struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
void __cpu_map_flush(void);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
		    struct net_device *dev_rx);

/* Return map's numa specified by userspace */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
{
	return (attr->map_flags & BPF_F_NUMA_NODE) ?
		attr->numa_node : NUMA_NO_NODE;
}

struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
int array_map_alloc_check(union bpf_attr *attr);

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr);
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
		    const struct bpf_prog *prog,
		    struct bpf_insn_access_aux *info);
int btf_struct_access(struct bpf_verifier_log *log,
		      const struct btf_type *t, int off, int size,
		      enum bpf_access_type atype,
		      u32 *next_btf_id);
int btf_resolve_helper_id(struct bpf_verifier_log *log,
			  const struct bpf_func_proto *fn, int);

int btf_distill_func_proto(struct bpf_verifier_log *log,
			   struct btf *btf,
			   const struct btf_type *func_proto,
			   const char *func_name,
			   struct btf_func_model *m);

struct bpf_reg_state;
int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
			     struct bpf_reg_state *regs);
int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
			  struct bpf_reg_state *reg);
int btf_check_type_match(struct bpf_verifier_env *env, struct bpf_prog *prog,
			 struct btf *btf, const struct btf_type *t);

struct bpf_prog *bpf_prog_by_id(u32 id);

#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
						     enum bpf_prog_type type,
						     bool attach_drv)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_prog_add(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_put(struct bpf_prog *prog)
{
}

static inline void bpf_prog_inc(struct bpf_prog *prog)
{
}

static inline struct bpf_prog *__must_check
bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	return 0;
}

static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
}

static inline int bpf_obj_get_user(const char __user *pathname, int flags)
{
	return -EOPNOTSUPP;
}

static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map,
						       u32 key)
{
	return NULL;
}

static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map,
							    u32 key)
{
	return NULL;
}

static inline void __dev_flush(void)
{
}

struct xdp_buff;
struct bpf_dtab_netdev;

static inline
int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return 0;
}

static inline
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return 0;
}

struct sk_buff;

static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
					   struct sk_buff *skb,
					   struct bpf_prog *xdp_prog)
{
	return 0;
}

static inline
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
	return NULL;
}

static inline void __cpu_map_flush(void)
{
}

static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
				  struct xdp_buff *xdp,
				  struct net_device *dev_rx)
{
	return 0;
}

static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
						      enum bpf_prog_type type)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
						   const union bpf_attr *kattr,
						   union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline void bpf_map_put(struct bpf_map *map)
{
}

static inline struct bpf_prog *bpf_prog_by_id(u32 id)
{
	return ERR_PTR(-ENOTSUPP);
}
#endif /* CONFIG_BPF_SYSCALL */

static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
						 enum bpf_prog_type type)
{
	return bpf_prog_get_type_dev(ufd, type, false);
}

bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);

int bpf_prog_offload_compile(struct bpf_prog *prog);
void bpf_prog_offload_destroy(struct bpf_prog *prog);
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog);

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags);
int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
int bpf_map_offload_get_next_key(struct bpf_map *map,
				 void *key, void *next_key);

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);

struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev);
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev);
bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);

#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);

static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
	return aux->offload_requested;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return unlikely(map->ops == &bpf_map_offload_ops);
}

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
void bpf_map_offload_map_free(struct bpf_map *map);
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
					union bpf_attr *attr)
{
	return -EOPNOTSUPP;
}

static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
{
	return false;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return false;
}

static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_map_offload_map_free(struct bpf_map *map)
{
}
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */

#if defined(CONFIG_BPF_STREAM_PARSER)
int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which);
int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
#else
static inline int sock_map_prog_update(struct bpf_map *map,
				       struct bpf_prog *prog, u32 which)
{
	return -EOPNOTSUPP;
}

static inline int sock_map_get_from_fd(const union bpf_attr *attr,
				       struct bpf_prog *prog)
{
	return -EINVAL;
}
#endif

#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags);
#else
static inline void bpf_sk_reuseport_detach(struct sock *sk)
{
}

#ifdef CONFIG_BPF_SYSCALL
static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
						     void *key, void *value)
{
	return -EOPNOTSUPP;
}

static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
						     void *key, void *value,
						     u64 map_flags)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */

/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_map_push_elem_proto;
extern const struct bpf_func_proto bpf_map_pop_elem_proto;
extern const struct bpf_func_proto bpf_map_peek_elem_proto;

extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_get_stack_proto;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
extern const struct bpf_func_proto bpf_spin_lock_proto;
extern const struct bpf_func_proto bpf_spin_unlock_proto;
extern const struct bpf_func_proto bpf_get_local_storage_proto;
extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;
extern const struct bpf_func_proto bpf_tcp_sock_proto;
extern const struct bpf_func_proto bpf_jiffies64_proto;

/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

#if defined(CONFIG_NET)
bool bpf_sock_common_is_valid_access(int off, int size,
				     enum bpf_access_type type,
				     struct bpf_insn_access_aux *info);
bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
			      struct bpf_insn_access_aux *info);
u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
				const struct bpf_insn *si,
				struct bpf_insn *insn_buf,
				struct bpf_prog *prog,
				u32 *target_size);
#else
static inline bool bpf_sock_common_is_valid_access(int off, int size,
						   enum bpf_access_type type,
						   struct bpf_insn_access_aux *info)
{
	return false;
}
static inline bool bpf_sock_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    struct bpf_insn_access_aux *info)
{
	return false;
}
static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
					      const struct bpf_insn *si,
					      struct bpf_insn *insn_buf,
					      struct bpf_prog *prog,
					      u32 *target_size)
{
	return 0;
}
#endif

#ifdef CONFIG_INET
struct sk_reuseport_kern {
	struct sk_buff *skb;
	struct sock *sk;
	struct sock *selected_sk;
	void *data_end;
	u32 hash;
	u32 reuseport_id;
	bool bind_inany;
};
bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);

bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);
#else
static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
#endif /* CONFIG_INET */

enum bpf_text_poke_type {
	BPF_MOD_CALL,
	BPF_MOD_JUMP,
};

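/* Sketch of the expected calling convention (the arch JIT, e.g.
 * arch/x86/net/bpf_jit_comp.c, is authoritative): to turn a nop at 'ip'
 * into a call to a trampoline image and later back into a nop:
 *
 *	bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);
 *	...
 *	bpf_arch_text_poke(ip, BPF_MOD_CALL, new_addr, NULL);
 *
 * where 'new_addr' is illustrative; addr1 is the old target expected at
 * 'ip' (NULL if it is currently a nop) and addr2 is the new one (NULL to
 * go back to a nop).
 */
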
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *addr1, void *addr2);

#endif /* _LINUX_BPF_H */