1 /* SPDX-License-Identifier: GPL-2.0-only */ 2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com 3 */ 4 #ifndef _LINUX_BPF_H 5 #define _LINUX_BPF_H 1 6 7 #include <uapi/linux/bpf.h> 8 9 #include <linux/workqueue.h> 10 #include <linux/file.h> 11 #include <linux/percpu.h> 12 #include <linux/err.h> 13 #include <linux/rbtree_latch.h> 14 #include <linux/numa.h> 15 #include <linux/mm_types.h> 16 #include <linux/wait.h> 17 #include <linux/u64_stats_sync.h> 18 #include <linux/refcount.h> 19 #include <linux/mutex.h> 20 #include <linux/module.h> 21 22 struct bpf_verifier_env; 23 struct bpf_verifier_log; 24 struct perf_event; 25 struct bpf_prog; 26 struct bpf_prog_aux; 27 struct bpf_map; 28 struct sock; 29 struct seq_file; 30 struct btf; 31 struct btf_type; 32 struct exception_table_entry; 33 34 extern struct idr btf_idr; 35 extern spinlock_t btf_idr_lock; 36 37 /* map is generic key/value storage optionally accesible by eBPF programs */ 38 struct bpf_map_ops { 39 /* funcs callable from userspace (via syscall) */ 40 int (*map_alloc_check)(union bpf_attr *attr); 41 struct bpf_map *(*map_alloc)(union bpf_attr *attr); 42 void (*map_release)(struct bpf_map *map, struct file *map_file); 43 void (*map_free)(struct bpf_map *map); 44 int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key); 45 void (*map_release_uref)(struct bpf_map *map); 46 void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key); 47 int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr, 48 union bpf_attr __user *uattr); 49 int (*map_lookup_and_delete_batch)(struct bpf_map *map, 50 const union bpf_attr *attr, 51 union bpf_attr __user *uattr); 52 int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr, 53 union bpf_attr __user *uattr); 54 int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr, 55 union bpf_attr __user *uattr); 56 57 /* funcs callable from userspace and from eBPF programs */ 58 void *(*map_lookup_elem)(struct bpf_map *map, void *key); 59 int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags); 60 int (*map_delete_elem)(struct bpf_map *map, void *key); 61 int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags); 62 int (*map_pop_elem)(struct bpf_map *map, void *value); 63 int (*map_peek_elem)(struct bpf_map *map, void *value); 64 65 /* funcs called by prog_array and perf_event_array map */ 66 void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file, 67 int fd); 68 void (*map_fd_put_ptr)(void *ptr); 69 u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf); 70 u32 (*map_fd_sys_lookup_elem)(void *ptr); 71 void (*map_seq_show_elem)(struct bpf_map *map, void *key, 72 struct seq_file *m); 73 int (*map_check_btf)(const struct bpf_map *map, 74 const struct btf *btf, 75 const struct btf_type *key_type, 76 const struct btf_type *value_type); 77 78 /* Prog poke tracking helpers. */ 79 int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux); 80 void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux); 81 void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old, 82 struct bpf_prog *new); 83 84 /* Direct value access helpers. */ 85 int (*map_direct_value_addr)(const struct bpf_map *map, 86 u64 *imm, u32 off); 87 int (*map_direct_value_meta)(const struct bpf_map *map, 88 u64 imm, u32 *off); 89 int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma); 90 }; 91 92 struct bpf_map_memory { 93 u32 pages; 94 struct user_struct *user; 95 }; 96 97 struct bpf_map { 98 /* The first two cachelines with read-mostly members of which some 99 * are also accessed in fast-path (e.g. ops, max_entries). 100 */ 101 const struct bpf_map_ops *ops ____cacheline_aligned; 102 struct bpf_map *inner_map_meta; 103 #ifdef CONFIG_SECURITY 104 void *security; 105 #endif 106 enum bpf_map_type map_type; 107 u32 key_size; 108 u32 value_size; 109 u32 max_entries; 110 u32 map_flags; 111 int spin_lock_off; /* >=0 valid offset, <0 error */ 112 u32 id; 113 int numa_node; 114 u32 btf_key_type_id; 115 u32 btf_value_type_id; 116 struct btf *btf; 117 struct bpf_map_memory memory; 118 char name[BPF_OBJ_NAME_LEN]; 119 u32 btf_vmlinux_value_type_id; 120 bool unpriv_array; 121 bool frozen; /* write-once; write-protected by freeze_mutex */ 122 /* 22 bytes hole */ 123 124 /* The 3rd and 4th cacheline with misc members to avoid false sharing 125 * particularly with refcounting. 126 */ 127 atomic64_t refcnt ____cacheline_aligned; 128 atomic64_t usercnt; 129 struct work_struct work; 130 struct mutex freeze_mutex; 131 u64 writecnt; /* writable mmap cnt; protected by freeze_mutex */ 132 }; 133 134 static inline bool map_value_has_spin_lock(const struct bpf_map *map) 135 { 136 return map->spin_lock_off >= 0; 137 } 138 139 static inline void check_and_init_map_lock(struct bpf_map *map, void *dst) 140 { 141 if (likely(!map_value_has_spin_lock(map))) 142 return; 143 *(struct bpf_spin_lock *)(dst + map->spin_lock_off) = 144 (struct bpf_spin_lock){}; 145 } 146 147 /* copy everything but bpf_spin_lock */ 148 static inline void copy_map_value(struct bpf_map *map, void *dst, void *src) 149 { 150 if (unlikely(map_value_has_spin_lock(map))) { 151 u32 off = map->spin_lock_off; 152 153 memcpy(dst, src, off); 154 memcpy(dst + off + sizeof(struct bpf_spin_lock), 155 src + off + sizeof(struct bpf_spin_lock), 156 map->value_size - off - sizeof(struct bpf_spin_lock)); 157 } else { 158 memcpy(dst, src, map->value_size); 159 } 160 } 161 void copy_map_value_locked(struct bpf_map *map, void *dst, void *src, 162 bool lock_src); 163 164 struct bpf_offload_dev; 165 struct bpf_offloaded_map; 166 167 struct bpf_map_dev_ops { 168 int (*map_get_next_key)(struct bpf_offloaded_map *map, 169 void *key, void *next_key); 170 int (*map_lookup_elem)(struct bpf_offloaded_map *map, 171 void *key, void *value); 172 int (*map_update_elem)(struct bpf_offloaded_map *map, 173 void *key, void *value, u64 flags); 174 int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key); 175 }; 176 177 struct bpf_offloaded_map { 178 struct bpf_map map; 179 struct net_device *netdev; 180 const struct bpf_map_dev_ops *dev_ops; 181 void *dev_priv; 182 struct list_head offloads; 183 }; 184 185 static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map) 186 { 187 return container_of(map, struct bpf_offloaded_map, map); 188 } 189 190 static inline bool bpf_map_offload_neutral(const struct bpf_map *map) 191 { 192 return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY; 193 } 194 195 static inline bool bpf_map_support_seq_show(const struct bpf_map *map) 196 { 197 return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) && 198 map->ops->map_seq_show_elem; 199 } 200 201 int map_check_no_btf(const struct bpf_map *map, 202 const struct btf *btf, 203 const struct btf_type *key_type, 204 const struct btf_type *value_type); 205 206 extern const struct bpf_map_ops bpf_map_offload_ops; 207 208 /* function argument constraints */ 209 enum bpf_arg_type { 210 ARG_DONTCARE = 0, /* unused argument in helper function */ 211 212 /* the following constraints used to prototype 213 * bpf_map_lookup/update/delete_elem() functions 214 */ 215 ARG_CONST_MAP_PTR, /* const argument used as pointer to bpf_map */ 216 ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */ 217 ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */ 218 ARG_PTR_TO_UNINIT_MAP_VALUE, /* pointer to valid memory used to store a map value */ 219 ARG_PTR_TO_MAP_VALUE_OR_NULL, /* pointer to stack used as map value or NULL */ 220 221 /* the following constraints used to prototype bpf_memcmp() and other 222 * functions that access data on eBPF program stack 223 */ 224 ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */ 225 ARG_PTR_TO_MEM_OR_NULL, /* pointer to valid memory or NULL */ 226 ARG_PTR_TO_UNINIT_MEM, /* pointer to memory does not need to be initialized, 227 * helper function must fill all bytes or clear 228 * them in error case. 229 */ 230 231 ARG_CONST_SIZE, /* number of bytes accessed from memory */ 232 ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */ 233 234 ARG_PTR_TO_CTX, /* pointer to context */ 235 ARG_ANYTHING, /* any (initialized) argument is ok */ 236 ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */ 237 ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */ 238 ARG_PTR_TO_INT, /* pointer to int */ 239 ARG_PTR_TO_LONG, /* pointer to long */ 240 ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */ 241 ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */ 242 }; 243 244 /* type of values returned from helper functions */ 245 enum bpf_return_type { 246 RET_INTEGER, /* function returns integer */ 247 RET_VOID, /* function doesn't return anything */ 248 RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */ 249 RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */ 250 RET_PTR_TO_SOCKET_OR_NULL, /* returns a pointer to a socket or NULL */ 251 RET_PTR_TO_TCP_SOCK_OR_NULL, /* returns a pointer to a tcp_sock or NULL */ 252 RET_PTR_TO_SOCK_COMMON_OR_NULL, /* returns a pointer to a sock_common or NULL */ 253 }; 254 255 /* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs 256 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL 257 * instructions after verifying 258 */ 259 struct bpf_func_proto { 260 u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 261 bool gpl_only; 262 bool pkt_access; 263 enum bpf_return_type ret_type; 264 union { 265 struct { 266 enum bpf_arg_type arg1_type; 267 enum bpf_arg_type arg2_type; 268 enum bpf_arg_type arg3_type; 269 enum bpf_arg_type arg4_type; 270 enum bpf_arg_type arg5_type; 271 }; 272 enum bpf_arg_type arg_type[5]; 273 }; 274 int *btf_id; /* BTF ids of arguments */ 275 }; 276 277 /* bpf_context is intentionally undefined structure. Pointer to bpf_context is 278 * the first argument to eBPF programs. 279 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *' 280 */ 281 struct bpf_context; 282 283 enum bpf_access_type { 284 BPF_READ = 1, 285 BPF_WRITE = 2 286 }; 287 288 /* types of values stored in eBPF registers */ 289 /* Pointer types represent: 290 * pointer 291 * pointer + imm 292 * pointer + (u16) var 293 * pointer + (u16) var + imm 294 * if (range > 0) then [ptr, ptr + range - off) is safe to access 295 * if (id > 0) means that some 'var' was added 296 * if (off > 0) means that 'imm' was added 297 */ 298 enum bpf_reg_type { 299 NOT_INIT = 0, /* nothing was written into register */ 300 SCALAR_VALUE, /* reg doesn't contain a valid pointer */ 301 PTR_TO_CTX, /* reg points to bpf_context */ 302 CONST_PTR_TO_MAP, /* reg points to struct bpf_map */ 303 PTR_TO_MAP_VALUE, /* reg points to map element value */ 304 PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */ 305 PTR_TO_STACK, /* reg == frame_pointer + offset */ 306 PTR_TO_PACKET_META, /* skb->data - meta_len */ 307 PTR_TO_PACKET, /* reg points to skb->data */ 308 PTR_TO_PACKET_END, /* skb->data + headlen */ 309 PTR_TO_FLOW_KEYS, /* reg points to bpf_flow_keys */ 310 PTR_TO_SOCKET, /* reg points to struct bpf_sock */ 311 PTR_TO_SOCKET_OR_NULL, /* reg points to struct bpf_sock or NULL */ 312 PTR_TO_SOCK_COMMON, /* reg points to sock_common */ 313 PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */ 314 PTR_TO_TCP_SOCK, /* reg points to struct tcp_sock */ 315 PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */ 316 PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */ 317 PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */ 318 PTR_TO_BTF_ID, /* reg points to kernel struct */ 319 }; 320 321 /* The information passed from prog-specific *_is_valid_access 322 * back to the verifier. 323 */ 324 struct bpf_insn_access_aux { 325 enum bpf_reg_type reg_type; 326 union { 327 int ctx_field_size; 328 u32 btf_id; 329 }; 330 struct bpf_verifier_log *log; /* for verbose logs */ 331 }; 332 333 static inline void 334 bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size) 335 { 336 aux->ctx_field_size = size; 337 } 338 339 struct bpf_prog_ops { 340 int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr, 341 union bpf_attr __user *uattr); 342 }; 343 344 struct bpf_verifier_ops { 345 /* return eBPF function prototype for verification */ 346 const struct bpf_func_proto * 347 (*get_func_proto)(enum bpf_func_id func_id, 348 const struct bpf_prog *prog); 349 350 /* return true if 'size' wide access at offset 'off' within bpf_context 351 * with 'type' (read or write) is allowed 352 */ 353 bool (*is_valid_access)(int off, int size, enum bpf_access_type type, 354 const struct bpf_prog *prog, 355 struct bpf_insn_access_aux *info); 356 int (*gen_prologue)(struct bpf_insn *insn, bool direct_write, 357 const struct bpf_prog *prog); 358 int (*gen_ld_abs)(const struct bpf_insn *orig, 359 struct bpf_insn *insn_buf); 360 u32 (*convert_ctx_access)(enum bpf_access_type type, 361 const struct bpf_insn *src, 362 struct bpf_insn *dst, 363 struct bpf_prog *prog, u32 *target_size); 364 int (*btf_struct_access)(struct bpf_verifier_log *log, 365 const struct btf_type *t, int off, int size, 366 enum bpf_access_type atype, 367 u32 *next_btf_id); 368 }; 369 370 struct bpf_prog_offload_ops { 371 /* verifier basic callbacks */ 372 int (*insn_hook)(struct bpf_verifier_env *env, 373 int insn_idx, int prev_insn_idx); 374 int (*finalize)(struct bpf_verifier_env *env); 375 /* verifier optimization callbacks (called after .finalize) */ 376 int (*replace_insn)(struct bpf_verifier_env *env, u32 off, 377 struct bpf_insn *insn); 378 int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt); 379 /* program management callbacks */ 380 int (*prepare)(struct bpf_prog *prog); 381 int (*translate)(struct bpf_prog *prog); 382 void (*destroy)(struct bpf_prog *prog); 383 }; 384 385 struct bpf_prog_offload { 386 struct bpf_prog *prog; 387 struct net_device *netdev; 388 struct bpf_offload_dev *offdev; 389 void *dev_priv; 390 struct list_head offloads; 391 bool dev_state; 392 bool opt_failed; 393 void *jited_image; 394 u32 jited_len; 395 }; 396 397 enum bpf_cgroup_storage_type { 398 BPF_CGROUP_STORAGE_SHARED, 399 BPF_CGROUP_STORAGE_PERCPU, 400 __BPF_CGROUP_STORAGE_MAX 401 }; 402 403 #define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX 404 405 /* The longest tracepoint has 12 args. 406 * See include/trace/bpf_probe.h 407 */ 408 #define MAX_BPF_FUNC_ARGS 12 409 410 struct bpf_prog_stats { 411 u64 cnt; 412 u64 nsecs; 413 struct u64_stats_sync syncp; 414 } __aligned(2 * sizeof(u64)); 415 416 struct btf_func_model { 417 u8 ret_size; 418 u8 nr_args; 419 u8 arg_size[MAX_BPF_FUNC_ARGS]; 420 }; 421 422 /* Restore arguments before returning from trampoline to let original function 423 * continue executing. This flag is used for fentry progs when there are no 424 * fexit progs. 425 */ 426 #define BPF_TRAMP_F_RESTORE_REGS BIT(0) 427 /* Call original function after fentry progs, but before fexit progs. 428 * Makes sense for fentry/fexit, normal calls and indirect calls. 429 */ 430 #define BPF_TRAMP_F_CALL_ORIG BIT(1) 431 /* Skip current frame and return to parent. Makes sense for fentry/fexit 432 * programs only. Should not be used with normal calls and indirect calls. 433 */ 434 #define BPF_TRAMP_F_SKIP_FRAME BIT(2) 435 436 /* Different use cases for BPF trampoline: 437 * 1. replace nop at the function entry (kprobe equivalent) 438 * flags = BPF_TRAMP_F_RESTORE_REGS 439 * fentry = a set of programs to run before returning from trampoline 440 * 441 * 2. replace nop at the function entry (kprobe + kretprobe equivalent) 442 * flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME 443 * orig_call = fentry_ip + MCOUNT_INSN_SIZE 444 * fentry = a set of program to run before calling original function 445 * fexit = a set of program to run after original function 446 * 447 * 3. replace direct call instruction anywhere in the function body 448 * or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid) 449 * With flags = 0 450 * fentry = a set of programs to run before returning from trampoline 451 * With flags = BPF_TRAMP_F_CALL_ORIG 452 * orig_call = original callback addr or direct function addr 453 * fentry = a set of program to run before calling original function 454 * fexit = a set of program to run after original function 455 */ 456 int arch_prepare_bpf_trampoline(void *image, void *image_end, 457 const struct btf_func_model *m, u32 flags, 458 struct bpf_prog **fentry_progs, int fentry_cnt, 459 struct bpf_prog **fexit_progs, int fexit_cnt, 460 void *orig_call); 461 /* these two functions are called from generated trampoline */ 462 u64 notrace __bpf_prog_enter(void); 463 void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start); 464 465 enum bpf_tramp_prog_type { 466 BPF_TRAMP_FENTRY, 467 BPF_TRAMP_FEXIT, 468 BPF_TRAMP_MAX, 469 BPF_TRAMP_REPLACE, /* more than MAX */ 470 }; 471 472 struct bpf_trampoline { 473 /* hlist for trampoline_table */ 474 struct hlist_node hlist; 475 /* serializes access to fields of this trampoline */ 476 struct mutex mutex; 477 refcount_t refcnt; 478 u64 key; 479 struct { 480 struct btf_func_model model; 481 void *addr; 482 bool ftrace_managed; 483 } func; 484 /* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF 485 * program by replacing one of its functions. func.addr is the address 486 * of the function it replaced. 487 */ 488 struct bpf_prog *extension_prog; 489 /* list of BPF programs using this trampoline */ 490 struct hlist_head progs_hlist[BPF_TRAMP_MAX]; 491 /* Number of attached programs. A counter per kind. */ 492 int progs_cnt[BPF_TRAMP_MAX]; 493 /* Executable image of trampoline */ 494 void *image; 495 u64 selector; 496 }; 497 498 #define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */ 499 500 struct bpf_dispatcher_prog { 501 struct bpf_prog *prog; 502 refcount_t users; 503 }; 504 505 struct bpf_dispatcher { 506 /* dispatcher mutex */ 507 struct mutex mutex; 508 void *func; 509 struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX]; 510 int num_progs; 511 void *image; 512 u32 image_off; 513 }; 514 515 static __always_inline unsigned int bpf_dispatcher_nopfunc( 516 const void *ctx, 517 const struct bpf_insn *insnsi, 518 unsigned int (*bpf_func)(const void *, 519 const struct bpf_insn *)) 520 { 521 return bpf_func(ctx, insnsi); 522 } 523 #ifdef CONFIG_BPF_JIT 524 struct bpf_trampoline *bpf_trampoline_lookup(u64 key); 525 int bpf_trampoline_link_prog(struct bpf_prog *prog); 526 int bpf_trampoline_unlink_prog(struct bpf_prog *prog); 527 void bpf_trampoline_put(struct bpf_trampoline *tr); 528 #define BPF_DISPATCHER_INIT(name) { \ 529 .mutex = __MUTEX_INITIALIZER(name.mutex), \ 530 .func = &name##func, \ 531 .progs = {}, \ 532 .num_progs = 0, \ 533 .image = NULL, \ 534 .image_off = 0 \ 535 } 536 537 #define DEFINE_BPF_DISPATCHER(name) \ 538 noinline unsigned int name##func( \ 539 const void *ctx, \ 540 const struct bpf_insn *insnsi, \ 541 unsigned int (*bpf_func)(const void *, \ 542 const struct bpf_insn *)) \ 543 { \ 544 return bpf_func(ctx, insnsi); \ 545 } \ 546 EXPORT_SYMBOL(name##func); \ 547 struct bpf_dispatcher name = BPF_DISPATCHER_INIT(name); 548 #define DECLARE_BPF_DISPATCHER(name) \ 549 unsigned int name##func( \ 550 const void *ctx, \ 551 const struct bpf_insn *insnsi, \ 552 unsigned int (*bpf_func)(const void *, \ 553 const struct bpf_insn *)); \ 554 extern struct bpf_dispatcher name; 555 #define BPF_DISPATCHER_FUNC(name) name##func 556 #define BPF_DISPATCHER_PTR(name) (&name) 557 void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from, 558 struct bpf_prog *to); 559 struct bpf_image { 560 struct latch_tree_node tnode; 561 unsigned char data[]; 562 }; 563 #define BPF_IMAGE_SIZE (PAGE_SIZE - sizeof(struct bpf_image)) 564 bool is_bpf_image_address(unsigned long address); 565 void *bpf_image_alloc(void); 566 #else 567 static inline struct bpf_trampoline *bpf_trampoline_lookup(u64 key) 568 { 569 return NULL; 570 } 571 static inline int bpf_trampoline_link_prog(struct bpf_prog *prog) 572 { 573 return -ENOTSUPP; 574 } 575 static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog) 576 { 577 return -ENOTSUPP; 578 } 579 static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {} 580 #define DEFINE_BPF_DISPATCHER(name) 581 #define DECLARE_BPF_DISPATCHER(name) 582 #define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nopfunc 583 #define BPF_DISPATCHER_PTR(name) NULL 584 static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, 585 struct bpf_prog *from, 586 struct bpf_prog *to) {} 587 static inline bool is_bpf_image_address(unsigned long address) 588 { 589 return false; 590 } 591 #endif 592 593 struct bpf_func_info_aux { 594 u16 linkage; 595 bool unreliable; 596 }; 597 598 enum bpf_jit_poke_reason { 599 BPF_POKE_REASON_TAIL_CALL, 600 }; 601 602 /* Descriptor of pokes pointing /into/ the JITed image. */ 603 struct bpf_jit_poke_descriptor { 604 void *ip; 605 union { 606 struct { 607 struct bpf_map *map; 608 u32 key; 609 } tail_call; 610 }; 611 bool ip_stable; 612 u8 adj_off; 613 u16 reason; 614 }; 615 616 struct bpf_prog_aux { 617 atomic64_t refcnt; 618 u32 used_map_cnt; 619 u32 max_ctx_offset; 620 u32 max_pkt_offset; 621 u32 max_tp_access; 622 u32 stack_depth; 623 u32 id; 624 u32 func_cnt; /* used by non-func prog as the number of func progs */ 625 u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */ 626 u32 attach_btf_id; /* in-kernel BTF type id to attach to */ 627 struct bpf_prog *linked_prog; 628 bool verifier_zext; /* Zero extensions has been inserted by verifier. */ 629 bool offload_requested; 630 bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */ 631 bool func_proto_unreliable; 632 enum bpf_tramp_prog_type trampoline_prog_type; 633 struct bpf_trampoline *trampoline; 634 struct hlist_node tramp_hlist; 635 /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */ 636 const struct btf_type *attach_func_proto; 637 /* function name for valid attach_btf_id */ 638 const char *attach_func_name; 639 struct bpf_prog **func; 640 void *jit_data; /* JIT specific data. arch dependent */ 641 struct bpf_jit_poke_descriptor *poke_tab; 642 u32 size_poke_tab; 643 struct latch_tree_node ksym_tnode; 644 struct list_head ksym_lnode; 645 const struct bpf_prog_ops *ops; 646 struct bpf_map **used_maps; 647 struct bpf_prog *prog; 648 struct user_struct *user; 649 u64 load_time; /* ns since boottime */ 650 struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; 651 char name[BPF_OBJ_NAME_LEN]; 652 #ifdef CONFIG_SECURITY 653 void *security; 654 #endif 655 struct bpf_prog_offload *offload; 656 struct btf *btf; 657 struct bpf_func_info *func_info; 658 struct bpf_func_info_aux *func_info_aux; 659 /* bpf_line_info loaded from userspace. linfo->insn_off 660 * has the xlated insn offset. 661 * Both the main and sub prog share the same linfo. 662 * The subprog can access its first linfo by 663 * using the linfo_idx. 664 */ 665 struct bpf_line_info *linfo; 666 /* jited_linfo is the jited addr of the linfo. It has a 667 * one to one mapping to linfo: 668 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off. 669 * Both the main and sub prog share the same jited_linfo. 670 * The subprog can access its first jited_linfo by 671 * using the linfo_idx. 672 */ 673 void **jited_linfo; 674 u32 func_info_cnt; 675 u32 nr_linfo; 676 /* subprog can use linfo_idx to access its first linfo and 677 * jited_linfo. 678 * main prog always has linfo_idx == 0 679 */ 680 u32 linfo_idx; 681 u32 num_exentries; 682 struct exception_table_entry *extable; 683 struct bpf_prog_stats __percpu *stats; 684 union { 685 struct work_struct work; 686 struct rcu_head rcu; 687 }; 688 }; 689 690 struct bpf_array_aux { 691 /* 'Ownership' of prog array is claimed by the first program that 692 * is going to use this map or by the first program which FD is 693 * stored in the map to make sure that all callers and callees have 694 * the same prog type and JITed flag. 695 */ 696 enum bpf_prog_type type; 697 bool jited; 698 /* Programs with direct jumps into programs part of this array. */ 699 struct list_head poke_progs; 700 struct bpf_map *map; 701 struct mutex poke_mutex; 702 struct work_struct work; 703 }; 704 705 struct bpf_struct_ops_value; 706 struct btf_type; 707 struct btf_member; 708 709 #define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64 710 struct bpf_struct_ops { 711 const struct bpf_verifier_ops *verifier_ops; 712 int (*init)(struct btf *btf); 713 int (*check_member)(const struct btf_type *t, 714 const struct btf_member *member); 715 int (*init_member)(const struct btf_type *t, 716 const struct btf_member *member, 717 void *kdata, const void *udata); 718 int (*reg)(void *kdata); 719 void (*unreg)(void *kdata); 720 const struct btf_type *type; 721 const struct btf_type *value_type; 722 const char *name; 723 struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS]; 724 u32 type_id; 725 u32 value_id; 726 }; 727 728 #if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL) 729 #define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA)) 730 const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id); 731 void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log); 732 bool bpf_struct_ops_get(const void *kdata); 733 void bpf_struct_ops_put(const void *kdata); 734 int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, 735 void *value); 736 static inline bool bpf_try_module_get(const void *data, struct module *owner) 737 { 738 if (owner == BPF_MODULE_OWNER) 739 return bpf_struct_ops_get(data); 740 else 741 return try_module_get(owner); 742 } 743 static inline void bpf_module_put(const void *data, struct module *owner) 744 { 745 if (owner == BPF_MODULE_OWNER) 746 bpf_struct_ops_put(data); 747 else 748 module_put(owner); 749 } 750 #else 751 static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id) 752 { 753 return NULL; 754 } 755 static inline void bpf_struct_ops_init(struct btf *btf, 756 struct bpf_verifier_log *log) 757 { 758 } 759 static inline bool bpf_try_module_get(const void *data, struct module *owner) 760 { 761 return try_module_get(owner); 762 } 763 static inline void bpf_module_put(const void *data, struct module *owner) 764 { 765 module_put(owner); 766 } 767 static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, 768 void *key, 769 void *value) 770 { 771 return -EINVAL; 772 } 773 #endif 774 775 struct bpf_array { 776 struct bpf_map map; 777 u32 elem_size; 778 u32 index_mask; 779 struct bpf_array_aux *aux; 780 union { 781 char value[0] __aligned(8); 782 void *ptrs[0] __aligned(8); 783 void __percpu *pptrs[0] __aligned(8); 784 }; 785 }; 786 787 #define BPF_COMPLEXITY_LIMIT_INSNS 1000000 /* yes. 1M insns */ 788 #define MAX_TAIL_CALL_CNT 32 789 790 #define BPF_F_ACCESS_MASK (BPF_F_RDONLY | \ 791 BPF_F_RDONLY_PROG | \ 792 BPF_F_WRONLY | \ 793 BPF_F_WRONLY_PROG) 794 795 #define BPF_MAP_CAN_READ BIT(0) 796 #define BPF_MAP_CAN_WRITE BIT(1) 797 798 static inline u32 bpf_map_flags_to_cap(struct bpf_map *map) 799 { 800 u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG); 801 802 /* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is 803 * not possible. 804 */ 805 if (access_flags & BPF_F_RDONLY_PROG) 806 return BPF_MAP_CAN_READ; 807 else if (access_flags & BPF_F_WRONLY_PROG) 808 return BPF_MAP_CAN_WRITE; 809 else 810 return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE; 811 } 812 813 static inline bool bpf_map_flags_access_ok(u32 access_flags) 814 { 815 return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) != 816 (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG); 817 } 818 819 struct bpf_event_entry { 820 struct perf_event *event; 821 struct file *perf_file; 822 struct file *map_file; 823 struct rcu_head rcu; 824 }; 825 826 bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp); 827 int bpf_prog_calc_tag(struct bpf_prog *fp); 828 const char *kernel_type_name(u32 btf_type_id); 829 830 const struct bpf_func_proto *bpf_get_trace_printk_proto(void); 831 832 typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src, 833 unsigned long off, unsigned long len); 834 typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type, 835 const struct bpf_insn *src, 836 struct bpf_insn *dst, 837 struct bpf_prog *prog, 838 u32 *target_size); 839 840 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, 841 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy); 842 843 /* an array of programs to be executed under rcu_lock. 844 * 845 * Typical usage: 846 * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN); 847 * 848 * the structure returned by bpf_prog_array_alloc() should be populated 849 * with program pointers and the last pointer must be NULL. 850 * The user has to keep refcnt on the program and make sure the program 851 * is removed from the array before bpf_prog_put(). 852 * The 'struct bpf_prog_array *' should only be replaced with xchg() 853 * since other cpus are walking the array of pointers in parallel. 854 */ 855 struct bpf_prog_array_item { 856 struct bpf_prog *prog; 857 struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; 858 }; 859 860 struct bpf_prog_array { 861 struct rcu_head rcu; 862 struct bpf_prog_array_item items[]; 863 }; 864 865 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags); 866 void bpf_prog_array_free(struct bpf_prog_array *progs); 867 int bpf_prog_array_length(struct bpf_prog_array *progs); 868 bool bpf_prog_array_is_empty(struct bpf_prog_array *array); 869 int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs, 870 __u32 __user *prog_ids, u32 cnt); 871 872 void bpf_prog_array_delete_safe(struct bpf_prog_array *progs, 873 struct bpf_prog *old_prog); 874 int bpf_prog_array_copy_info(struct bpf_prog_array *array, 875 u32 *prog_ids, u32 request_cnt, 876 u32 *prog_cnt); 877 int bpf_prog_array_copy(struct bpf_prog_array *old_array, 878 struct bpf_prog *exclude_prog, 879 struct bpf_prog *include_prog, 880 struct bpf_prog_array **new_array); 881 882 #define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null) \ 883 ({ \ 884 struct bpf_prog_array_item *_item; \ 885 struct bpf_prog *_prog; \ 886 struct bpf_prog_array *_array; \ 887 u32 _ret = 1; \ 888 migrate_disable(); \ 889 rcu_read_lock(); \ 890 _array = rcu_dereference(array); \ 891 if (unlikely(check_non_null && !_array))\ 892 goto _out; \ 893 _item = &_array->items[0]; \ 894 while ((_prog = READ_ONCE(_item->prog))) { \ 895 bpf_cgroup_storage_set(_item->cgroup_storage); \ 896 _ret &= func(_prog, ctx); \ 897 _item++; \ 898 } \ 899 _out: \ 900 rcu_read_unlock(); \ 901 migrate_enable(); \ 902 _ret; \ 903 }) 904 905 /* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs 906 * so BPF programs can request cwr for TCP packets. 907 * 908 * Current cgroup skb programs can only return 0 or 1 (0 to drop the 909 * packet. This macro changes the behavior so the low order bit 910 * indicates whether the packet should be dropped (0) or not (1) 911 * and the next bit is a congestion notification bit. This could be 912 * used by TCP to call tcp_enter_cwr() 913 * 914 * Hence, new allowed return values of CGROUP EGRESS BPF programs are: 915 * 0: drop packet 916 * 1: keep packet 917 * 2: drop packet and cn 918 * 3: keep packet and cn 919 * 920 * This macro then converts it to one of the NET_XMIT or an error 921 * code that is then interpreted as drop packet (and no cn): 922 * 0: NET_XMIT_SUCCESS skb should be transmitted 923 * 1: NET_XMIT_DROP skb should be dropped and cn 924 * 2: NET_XMIT_CN skb should be transmitted and cn 925 * 3: -EPERM skb should be dropped 926 */ 927 #define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func) \ 928 ({ \ 929 struct bpf_prog_array_item *_item; \ 930 struct bpf_prog *_prog; \ 931 struct bpf_prog_array *_array; \ 932 u32 ret; \ 933 u32 _ret = 1; \ 934 u32 _cn = 0; \ 935 migrate_disable(); \ 936 rcu_read_lock(); \ 937 _array = rcu_dereference(array); \ 938 _item = &_array->items[0]; \ 939 while ((_prog = READ_ONCE(_item->prog))) { \ 940 bpf_cgroup_storage_set(_item->cgroup_storage); \ 941 ret = func(_prog, ctx); \ 942 _ret &= (ret & 1); \ 943 _cn |= (ret & 2); \ 944 _item++; \ 945 } \ 946 rcu_read_unlock(); \ 947 migrate_enable(); \ 948 if (_ret) \ 949 _ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \ 950 else \ 951 _ret = (_cn ? NET_XMIT_DROP : -EPERM); \ 952 _ret; \ 953 }) 954 955 #define BPF_PROG_RUN_ARRAY(array, ctx, func) \ 956 __BPF_PROG_RUN_ARRAY(array, ctx, func, false) 957 958 #define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func) \ 959 __BPF_PROG_RUN_ARRAY(array, ctx, func, true) 960 961 #ifdef CONFIG_BPF_SYSCALL 962 DECLARE_PER_CPU(int, bpf_prog_active); 963 964 /* 965 * Block execution of BPF programs attached to instrumentation (perf, 966 * kprobes, tracepoints) to prevent deadlocks on map operations as any of 967 * these events can happen inside a region which holds a map bucket lock 968 * and can deadlock on it. 969 * 970 * Use the preemption safe inc/dec variants on RT because migrate disable 971 * is preemptible on RT and preemption in the middle of the RMW operation 972 * might lead to inconsistent state. Use the raw variants for non RT 973 * kernels as migrate_disable() maps to preempt_disable() so the slightly 974 * more expensive save operation can be avoided. 975 */ 976 static inline void bpf_disable_instrumentation(void) 977 { 978 migrate_disable(); 979 if (IS_ENABLED(CONFIG_PREEMPT_RT)) 980 this_cpu_inc(bpf_prog_active); 981 else 982 __this_cpu_inc(bpf_prog_active); 983 } 984 985 static inline void bpf_enable_instrumentation(void) 986 { 987 if (IS_ENABLED(CONFIG_PREEMPT_RT)) 988 this_cpu_dec(bpf_prog_active); 989 else 990 __this_cpu_dec(bpf_prog_active); 991 migrate_enable(); 992 } 993 994 extern const struct file_operations bpf_map_fops; 995 extern const struct file_operations bpf_prog_fops; 996 997 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ 998 extern const struct bpf_prog_ops _name ## _prog_ops; \ 999 extern const struct bpf_verifier_ops _name ## _verifier_ops; 1000 #define BPF_MAP_TYPE(_id, _ops) \ 1001 extern const struct bpf_map_ops _ops; 1002 #include <linux/bpf_types.h> 1003 #undef BPF_PROG_TYPE 1004 #undef BPF_MAP_TYPE 1005 1006 extern const struct bpf_prog_ops bpf_offload_prog_ops; 1007 extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops; 1008 extern const struct bpf_verifier_ops xdp_analyzer_ops; 1009 1010 struct bpf_prog *bpf_prog_get(u32 ufd); 1011 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, 1012 bool attach_drv); 1013 void bpf_prog_add(struct bpf_prog *prog, int i); 1014 void bpf_prog_sub(struct bpf_prog *prog, int i); 1015 void bpf_prog_inc(struct bpf_prog *prog); 1016 struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog); 1017 void bpf_prog_put(struct bpf_prog *prog); 1018 int __bpf_prog_charge(struct user_struct *user, u32 pages); 1019 void __bpf_prog_uncharge(struct user_struct *user, u32 pages); 1020 void __bpf_free_used_maps(struct bpf_prog_aux *aux, 1021 struct bpf_map **used_maps, u32 len); 1022 1023 void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock); 1024 void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock); 1025 1026 struct bpf_map *bpf_map_get(u32 ufd); 1027 struct bpf_map *bpf_map_get_with_uref(u32 ufd); 1028 struct bpf_map *__bpf_map_get(struct fd f); 1029 void bpf_map_inc(struct bpf_map *map); 1030 void bpf_map_inc_with_uref(struct bpf_map *map); 1031 struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map); 1032 void bpf_map_put_with_uref(struct bpf_map *map); 1033 void bpf_map_put(struct bpf_map *map); 1034 int bpf_map_charge_memlock(struct bpf_map *map, u32 pages); 1035 void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages); 1036 int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size); 1037 void bpf_map_charge_finish(struct bpf_map_memory *mem); 1038 void bpf_map_charge_move(struct bpf_map_memory *dst, 1039 struct bpf_map_memory *src); 1040 void *bpf_map_area_alloc(u64 size, int numa_node); 1041 void *bpf_map_area_mmapable_alloc(u64 size, int numa_node); 1042 void bpf_map_area_free(void *base); 1043 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr); 1044 int generic_map_lookup_batch(struct bpf_map *map, 1045 const union bpf_attr *attr, 1046 union bpf_attr __user *uattr); 1047 int generic_map_update_batch(struct bpf_map *map, 1048 const union bpf_attr *attr, 1049 union bpf_attr __user *uattr); 1050 int generic_map_delete_batch(struct bpf_map *map, 1051 const union bpf_attr *attr, 1052 union bpf_attr __user *uattr); 1053 1054 extern int sysctl_unprivileged_bpf_disabled; 1055 1056 int bpf_map_new_fd(struct bpf_map *map, int flags); 1057 int bpf_prog_new_fd(struct bpf_prog *prog); 1058 1059 int bpf_obj_pin_user(u32 ufd, const char __user *pathname); 1060 int bpf_obj_get_user(const char __user *pathname, int flags); 1061 1062 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value); 1063 int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value); 1064 int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value, 1065 u64 flags); 1066 int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value, 1067 u64 flags); 1068 1069 int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value); 1070 1071 int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file, 1072 void *key, void *value, u64 map_flags); 1073 int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); 1074 int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file, 1075 void *key, void *value, u64 map_flags); 1076 int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); 1077 1078 int bpf_get_file_flag(int flags); 1079 int bpf_check_uarg_tail_zero(void __user *uaddr, size_t expected_size, 1080 size_t actual_size); 1081 1082 /* memcpy that is used with 8-byte aligned pointers, power-of-8 size and 1083 * forced to use 'long' read/writes to try to atomically copy long counters. 1084 * Best-effort only. No barriers here, since it _will_ race with concurrent 1085 * updates from BPF programs. Called from bpf syscall and mostly used with 1086 * size 8 or 16 bytes, so ask compiler to inline it. 1087 */ 1088 static inline void bpf_long_memcpy(void *dst, const void *src, u32 size) 1089 { 1090 const long *lsrc = src; 1091 long *ldst = dst; 1092 1093 size /= sizeof(long); 1094 while (size--) 1095 *ldst++ = *lsrc++; 1096 } 1097 1098 /* verify correctness of eBPF program */ 1099 int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, 1100 union bpf_attr __user *uattr); 1101 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth); 1102 1103 /* Map specifics */ 1104 struct xdp_buff; 1105 struct sk_buff; 1106 1107 struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key); 1108 struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key); 1109 void __dev_flush(void); 1110 int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp, 1111 struct net_device *dev_rx); 1112 int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, 1113 struct net_device *dev_rx); 1114 int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, 1115 struct bpf_prog *xdp_prog); 1116 1117 struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key); 1118 void __cpu_map_flush(void); 1119 int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp, 1120 struct net_device *dev_rx); 1121 1122 /* Return map's numa specified by userspace */ 1123 static inline int bpf_map_attr_numa_node(const union bpf_attr *attr) 1124 { 1125 return (attr->map_flags & BPF_F_NUMA_NODE) ? 1126 attr->numa_node : NUMA_NO_NODE; 1127 } 1128 1129 struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type); 1130 int array_map_alloc_check(union bpf_attr *attr); 1131 1132 int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, 1133 union bpf_attr __user *uattr); 1134 int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, 1135 union bpf_attr __user *uattr); 1136 int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, 1137 const union bpf_attr *kattr, 1138 union bpf_attr __user *uattr); 1139 bool btf_ctx_access(int off, int size, enum bpf_access_type type, 1140 const struct bpf_prog *prog, 1141 struct bpf_insn_access_aux *info); 1142 int btf_struct_access(struct bpf_verifier_log *log, 1143 const struct btf_type *t, int off, int size, 1144 enum bpf_access_type atype, 1145 u32 *next_btf_id); 1146 int btf_resolve_helper_id(struct bpf_verifier_log *log, 1147 const struct bpf_func_proto *fn, int); 1148 1149 int btf_distill_func_proto(struct bpf_verifier_log *log, 1150 struct btf *btf, 1151 const struct btf_type *func_proto, 1152 const char *func_name, 1153 struct btf_func_model *m); 1154 1155 struct bpf_reg_state; 1156 int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog, 1157 struct bpf_reg_state *regs); 1158 int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog, 1159 struct bpf_reg_state *reg); 1160 int btf_check_type_match(struct bpf_verifier_env *env, struct bpf_prog *prog, 1161 struct btf *btf, const struct btf_type *t); 1162 1163 struct bpf_prog *bpf_prog_by_id(u32 id); 1164 1165 #else /* !CONFIG_BPF_SYSCALL */ 1166 static inline struct bpf_prog *bpf_prog_get(u32 ufd) 1167 { 1168 return ERR_PTR(-EOPNOTSUPP); 1169 } 1170 1171 static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, 1172 enum bpf_prog_type type, 1173 bool attach_drv) 1174 { 1175 return ERR_PTR(-EOPNOTSUPP); 1176 } 1177 1178 static inline void bpf_prog_add(struct bpf_prog *prog, int i) 1179 { 1180 } 1181 1182 static inline void bpf_prog_sub(struct bpf_prog *prog, int i) 1183 { 1184 } 1185 1186 static inline void bpf_prog_put(struct bpf_prog *prog) 1187 { 1188 } 1189 1190 static inline void bpf_prog_inc(struct bpf_prog *prog) 1191 { 1192 } 1193 1194 static inline struct bpf_prog *__must_check 1195 bpf_prog_inc_not_zero(struct bpf_prog *prog) 1196 { 1197 return ERR_PTR(-EOPNOTSUPP); 1198 } 1199 1200 static inline int __bpf_prog_charge(struct user_struct *user, u32 pages) 1201 { 1202 return 0; 1203 } 1204 1205 static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages) 1206 { 1207 } 1208 1209 static inline int bpf_obj_get_user(const char __user *pathname, int flags) 1210 { 1211 return -EOPNOTSUPP; 1212 } 1213 1214 static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map, 1215 u32 key) 1216 { 1217 return NULL; 1218 } 1219 1220 static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map, 1221 u32 key) 1222 { 1223 return NULL; 1224 } 1225 1226 static inline void __dev_flush(void) 1227 { 1228 } 1229 1230 struct xdp_buff; 1231 struct bpf_dtab_netdev; 1232 1233 static inline 1234 int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp, 1235 struct net_device *dev_rx) 1236 { 1237 return 0; 1238 } 1239 1240 static inline 1241 int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, 1242 struct net_device *dev_rx) 1243 { 1244 return 0; 1245 } 1246 1247 struct sk_buff; 1248 1249 static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, 1250 struct sk_buff *skb, 1251 struct bpf_prog *xdp_prog) 1252 { 1253 return 0; 1254 } 1255 1256 static inline 1257 struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key) 1258 { 1259 return NULL; 1260 } 1261 1262 static inline void __cpu_map_flush(void) 1263 { 1264 } 1265 1266 static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, 1267 struct xdp_buff *xdp, 1268 struct net_device *dev_rx) 1269 { 1270 return 0; 1271 } 1272 1273 static inline struct bpf_prog *bpf_prog_get_type_path(const char *name, 1274 enum bpf_prog_type type) 1275 { 1276 return ERR_PTR(-EOPNOTSUPP); 1277 } 1278 1279 static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog, 1280 const union bpf_attr *kattr, 1281 union bpf_attr __user *uattr) 1282 { 1283 return -ENOTSUPP; 1284 } 1285 1286 static inline int bpf_prog_test_run_skb(struct bpf_prog *prog, 1287 const union bpf_attr *kattr, 1288 union bpf_attr __user *uattr) 1289 { 1290 return -ENOTSUPP; 1291 } 1292 1293 static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, 1294 const union bpf_attr *kattr, 1295 union bpf_attr __user *uattr) 1296 { 1297 return -ENOTSUPP; 1298 } 1299 1300 static inline void bpf_map_put(struct bpf_map *map) 1301 { 1302 } 1303 1304 static inline struct bpf_prog *bpf_prog_by_id(u32 id) 1305 { 1306 return ERR_PTR(-ENOTSUPP); 1307 } 1308 #endif /* CONFIG_BPF_SYSCALL */ 1309 1310 static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, 1311 enum bpf_prog_type type) 1312 { 1313 return bpf_prog_get_type_dev(ufd, type, false); 1314 } 1315 1316 bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool); 1317 1318 int bpf_prog_offload_compile(struct bpf_prog *prog); 1319 void bpf_prog_offload_destroy(struct bpf_prog *prog); 1320 int bpf_prog_offload_info_fill(struct bpf_prog_info *info, 1321 struct bpf_prog *prog); 1322 1323 int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map); 1324 1325 int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value); 1326 int bpf_map_offload_update_elem(struct bpf_map *map, 1327 void *key, void *value, u64 flags); 1328 int bpf_map_offload_delete_elem(struct bpf_map *map, void *key); 1329 int bpf_map_offload_get_next_key(struct bpf_map *map, 1330 void *key, void *next_key); 1331 1332 bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map); 1333 1334 struct bpf_offload_dev * 1335 bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv); 1336 void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev); 1337 void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev); 1338 int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev, 1339 struct net_device *netdev); 1340 void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev, 1341 struct net_device *netdev); 1342 bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev); 1343 1344 #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) 1345 int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr); 1346 1347 static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux) 1348 { 1349 return aux->offload_requested; 1350 } 1351 1352 static inline bool bpf_map_is_dev_bound(struct bpf_map *map) 1353 { 1354 return unlikely(map->ops == &bpf_map_offload_ops); 1355 } 1356 1357 struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr); 1358 void bpf_map_offload_map_free(struct bpf_map *map); 1359 #else 1360 static inline int bpf_prog_offload_init(struct bpf_prog *prog, 1361 union bpf_attr *attr) 1362 { 1363 return -EOPNOTSUPP; 1364 } 1365 1366 static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux) 1367 { 1368 return false; 1369 } 1370 1371 static inline bool bpf_map_is_dev_bound(struct bpf_map *map) 1372 { 1373 return false; 1374 } 1375 1376 static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr) 1377 { 1378 return ERR_PTR(-EOPNOTSUPP); 1379 } 1380 1381 static inline void bpf_map_offload_map_free(struct bpf_map *map) 1382 { 1383 } 1384 #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ 1385 1386 #if defined(CONFIG_BPF_STREAM_PARSER) 1387 int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which); 1388 int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog); 1389 #else 1390 static inline int sock_map_prog_update(struct bpf_map *map, 1391 struct bpf_prog *prog, u32 which) 1392 { 1393 return -EOPNOTSUPP; 1394 } 1395 1396 static inline int sock_map_get_from_fd(const union bpf_attr *attr, 1397 struct bpf_prog *prog) 1398 { 1399 return -EINVAL; 1400 } 1401 #endif 1402 1403 #if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) 1404 void bpf_sk_reuseport_detach(struct sock *sk); 1405 int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, 1406 void *value); 1407 int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key, 1408 void *value, u64 map_flags); 1409 #else 1410 static inline void bpf_sk_reuseport_detach(struct sock *sk) 1411 { 1412 } 1413 1414 #ifdef CONFIG_BPF_SYSCALL 1415 static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, 1416 void *key, void *value) 1417 { 1418 return -EOPNOTSUPP; 1419 } 1420 1421 static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, 1422 void *key, void *value, 1423 u64 map_flags) 1424 { 1425 return -EOPNOTSUPP; 1426 } 1427 #endif /* CONFIG_BPF_SYSCALL */ 1428 #endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */ 1429 1430 /* verifier prototypes for helper functions called from eBPF programs */ 1431 extern const struct bpf_func_proto bpf_map_lookup_elem_proto; 1432 extern const struct bpf_func_proto bpf_map_update_elem_proto; 1433 extern const struct bpf_func_proto bpf_map_delete_elem_proto; 1434 extern const struct bpf_func_proto bpf_map_push_elem_proto; 1435 extern const struct bpf_func_proto bpf_map_pop_elem_proto; 1436 extern const struct bpf_func_proto bpf_map_peek_elem_proto; 1437 1438 extern const struct bpf_func_proto bpf_get_prandom_u32_proto; 1439 extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; 1440 extern const struct bpf_func_proto bpf_get_numa_node_id_proto; 1441 extern const struct bpf_func_proto bpf_tail_call_proto; 1442 extern const struct bpf_func_proto bpf_ktime_get_ns_proto; 1443 extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto; 1444 extern const struct bpf_func_proto bpf_get_current_uid_gid_proto; 1445 extern const struct bpf_func_proto bpf_get_current_comm_proto; 1446 extern const struct bpf_func_proto bpf_get_stackid_proto; 1447 extern const struct bpf_func_proto bpf_get_stack_proto; 1448 extern const struct bpf_func_proto bpf_sock_map_update_proto; 1449 extern const struct bpf_func_proto bpf_sock_hash_update_proto; 1450 extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto; 1451 extern const struct bpf_func_proto bpf_msg_redirect_hash_proto; 1452 extern const struct bpf_func_proto bpf_msg_redirect_map_proto; 1453 extern const struct bpf_func_proto bpf_sk_redirect_hash_proto; 1454 extern const struct bpf_func_proto bpf_sk_redirect_map_proto; 1455 extern const struct bpf_func_proto bpf_spin_lock_proto; 1456 extern const struct bpf_func_proto bpf_spin_unlock_proto; 1457 extern const struct bpf_func_proto bpf_get_local_storage_proto; 1458 extern const struct bpf_func_proto bpf_strtol_proto; 1459 extern const struct bpf_func_proto bpf_strtoul_proto; 1460 extern const struct bpf_func_proto bpf_tcp_sock_proto; 1461 extern const struct bpf_func_proto bpf_jiffies64_proto; 1462 1463 /* Shared helpers among cBPF and eBPF. */ 1464 void bpf_user_rnd_init_once(void); 1465 u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 1466 1467 #if defined(CONFIG_NET) 1468 bool bpf_sock_common_is_valid_access(int off, int size, 1469 enum bpf_access_type type, 1470 struct bpf_insn_access_aux *info); 1471 bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type, 1472 struct bpf_insn_access_aux *info); 1473 u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, 1474 const struct bpf_insn *si, 1475 struct bpf_insn *insn_buf, 1476 struct bpf_prog *prog, 1477 u32 *target_size); 1478 #else 1479 static inline bool bpf_sock_common_is_valid_access(int off, int size, 1480 enum bpf_access_type type, 1481 struct bpf_insn_access_aux *info) 1482 { 1483 return false; 1484 } 1485 static inline bool bpf_sock_is_valid_access(int off, int size, 1486 enum bpf_access_type type, 1487 struct bpf_insn_access_aux *info) 1488 { 1489 return false; 1490 } 1491 static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, 1492 const struct bpf_insn *si, 1493 struct bpf_insn *insn_buf, 1494 struct bpf_prog *prog, 1495 u32 *target_size) 1496 { 1497 return 0; 1498 } 1499 #endif 1500 1501 #ifdef CONFIG_INET 1502 struct sk_reuseport_kern { 1503 struct sk_buff *skb; 1504 struct sock *sk; 1505 struct sock *selected_sk; 1506 void *data_end; 1507 u32 hash; 1508 u32 reuseport_id; 1509 bool bind_inany; 1510 }; 1511 bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type, 1512 struct bpf_insn_access_aux *info); 1513 1514 u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, 1515 const struct bpf_insn *si, 1516 struct bpf_insn *insn_buf, 1517 struct bpf_prog *prog, 1518 u32 *target_size); 1519 1520 bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type, 1521 struct bpf_insn_access_aux *info); 1522 1523 u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, 1524 const struct bpf_insn *si, 1525 struct bpf_insn *insn_buf, 1526 struct bpf_prog *prog, 1527 u32 *target_size); 1528 #else 1529 static inline bool bpf_tcp_sock_is_valid_access(int off, int size, 1530 enum bpf_access_type type, 1531 struct bpf_insn_access_aux *info) 1532 { 1533 return false; 1534 } 1535 1536 static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, 1537 const struct bpf_insn *si, 1538 struct bpf_insn *insn_buf, 1539 struct bpf_prog *prog, 1540 u32 *target_size) 1541 { 1542 return 0; 1543 } 1544 static inline bool bpf_xdp_sock_is_valid_access(int off, int size, 1545 enum bpf_access_type type, 1546 struct bpf_insn_access_aux *info) 1547 { 1548 return false; 1549 } 1550 1551 static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, 1552 const struct bpf_insn *si, 1553 struct bpf_insn *insn_buf, 1554 struct bpf_prog *prog, 1555 u32 *target_size) 1556 { 1557 return 0; 1558 } 1559 #endif /* CONFIG_INET */ 1560 1561 enum bpf_text_poke_type { 1562 BPF_MOD_CALL, 1563 BPF_MOD_JUMP, 1564 }; 1565 1566 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, 1567 void *addr1, void *addr2); 1568 1569 #endif /* _LINUX_BPF_H */ 1570