/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1

#include <uapi/linux/bpf.h>

#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/mm_types.h>
#include <linux/wait.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/capability.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/bpfptr.h>

struct bpf_verifier_env;
struct bpf_verifier_log;
struct perf_event;
struct bpf_prog;
struct bpf_prog_aux;
struct bpf_map;
struct sock;
struct seq_file;
struct btf;
struct btf_type;
struct exception_table_entry;
struct seq_operations;
struct bpf_iter_aux_info;
struct bpf_local_storage;
struct bpf_local_storage_map;
struct kobject;
struct mem_cgroup;
struct module;
struct bpf_func_state;

extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
extern struct kobject *btf_kobj;

typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
					struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
struct bpf_iter_seq_info {
	const struct seq_operations *seq_ops;
	bpf_iter_init_seq_priv_t init_seq_private;
	bpf_iter_fini_seq_priv_t fini_seq_private;
	u32 seq_priv_size;
};

/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
	/* funcs callable from userspace (via syscall) */
	int (*map_alloc_check)(union bpf_attr *attr);
	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
	void (*map_release)(struct bpf_map *map, struct file *map_file);
	void (*map_free)(struct bpf_map *map);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
	void (*map_release_uref)(struct bpf_map *map);
	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
	int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key,
					  void *value, u64 flags);
	int (*map_lookup_and_delete_batch)(struct bpf_map *map,
					   const union bpf_attr *attr,
					   union bpf_attr __user *uattr);
	int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);

	/* funcs callable from userspace and from eBPF programs */
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_map *map, void *key);
	int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
	int (*map_pop_elem)(struct bpf_map *map, void *value);
	int (*map_peek_elem)(struct bpf_map *map, void *value);

	/* funcs called by prog_array and perf_event_array map */
	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
				int fd);
	void (*map_fd_put_ptr)(void *ptr);
	int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
	u32 (*map_fd_sys_lookup_elem)(void *ptr);
	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
				  struct seq_file *m);
	int (*map_check_btf)(const struct bpf_map *map,
			     const struct btf *btf,
			     const struct btf_type *key_type,
			     const struct btf_type *value_type);

	/* Prog poke tracking helpers. */
	int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
			     struct bpf_prog *new);

	/* Direct value access helpers. */
	int (*map_direct_value_addr)(const struct bpf_map *map,
				     u64 *imm, u32 off);
	int (*map_direct_value_meta)(const struct bpf_map *map,
				     u64 imm, u32 *off);
	int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
	__poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
			     struct poll_table_struct *pts);

	/* Functions called by bpf_local_storage maps */
	int (*map_local_storage_charge)(struct bpf_local_storage_map *smap,
					void *owner, u32 size);
	void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap,
					   void *owner, u32 size);
	struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);

	/* Misc helpers. */
	int (*map_redirect)(struct bpf_map *map, u32 ifindex, u64 flags);

	/* map_meta_equal must be implemented for maps that can be
	 * used as an inner map.  It is a runtime check to ensure
	 * an inner map can be inserted into an outer map.
	 *
	 * Some properties of the inner map have already been used
	 * during verification.  When inserting an inner map at runtime,
	 * map_meta_equal has to ensure the map being inserted has the
	 * same properties that the verifier has used earlier.
	 */
	bool (*map_meta_equal)(const struct bpf_map *meta0,
			       const struct bpf_map *meta1);


	int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
					      struct bpf_func_state *caller,
					      struct bpf_func_state *callee);
	int (*map_for_each_callback)(struct bpf_map *map,
				     bpf_callback_t callback_fn,
				     void *callback_ctx, u64 flags);

	/* BTF name and id of struct allocated by map_alloc */
	const char * const map_btf_name;
	int *map_btf_id;

	/* bpf_iter info used to open a seq_file */
	const struct bpf_iter_seq_info *iter_seq_info;
};

struct bpf_map {
	/* The first two cachelines with read-mostly members of which some
	 * are also accessed in fast-path (e.g. ops, max_entries).
	 */
	const struct bpf_map_ops *ops ____cacheline_aligned;
	struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	enum bpf_map_type map_type;
	u32 key_size;
	u32 value_size;
	u32 max_entries;
	u64 map_extra; /* any per-map-type extra fields */
	u32 map_flags;
	int spin_lock_off; /* >=0 valid offset, <0 error */
	int timer_off; /* >=0 valid offset, <0 error */
	u32 id;
	int numa_node;
	u32 btf_key_type_id;
	u32 btf_value_type_id;
	u32 btf_vmlinux_value_type_id;
	struct btf *btf;
#ifdef CONFIG_MEMCG_KMEM
	struct mem_cgroup *memcg;
#endif
	char name[BPF_OBJ_NAME_LEN];
	bool bypass_spec_v1;
	bool frozen; /* write-once; write-protected by freeze_mutex */
	/* 14 bytes hole */

	/* The 3rd and 4th cacheline with misc members to avoid false sharing
	 * particularly with refcounting.
	 */
	atomic64_t refcnt ____cacheline_aligned;
	atomic64_t usercnt;
	struct work_struct work;
	struct mutex freeze_mutex;
	atomic64_t writecnt;
};
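/*
 * Illustrative sketch (assumption, not part of this header): a hypothetical,
 * minimal bpf_map_ops for a read-only array-like map, showing which callbacks
 * the syscall and program paths would invoke.  struct example_map and all
 * example_map_* names are made up for illustration only.
 *
 *	static void *example_map_lookup_elem(struct bpf_map *map, void *key)
 *	{
 *		struct example_map *emap = container_of(map, struct example_map, map);
 *		u32 index = *(u32 *)key;
 *
 *		if (index >= map->max_entries)
 *			return NULL;
 *		return emap->values + (u64)index * map->value_size;
 *	}
 *
 *	static const struct bpf_map_ops example_map_ops = {
 *		.map_alloc		= example_map_alloc,
 *		.map_free		= example_map_free,
 *		.map_get_next_key	= example_map_get_next_key,
 *		.map_lookup_elem	= example_map_lookup_elem,
 *	};
 */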
static inline bool map_value_has_spin_lock(const struct bpf_map *map)
{
	return map->spin_lock_off >= 0;
}

static inline bool map_value_has_timer(const struct bpf_map *map)
{
	return map->timer_off >= 0;
}

static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
{
	if (unlikely(map_value_has_spin_lock(map)))
		memset(dst + map->spin_lock_off, 0, sizeof(struct bpf_spin_lock));
	if (unlikely(map_value_has_timer(map)))
		memset(dst + map->timer_off, 0, sizeof(struct bpf_timer));
}

/* copy everything but bpf_spin_lock and bpf_timer. There could be one of each. */
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
	u32 s_off = 0, s_sz = 0, t_off = 0, t_sz = 0;

	if (unlikely(map_value_has_spin_lock(map))) {
		s_off = map->spin_lock_off;
		s_sz = sizeof(struct bpf_spin_lock);
	}
	if (unlikely(map_value_has_timer(map))) {
		t_off = map->timer_off;
		t_sz = sizeof(struct bpf_timer);
	}

	if (unlikely(s_sz || t_sz)) {
		if (s_off < t_off || !s_sz) {
			swap(s_off, t_off);
			swap(s_sz, t_sz);
		}
		memcpy(dst, src, t_off);
		memcpy(dst + t_off + t_sz,
		       src + t_off + t_sz,
		       s_off - t_off - t_sz);
		memcpy(dst + s_off + s_sz,
		       src + s_off + s_sz,
		       map->value_size - s_off - s_sz);
	} else {
		memcpy(dst, src, map->value_size);
	}
}
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src);
void bpf_timer_cancel_and_free(void *timer);
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);

struct bpf_offload_dev;
struct bpf_offloaded_map;

struct bpf_map_dev_ops {
	int (*map_get_next_key)(struct bpf_offloaded_map *map,
				void *key, void *next_key);
	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value);
	int (*map_update_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
};

struct bpf_offloaded_map {
	struct bpf_map map;
	struct net_device *netdev;
	const struct bpf_map_dev_ops *dev_ops;
	void *dev_priv;
	struct list_head offloads;
};

static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
{
	return container_of(map, struct bpf_offloaded_map, map);
}

static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
	return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
		map->ops->map_seq_show_elem;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type);

bool bpf_map_meta_equal(const struct bpf_map *meta0,
			const struct bpf_map *meta1);

extern const struct bpf_map_ops bpf_map_offload_ops;
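/*
 * Illustrative sketch (assumption, not kernel code): given a map value laid
 * out as
 *
 *	struct example_value {
 *		u64 counter;
 *		struct bpf_spin_lock lock;	// at map->spin_lock_off
 *		u64 payload;
 *	};
 *
 * copy_map_value() above copies 'counter' and 'payload' but leaves the
 * bpf_spin_lock bytes in 'dst' untouched, while check_and_init_map_value()
 * zeroes only the lock/timer regions of a freshly returned value.  The three
 * memcpy() calls in copy_map_value() correspond to the byte ranges before,
 * between and after the (up to two) skipped fields.
 */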
/* bpf_type_flag contains a set of flags that are applicable to the values of
 * arg_type, ret_type and reg_type. For example, a pointer value may be null,
 * or a memory is read-only. We classify types into two categories: base types
 * and extended types. Extended types are base types combined with a type flag.
 *
 * Currently there are no more than 32 base types in arg_type, ret_type and
 * reg_types.
 */
#define BPF_BASE_TYPE_BITS	8

enum bpf_type_flag {
	/* PTR may be NULL. */
	PTR_MAYBE_NULL		= BIT(0 + BPF_BASE_TYPE_BITS),

	/* MEM is read-only. When applied on bpf_arg, it indicates the arg is
	 * compatible with both mutable and immutable memory.
	 */
	MEM_RDONLY		= BIT(1 + BPF_BASE_TYPE_BITS),

	/* MEM was "allocated" from a different helper, and cannot be mixed
	 * with regular non-MEM_ALLOC'ed MEM types.
	 */
	MEM_ALLOC		= BIT(2 + BPF_BASE_TYPE_BITS),

	__BPF_TYPE_LAST_FLAG	= MEM_ALLOC,
};

/* Max number of base types. */
#define BPF_BASE_TYPE_LIMIT	(1UL << BPF_BASE_TYPE_BITS)

/* Max number of all types. */
#define BPF_TYPE_LIMIT		(__BPF_TYPE_LAST_FLAG | (__BPF_TYPE_LAST_FLAG - 1))

/* function argument constraints */
enum bpf_arg_type {
	ARG_DONTCARE = 0,	/* unused argument in helper function */

	/* the following constraints used to prototype
	 * bpf_map_lookup/update/delete_elem() functions
	 */
	ARG_CONST_MAP_PTR,	/* const argument used as pointer to bpf_map */
	ARG_PTR_TO_MAP_KEY,	/* pointer to stack used as map key */
	ARG_PTR_TO_MAP_VALUE,	/* pointer to stack used as map value */
	ARG_PTR_TO_UNINIT_MAP_VALUE,	/* pointer to valid memory used to store a map value */

	/* the following constraints used to prototype bpf_memcmp() and other
	 * functions that access data on eBPF program stack
	 */
	ARG_PTR_TO_MEM,		/* pointer to valid memory (stack, packet, map value) */
	ARG_PTR_TO_UNINIT_MEM,	/* pointer to memory does not need to be initialized,
				 * helper function must fill all bytes or clear
				 * them in error case.
				 */

	ARG_CONST_SIZE,		/* number of bytes accessed from memory */
	ARG_CONST_SIZE_OR_ZERO,	/* number of bytes accessed from memory or 0 */

	ARG_PTR_TO_CTX,		/* pointer to context */
	ARG_ANYTHING,		/* any (initialized) argument is ok */
	ARG_PTR_TO_SPIN_LOCK,	/* pointer to bpf_spin_lock */
	ARG_PTR_TO_SOCK_COMMON,	/* pointer to sock_common */
	ARG_PTR_TO_INT,		/* pointer to int */
	ARG_PTR_TO_LONG,	/* pointer to long */
	ARG_PTR_TO_SOCKET,	/* pointer to bpf_sock (fullsock) */
	ARG_PTR_TO_BTF_ID,	/* pointer to in-kernel struct */
	ARG_PTR_TO_ALLOC_MEM,	/* pointer to dynamically allocated memory */
	ARG_CONST_ALLOC_SIZE_OR_ZERO,	/* number of allocated bytes requested */
	ARG_PTR_TO_BTF_ID_SOCK_COMMON,	/* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */
	ARG_PTR_TO_PERCPU_BTF_ID,	/* pointer to in-kernel percpu type */
	ARG_PTR_TO_FUNC,	/* pointer to a bpf program function */
	ARG_PTR_TO_STACK,	/* pointer to stack */
	ARG_PTR_TO_CONST_STR,	/* pointer to a null terminated read-only string */
	ARG_PTR_TO_TIMER,	/* pointer to bpf_timer */
	__BPF_ARG_TYPE_MAX,

	/* Extended arg_types. */
	ARG_PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_MAP_VALUE,
	ARG_PTR_TO_MEM_OR_NULL		= PTR_MAYBE_NULL | ARG_PTR_TO_MEM,
	ARG_PTR_TO_CTX_OR_NULL		= PTR_MAYBE_NULL | ARG_PTR_TO_CTX,
	ARG_PTR_TO_SOCKET_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_SOCKET,
	ARG_PTR_TO_ALLOC_MEM_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_ALLOC_MEM,
	ARG_PTR_TO_STACK_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_STACK,

	/* This must be the last entry. Its purpose is to ensure the enum is
	 * wide enough to hold the higher bits reserved for bpf_type_flag.
	 */
	__BPF_ARG_TYPE_LIMIT	= BPF_TYPE_LIMIT,
};
static_assert(__BPF_ARG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
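/*
 * Illustrative sketch (assumption, not kernel code): how a composed type is
 * split back into its base type and flags using the masks defined above.
 *
 *	u32 type  = ARG_PTR_TO_MEM_OR_NULL;
 *	u32 base  = type & (BPF_BASE_TYPE_LIMIT - 1);	// == ARG_PTR_TO_MEM
 *	u32 flags = type & ~(BPF_BASE_TYPE_LIMIT - 1);	// == PTR_MAYBE_NULL
 *
 * The verifier wraps this masking in small helpers; the arithmetic is shown
 * only to make clear why base types must fit into BPF_BASE_TYPE_BITS bits.
 */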
/* type of values returned from helper functions */
enum bpf_return_type {
	RET_INTEGER,			/* function returns integer */
	RET_VOID,			/* function doesn't return anything */
	RET_PTR_TO_MAP_VALUE,		/* returns a pointer to map elem value */
	RET_PTR_TO_SOCKET,		/* returns a pointer to a socket */
	RET_PTR_TO_TCP_SOCK,		/* returns a pointer to a tcp_sock */
	RET_PTR_TO_SOCK_COMMON,		/* returns a pointer to a sock_common */
	RET_PTR_TO_ALLOC_MEM,		/* returns a pointer to dynamically allocated memory */
	RET_PTR_TO_MEM_OR_BTF_ID,	/* returns a pointer to a valid memory or a btf_id */
	RET_PTR_TO_BTF_ID,		/* returns a pointer to a btf_id */
	__BPF_RET_TYPE_MAX,

	/* Extended ret_types. */
	RET_PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_MAP_VALUE,
	RET_PTR_TO_SOCKET_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_SOCKET,
	RET_PTR_TO_TCP_SOCK_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_TCP_SOCK,
	RET_PTR_TO_SOCK_COMMON_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_SOCK_COMMON,
	RET_PTR_TO_ALLOC_MEM_OR_NULL	= PTR_MAYBE_NULL | MEM_ALLOC | RET_PTR_TO_ALLOC_MEM,
	RET_PTR_TO_BTF_ID_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_BTF_ID,

	/* This must be the last entry. Its purpose is to ensure the enum is
	 * wide enough to hold the higher bits reserved for bpf_type_flag.
	 */
	__BPF_RET_TYPE_LIMIT	= BPF_TYPE_LIMIT,
};
static_assert(__BPF_RET_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);

/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
 * instructions after verifying
 */
struct bpf_func_proto {
	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
	bool gpl_only;
	bool pkt_access;
	enum bpf_return_type ret_type;
	union {
		struct {
			enum bpf_arg_type arg1_type;
			enum bpf_arg_type arg2_type;
			enum bpf_arg_type arg3_type;
			enum bpf_arg_type arg4_type;
			enum bpf_arg_type arg5_type;
		};
		enum bpf_arg_type arg_type[5];
	};
	union {
		struct {
			u32 *arg1_btf_id;
			u32 *arg2_btf_id;
			u32 *arg3_btf_id;
			u32 *arg4_btf_id;
			u32 *arg5_btf_id;
		};
		u32 *arg_btf_id[5];
	};
	int *ret_btf_id; /* return value btf_id */
	bool (*allowed)(const struct bpf_prog *prog);
};
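/*
 * Illustrative sketch (assumption, not a helper that exists in this header):
 * how a helper's prototype is typically described for the verifier.  The
 * names bpf_example_lookup and bpf_example_lookup_proto are hypothetical;
 * BPF_CALL_2() comes from linux/filter.h.
 *
 *	BPF_CALL_2(bpf_example_lookup, struct bpf_map *, map, void *, key)
 *	{
 *		return (unsigned long)map->ops->map_lookup_elem(map, key);
 *	}
 *
 *	static const struct bpf_func_proto bpf_example_lookup_proto = {
 *		.func		= bpf_example_lookup,
 *		.gpl_only	= false,
 *		.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_KEY,
 *	};
 */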
/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
 * the first argument to eBPF programs.
 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
 */
struct bpf_context;

enum bpf_access_type {
	BPF_READ = 1,
	BPF_WRITE = 2
};

/* types of values stored in eBPF registers */
/* Pointer types represent:
 * pointer
 * pointer + imm
 * pointer + (u16) var
 * pointer + (u16) var + imm
 * if (range > 0) then [ptr, ptr + range - off) is safe to access
 * if (id > 0) means that some 'var' was added
 * if (off > 0) means that 'imm' was added
 */
enum bpf_reg_type {
	NOT_INIT = 0,		 /* nothing was written into register */
	SCALAR_VALUE,		 /* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		 /* reg points to bpf_context */
	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
	PTR_TO_MAP_KEY,		 /* reg points to a map element key */
	PTR_TO_STACK,		 /* reg == frame_pointer + offset */
	PTR_TO_PACKET_META,	 /* skb->data - meta_len */
	PTR_TO_PACKET,		 /* reg points to skb->data */
	PTR_TO_PACKET_END,	 /* skb->data + headlen */
	PTR_TO_FLOW_KEYS,	 /* reg points to bpf_flow_keys */
	PTR_TO_SOCKET,		 /* reg points to struct bpf_sock */
	PTR_TO_SOCK_COMMON,	 /* reg points to sock_common */
	PTR_TO_TCP_SOCK,	 /* reg points to struct tcp_sock */
	PTR_TO_TP_BUFFER,	 /* reg points to a writable raw tp's buffer */
	PTR_TO_XDP_SOCK,	 /* reg points to struct xdp_sock */
	/* PTR_TO_BTF_ID points to a kernel struct that does not need
	 * to be null checked by the BPF program. This does not imply the
	 * pointer is _not_ null and in practice this can easily be a null
	 * pointer when reading pointer chains. The assumption is program
	 * context will handle null pointer dereference typically via fault
	 * handling. The verifier must keep this in mind and can make no
	 * assumptions about null or non-null when doing branch analysis.
	 * Further, when passed into helpers the helpers can not, without
	 * additional context, assume the value is non-null.
	 */
	PTR_TO_BTF_ID,
	/* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not
	 * been checked for null. Used primarily to inform the verifier
	 * an explicit null check is required for this struct.
	 */
	PTR_TO_MEM,		 /* reg points to valid memory region */
	PTR_TO_BUF,		 /* reg points to a read/write buffer */
	PTR_TO_PERCPU_BTF_ID,	 /* reg points to a percpu kernel variable */
	PTR_TO_FUNC,		 /* reg points to a bpf program function */
	__BPF_REG_TYPE_MAX,

	/* Extended reg_types. */
	PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | PTR_TO_MAP_VALUE,
	PTR_TO_SOCKET_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_SOCKET,
	PTR_TO_SOCK_COMMON_OR_NULL	= PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON,
	PTR_TO_TCP_SOCK_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_TCP_SOCK,
	PTR_TO_BTF_ID_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_BTF_ID,

	/* This must be the last entry. Its purpose is to ensure the enum is
	 * wide enough to hold the higher bits reserved for bpf_type_flag.
	 */
	__BPF_REG_TYPE_LIMIT	= BPF_TYPE_LIMIT,
};
static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
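/*
 * Illustrative sketch (assumption, BPF program side rather than kernel code):
 * the 'range' tracking described above is what makes the canonical XDP bounds
 * check below verifiable; after the comparison, [data, data + sizeof(*eth))
 * is known to be safe to access.  SEC() and bpf_htons() come from libbpf
 * headers and are assumed here.
 *
 *	SEC("xdp")
 *	int example_prog(struct xdp_md *ctx)
 *	{
 *		void *data = (void *)(long)ctx->data;
 *		void *data_end = (void *)(long)ctx->data_end;
 *		struct ethhdr *eth = data;
 *
 *		if (data + sizeof(*eth) > data_end)
 *			return XDP_PASS;
 *		return eth->h_proto == bpf_htons(ETH_P_IP) ? XDP_PASS : XDP_DROP;
 *	}
 */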
/* The information passed from prog-specific *_is_valid_access
 * back to the verifier.
 */
struct bpf_insn_access_aux {
	enum bpf_reg_type reg_type;
	union {
		int ctx_field_size;
		struct {
			struct btf *btf;
			u32 btf_id;
		};
	};
	struct bpf_verifier_log *log; /* for verbose logs */
};

static inline void
bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
{
	aux->ctx_field_size = size;
}

static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
{
	return insn->code == (BPF_LD | BPF_IMM | BPF_DW) &&
	       insn->src_reg == BPF_PSEUDO_FUNC;
}

struct bpf_prog_ops {
	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
			union bpf_attr __user *uattr);
};

struct bpf_verifier_ops {
	/* return eBPF function prototype for verification */
	const struct bpf_func_proto *
	(*get_func_proto)(enum bpf_func_id func_id,
			  const struct bpf_prog *prog);

	/* return true if 'size' wide access at offset 'off' within bpf_context
	 * with 'type' (read or write) is allowed
	 */
	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info);
	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
			    const struct bpf_prog *prog);
	int (*gen_ld_abs)(const struct bpf_insn *orig,
			  struct bpf_insn *insn_buf);
	u32 (*convert_ctx_access)(enum bpf_access_type type,
				  const struct bpf_insn *src,
				  struct bpf_insn *dst,
				  struct bpf_prog *prog, u32 *target_size);
	int (*btf_struct_access)(struct bpf_verifier_log *log,
				 const struct btf *btf,
				 const struct btf_type *t, int off, int size,
				 enum bpf_access_type atype,
				 u32 *next_btf_id);
	bool (*check_kfunc_call)(u32 kfunc_btf_id, struct module *owner);
};

struct bpf_prog_offload_ops {
	/* verifier basic callbacks */
	int (*insn_hook)(struct bpf_verifier_env *env,
			 int insn_idx, int prev_insn_idx);
	int (*finalize)(struct bpf_verifier_env *env);
	/* verifier optimization callbacks (called after .finalize) */
	int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
			    struct bpf_insn *insn);
	int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
	/* program management callbacks */
	int (*prepare)(struct bpf_prog *prog);
	int (*translate)(struct bpf_prog *prog);
	void (*destroy)(struct bpf_prog *prog);
};

struct bpf_prog_offload {
	struct bpf_prog *prog;
	struct net_device *netdev;
	struct bpf_offload_dev *offdev;
	void *dev_priv;
	struct list_head offloads;
	bool dev_state;
	bool opt_failed;
	void *jited_image;
	u32 jited_len;
};

enum bpf_cgroup_storage_type {
	BPF_CGROUP_STORAGE_SHARED,
	BPF_CGROUP_STORAGE_PERCPU,
	__BPF_CGROUP_STORAGE_MAX
};

#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX

/* The longest tracepoint has 12 args.
 * See include/trace/bpf_probe.h
 */
#define MAX_BPF_FUNC_ARGS 12

/* The maximum number of arguments passed through registers
 * a single function may have.
 */
#define MAX_BPF_FUNC_REG_ARGS 5

struct btf_func_model {
	u8 ret_size;
	u8 nr_args;
	u8 arg_size[MAX_BPF_FUNC_ARGS];
};
/* Restore arguments before returning from trampoline to let original function
 * continue executing. This flag is used for fentry progs when there are no
 * fexit progs.
 */
#define BPF_TRAMP_F_RESTORE_REGS	BIT(0)
/* Call original function after fentry progs, but before fexit progs.
 * Makes sense for fentry/fexit, normal calls and indirect calls.
 */
#define BPF_TRAMP_F_CALL_ORIG		BIT(1)
/* Skip current frame and return to parent. Makes sense for fentry/fexit
 * programs only. Should not be used with normal calls and indirect calls.
 */
#define BPF_TRAMP_F_SKIP_FRAME		BIT(2)
/* Store IP address of the caller on the trampoline stack,
 * so it's available for trampoline's programs.
 */
#define BPF_TRAMP_F_IP_ARG		BIT(3)
/* Return the return value of fentry prog. Only used by bpf_struct_ops. */
#define BPF_TRAMP_F_RET_FENTRY_RET	BIT(4)

/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
 * bytes on x86.  Pick a number to fit into BPF_IMAGE_SIZE / 2
 */
#define BPF_MAX_TRAMP_PROGS 38

struct bpf_tramp_progs {
	struct bpf_prog *progs[BPF_MAX_TRAMP_PROGS];
	int nr_progs;
};

/* Different use cases for BPF trampoline:
 * 1. replace nop at the function entry (kprobe equivalent)
 *    flags = BPF_TRAMP_F_RESTORE_REGS
 *    fentry = a set of programs to run before returning from trampoline
 *
 * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
 *    flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
 *    orig_call = fentry_ip + MCOUNT_INSN_SIZE
 *    fentry = a set of programs to run before calling original function
 *    fexit = a set of programs to run after original function
 *
 * 3. replace direct call instruction anywhere in the function body
 *    or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid)
 *    With flags = 0
 *      fentry = a set of programs to run before returning from trampoline
 *    With flags = BPF_TRAMP_F_CALL_ORIG
 *      orig_call = original callback addr or direct function addr
 *      fentry = a set of programs to run before calling original function
 *      fexit = a set of programs to run after original function
 */
struct bpf_tramp_image;
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_progs *tprogs,
				void *orig_call);
/* these two functions are called from generated trampoline */
u64 notrace __bpf_prog_enter(struct bpf_prog *prog);
void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog);
void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start);
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);

struct bpf_ksym {
	unsigned long start;
	unsigned long end;
	char name[KSYM_NAME_LEN];
	struct list_head lnode;
	struct latch_tree_node tnode;
	bool prog;
};

enum bpf_tramp_prog_type {
	BPF_TRAMP_FENTRY,
	BPF_TRAMP_FEXIT,
	BPF_TRAMP_MODIFY_RETURN,
	BPF_TRAMP_MAX,
	BPF_TRAMP_REPLACE, /* more than MAX */
};

struct bpf_tramp_image {
	void *image;
	struct bpf_ksym ksym;
	struct percpu_ref pcref;
	void *ip_after_call;
	void *ip_epilogue;
	union {
		struct rcu_head rcu;
		struct work_struct work;
	};
};
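/*
 * Illustrative sketch (assumption, not kernel code): how the flags above are
 * combined for the fentry+fexit case from use case 2 before handing the work
 * to arch_prepare_bpf_trampoline().  'im', 'image', 'image_end', 'model',
 * 'tprogs' and 'orig_call' stand for hypothetical, already-populated objects.
 *
 *	u32 flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;
 *	int err;
 *
 *	err = arch_prepare_bpf_trampoline(im, image, image_end,
 *					  model, flags, tprogs, orig_call);
 */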
struct bpf_trampoline {
	/* hlist for trampoline_table */
	struct hlist_node hlist;
	/* serializes access to fields of this trampoline */
	struct mutex mutex;
	refcount_t refcnt;
	u64 key;
	struct {
		struct btf_func_model model;
		void *addr;
		bool ftrace_managed;
	} func;
	/* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF
	 * program by replacing one of its functions. func.addr is the address
	 * of the function it replaced.
	 */
	struct bpf_prog *extension_prog;
	/* list of BPF programs using this trampoline */
	struct hlist_head progs_hlist[BPF_TRAMP_MAX];
	/* Number of attached programs. A counter per kind. */
	int progs_cnt[BPF_TRAMP_MAX];
	/* Executable image of trampoline */
	struct bpf_tramp_image *cur_image;
	u64 selector;
	struct module *mod;
};

struct bpf_attach_target_info {
	struct btf_func_model fmodel;
	long tgt_addr;
	const char *tgt_name;
	const struct btf_type *tgt_type;
};

#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */

struct bpf_dispatcher_prog {
	struct bpf_prog *prog;
	refcount_t users;
};

struct bpf_dispatcher {
	/* dispatcher mutex */
	struct mutex mutex;
	void *func;
	struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
	int num_progs;
	void *image;
	u32 image_off;
	struct bpf_ksym ksym;
};

static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func(
	const void *ctx,
	const struct bpf_insn *insnsi,
	unsigned int (*bpf_func)(const void *,
				 const struct bpf_insn *))
{
	return bpf_func(ctx, insnsi);
}
#ifdef CONFIG_BPF_JIT
int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
struct bpf_trampoline *bpf_trampoline_get(u64 key,
					  struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs);
#define BPF_DISPATCHER_INIT(_name) {				\
	.mutex = __MUTEX_INITIALIZER(_name.mutex),		\
	.func = &_name##_func,					\
	.progs = {},						\
	.num_progs = 0,						\
	.image = NULL,						\
	.image_off = 0,						\
	.ksym = {						\
		.name  = #_name,				\
		.lnode = LIST_HEAD_INIT(_name.ksym.lnode),	\
	},							\
}

#define DEFINE_BPF_DISPATCHER(name)					\
	noinline __nocfi unsigned int bpf_dispatcher_##name##_func(	\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		unsigned int (*bpf_func)(const void *,			\
					 const struct bpf_insn *))	\
	{								\
		return bpf_func(ctx, insnsi);				\
	}								\
	EXPORT_SYMBOL(bpf_dispatcher_##name##_func);			\
	struct bpf_dispatcher bpf_dispatcher_##name =			\
		BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
#define DECLARE_BPF_DISPATCHER(name)					\
	unsigned int bpf_dispatcher_##name##_func(			\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		unsigned int (*bpf_func)(const void *,			\
					 const struct bpf_insn *));	\
	extern struct bpf_dispatcher bpf_dispatcher_##name;
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
				struct bpf_prog *to);
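/*
 * Illustrative sketch (assumption): the dispatcher macros above are meant to
 * be used in pairs - DECLARE_BPF_DISPATCHER() in a header and
 * DEFINE_BPF_DISPATCHER() in exactly one .c file - with the dispatcher then
 * driven through the _FUNC/_PTR accessors.  'example' is a hypothetical
 * dispatcher name and __bpf_prog_run() is the runner from linux/filter.h.
 *
 *	DEFINE_BPF_DISPATCHER(example)
 *
 *	ret = __bpf_prog_run(prog, ctx, BPF_DISPATCHER_FUNC(example));
 *	bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(example), old_prog, prog);
 */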
/* Called only from JIT-enabled code, so there's no need for stubs. */
void *bpf_jit_alloc_exec_page(void);
void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym);
int bpf_jit_charge_modmem(u32 pages);
void bpf_jit_uncharge_modmem(u32 pages);
bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
#else
static inline int bpf_trampoline_link_prog(struct bpf_prog *prog,
					   struct bpf_trampoline *tr)
{
	return -ENOTSUPP;
}
static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog,
					     struct bpf_trampoline *tr)
{
	return -ENOTSUPP;
}
static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
							struct bpf_attach_target_info *tgt_info)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
#define DECLARE_BPF_DISPATCHER(name)
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func
#define BPF_DISPATCHER_PTR(name) NULL
static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
					      struct bpf_prog *from,
					      struct bpf_prog *to) {}
static inline bool is_bpf_image_address(unsigned long address)
{
	return false;
}
static inline bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
{
	return false;
}
#endif

struct bpf_func_info_aux {
	u16 linkage;
	bool unreliable;
};

enum bpf_jit_poke_reason {
	BPF_POKE_REASON_TAIL_CALL,
};

/* Descriptor of pokes pointing /into/ the JITed image. */
struct bpf_jit_poke_descriptor {
	void *tailcall_target;
	void *tailcall_bypass;
	void *bypass_addr;
	void *aux;
	union {
		struct {
			struct bpf_map *map;
			u32 key;
		} tail_call;
	};
	bool tailcall_target_stable;
	u8 adj_off;
	u16 reason;
	u32 insn_idx;
};

/* reg_type info for ctx arguments */
struct bpf_ctx_arg_aux {
	u32 offset;
	enum bpf_reg_type reg_type;
	u32 btf_id;
};

struct btf_mod_pair {
	struct btf *btf;
	struct module *module;
};

struct bpf_kfunc_desc_tab;

struct bpf_prog_aux {
	atomic64_t refcnt;
	u32 used_map_cnt;
	u32 used_btf_cnt;
	u32 max_ctx_offset;
	u32 max_pkt_offset;
	u32 max_tp_access;
	u32 stack_depth;
	u32 id;
	u32 func_cnt; /* used by non-func prog as the number of func progs */
	u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
	u32 attach_btf_id; /* in-kernel BTF type id to attach to */
	u32 ctx_arg_info_size;
	u32 max_rdonly_access;
	u32 max_rdwr_access;
	struct btf *attach_btf;
	const struct bpf_ctx_arg_aux *ctx_arg_info;
	struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */
	struct bpf_prog *dst_prog;
	struct bpf_trampoline *dst_trampoline;
	enum bpf_prog_type saved_dst_prog_type;
	enum bpf_attach_type saved_dst_attach_type;
	bool verifier_zext; /* Zero extensions have been inserted by verifier. */
	bool offload_requested;
	bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
	bool func_proto_unreliable;
	bool sleepable;
	bool tail_call_reachable;
	struct hlist_node tramp_hlist;
	/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
	const struct btf_type *attach_func_proto;
	/* function name for valid attach_btf_id */
	const char *attach_func_name;
	struct bpf_prog **func;
	void *jit_data; /* JIT specific data. arch dependent */
	struct bpf_jit_poke_descriptor *poke_tab;
	struct bpf_kfunc_desc_tab *kfunc_tab;
	struct bpf_kfunc_btf_tab *kfunc_btf_tab;
	u32 size_poke_tab;
	struct bpf_ksym ksym;
	const struct bpf_prog_ops *ops;
	struct bpf_map **used_maps;
	struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */
	struct btf_mod_pair *used_btfs;
	struct bpf_prog *prog;
	struct user_struct *user;
	u64 load_time; /* ns since boottime */
	u32 verified_insns;
	struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
	void *security;
#endif
	struct bpf_prog_offload *offload;
	struct btf *btf;
	struct bpf_func_info *func_info;
	struct bpf_func_info_aux *func_info_aux;
	/* bpf_line_info loaded from userspace. linfo->insn_off
	 * has the xlated insn offset.
	 * Both the main and sub prog share the same linfo.
	 * The subprog can access its first linfo by
	 * using the linfo_idx.
	 */
	struct bpf_line_info *linfo;
	/* jited_linfo is the jited addr of the linfo. It has a
	 * one to one mapping to linfo:
	 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
	 * Both the main and sub prog share the same jited_linfo.
	 * The subprog can access its first jited_linfo by
	 * using the linfo_idx.
	 */
	void **jited_linfo;
	u32 func_info_cnt;
	u32 nr_linfo;
	/* subprog can use linfo_idx to access its first linfo and
	 * jited_linfo.
	 * main prog always has linfo_idx == 0
	 */
	u32 linfo_idx;
	u32 num_exentries;
	struct exception_table_entry *extable;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};

struct bpf_array_aux {
	/* 'Ownership' of prog array is claimed by the first program that
	 * is going to use this map or by the first program whose FD is
	 * stored in the map, to make sure that all callers and callees have
	 * the same prog type and JITed flag.
	 */
	struct {
		spinlock_t lock;
		enum bpf_prog_type type;
		bool jited;
	} owner;
	/* Programs with direct jumps into programs that are part of this array. */
	struct list_head poke_progs;
	struct bpf_map *map;
	struct mutex poke_mutex;
	struct work_struct work;
};

struct bpf_link {
	atomic64_t refcnt;
	u32 id;
	enum bpf_link_type type;
	const struct bpf_link_ops *ops;
	struct bpf_prog *prog;
	struct work_struct work;
};

struct bpf_link_ops {
	void (*release)(struct bpf_link *link);
	void (*dealloc)(struct bpf_link *link);
	int (*detach)(struct bpf_link *link);
	int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
			   struct bpf_prog *old_prog);
	void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
	int (*fill_link_info)(const struct bpf_link *link,
			      struct bpf_link_info *info);
};

struct bpf_link_primer {
	struct bpf_link *link;
	struct file *file;
	int fd;
	u32 id;
};

struct bpf_struct_ops_value;
struct btf_member;

#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
struct bpf_struct_ops {
	const struct bpf_verifier_ops *verifier_ops;
	int (*init)(struct btf *btf);
	int (*check_member)(const struct btf_type *t,
			    const struct btf_member *member);
	int (*init_member)(const struct btf_type *t,
			   const struct btf_member *member,
			   void *kdata, const void *udata);
	int (*reg)(void *kdata);
	void (*unreg)(void *kdata);
	const struct btf_type *type;
	const struct btf_type *value_type;
	const char *name;
	struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
	u32 type_id;
	u32 value_id;
};

#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log);
bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_progs *tprogs,
				      struct bpf_prog *prog,
				      const struct btf_func_model *model,
				      void *image, void *image_end);
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		return bpf_struct_ops_get(data);
	else
		return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		bpf_struct_ops_put(data);
	else
		module_put(owner);
}

#ifdef CONFIG_NET
/* Define it here to avoid the use of forward declaration */
struct bpf_dummy_ops_state {
	int val;
};

struct bpf_dummy_ops {
	int (*test_1)(struct bpf_dummy_ops_state *cb);
	int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
		      char a3, unsigned long a4);
};

int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
			    union bpf_attr __user *uattr);
#endif
#else
static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
	return NULL;
}
static inline void bpf_struct_ops_init(struct btf *btf,
				       struct bpf_verifier_log *log)
{
}
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	module_put(owner);
}
static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
						     void *key,
						     void *value)
{
	return -EINVAL;
}
#endif

struct bpf_array {
	struct bpf_map map;
	u32 elem_size;
	u32 index_mask;
	struct bpf_array_aux *aux;
	union {
		char value[0] __aligned(8);
		void *ptrs[0] __aligned(8);
		void __percpu *pptrs[0] __aligned(8);
	};
};

#define BPF_COMPLEXITY_LIMIT_INSNS	1000000 /* yes. 1M insns */
#define MAX_TAIL_CALL_CNT 33

#define BPF_F_ACCESS_MASK	(BPF_F_RDONLY |		\
				 BPF_F_RDONLY_PROG |	\
				 BPF_F_WRONLY |		\
				 BPF_F_WRONLY_PROG)

#define BPF_MAP_CAN_READ  BIT(0)
#define BPF_MAP_CAN_WRITE BIT(1)

static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
{
	u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);

	/* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
	 * not possible.
	 */
	if (access_flags & BPF_F_RDONLY_PROG)
		return BPF_MAP_CAN_READ;
	else if (access_flags & BPF_F_WRONLY_PROG)
		return BPF_MAP_CAN_WRITE;
	else
		return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
}

static inline bool bpf_map_flags_access_ok(u32 access_flags)
{
	return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
	       (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
}

struct bpf_event_entry {
	struct perf_event *event;
	struct file *perf_file;
	struct file *map_file;
	struct rcu_head rcu;
};

bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);

const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void);

typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
					unsigned long off, unsigned long len);
typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
					const struct bpf_insn *src,
					struct bpf_insn *dst,
					struct bpf_prog *prog,
					u32 *target_size);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);

/* an array of programs to be executed under rcu_lock.
 *
 * Typical usage:
 * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, bpf_prog_run);
 *
 * the structure returned by bpf_prog_array_alloc() should be populated
 * with program pointers and the last pointer must be NULL.
 * The user has to keep refcnt on the program and make sure the program
 * is removed from the array before bpf_prog_put().
 * The 'struct bpf_prog_array *' should only be replaced with xchg()
 * since other cpus are walking the array of pointers in parallel.
 */
struct bpf_prog_array_item {
	struct bpf_prog *prog;
	union {
		struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
		u64 bpf_cookie;
	};
};

struct bpf_prog_array {
	struct rcu_head rcu;
	struct bpf_prog_array_item items[];
};

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array *progs);
int bpf_prog_array_length(struct bpf_prog_array *progs);
bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
				__u32 __user *prog_ids, u32 cnt);

void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
				struct bpf_prog *old_prog);
int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index);
int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
			     struct bpf_prog *prog);
int bpf_prog_array_copy_info(struct bpf_prog_array *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			u64 bpf_cookie,
			struct bpf_prog_array **new_array);

struct bpf_run_ctx {};

struct bpf_cg_run_ctx {
	struct bpf_run_ctx run_ctx;
	const struct bpf_prog_array_item *prog_item;
};

struct bpf_trace_run_ctx {
	struct bpf_run_ctx run_ctx;
	u64 bpf_cookie;
};

static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx)
{
	struct bpf_run_ctx *old_ctx = NULL;

#ifdef CONFIG_BPF_SYSCALL
	old_ctx = current->bpf_ctx;
	current->bpf_ctx = new_ctx;
#endif
	return old_ctx;
}

static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
{
#ifdef CONFIG_BPF_SYSCALL
	current->bpf_ctx = old_ctx;
#endif
}
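/*
 * Illustrative sketch (assumption, not kernel code): bpf_set_run_ctx() and
 * bpf_reset_run_ctx() are meant to be paired around running a program array,
 * which is exactly what the BPF_PROG_RUN_ARRAY*() helpers below do:
 *
 *	struct bpf_trace_run_ctx run_ctx;
 *	struct bpf_run_ctx *old_ctx;
 *
 *	old_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
 *	// ... run programs, filling run_ctx.bpf_cookie per item ...
 *	bpf_reset_run_ctx(old_ctx);
 */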
/* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */
#define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE	(1 << 0)
/* BPF program asks to set CN on the packet. */
#define BPF_RET_SET_CN				(1 << 0)

typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx);

static __always_inline u32
BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu,
			    const void *ctx, bpf_prog_run_fn run_prog,
			    u32 *ret_flags)
{
	const struct bpf_prog_array_item *item;
	const struct bpf_prog *prog;
	const struct bpf_prog_array *array;
	struct bpf_run_ctx *old_run_ctx;
	struct bpf_cg_run_ctx run_ctx;
	u32 ret = 1;
	u32 func_ret;

	migrate_disable();
	rcu_read_lock();
	array = rcu_dereference(array_rcu);
	item = &array->items[0];
	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	while ((prog = READ_ONCE(item->prog))) {
		run_ctx.prog_item = item;
		func_ret = run_prog(prog, ctx);
		ret &= (func_ret & 1);
		*(ret_flags) |= (func_ret >> 1);
		item++;
	}
	bpf_reset_run_ctx(old_run_ctx);
	rcu_read_unlock();
	migrate_enable();
	return ret;
}

static __always_inline u32
BPF_PROG_RUN_ARRAY_CG(const struct bpf_prog_array __rcu *array_rcu,
		      const void *ctx, bpf_prog_run_fn run_prog)
{
	const struct bpf_prog_array_item *item;
	const struct bpf_prog *prog;
	const struct bpf_prog_array *array;
	struct bpf_run_ctx *old_run_ctx;
	struct bpf_cg_run_ctx run_ctx;
	u32 ret = 1;

	migrate_disable();
	rcu_read_lock();
	array = rcu_dereference(array_rcu);
	item = &array->items[0];
	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	while ((prog = READ_ONCE(item->prog))) {
		run_ctx.prog_item = item;
		ret &= run_prog(prog, ctx);
		item++;
	}
	bpf_reset_run_ctx(old_run_ctx);
	rcu_read_unlock();
	migrate_enable();
	return ret;
}

static __always_inline u32
BPF_PROG_RUN_ARRAY(const struct bpf_prog_array __rcu *array_rcu,
		   const void *ctx, bpf_prog_run_fn run_prog)
{
	const struct bpf_prog_array_item *item;
	const struct bpf_prog *prog;
	const struct bpf_prog_array *array;
	struct bpf_run_ctx *old_run_ctx;
	struct bpf_trace_run_ctx run_ctx;
	u32 ret = 1;

	migrate_disable();
	rcu_read_lock();
	array = rcu_dereference(array_rcu);
	if (unlikely(!array))
		goto out;
	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	item = &array->items[0];
	while ((prog = READ_ONCE(item->prog))) {
		run_ctx.bpf_cookie = item->bpf_cookie;
		ret &= run_prog(prog, ctx);
		item++;
	}
	bpf_reset_run_ctx(old_run_ctx);
out:
	rcu_read_unlock();
	migrate_enable();
	return ret;
}
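/*
 * Illustrative sketch (assumption, not kernel code): how a tracing-style
 * caller would use BPF_PROG_RUN_ARRAY() above, matching the "Typical usage"
 * note next to struct bpf_prog_array.  'my_prog_array' is a hypothetical
 * RCU-managed array; bpf_prog_run() is the usual runner from linux/filter.h.
 *
 *	struct bpf_prog_array __rcu *my_prog_array;
 *	u32 ret;
 *
 *	ret = BPF_PROG_RUN_ARRAY(my_prog_array, ctx, bpf_prog_run);
 *	// ret is the AND of all program return values (1 == "continue")
 */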
/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs
 * so BPF programs can request cwr for TCP packets.
 *
 * Current cgroup skb programs can only return 0 or 1 (0 to drop the
 * packet). This macro changes the behavior so the low order bit
 * indicates whether the packet should be dropped (0) or not (1)
 * and the next bit is a congestion notification bit. This could be
 * used by TCP to call tcp_enter_cwr()
 *
 * Hence, new allowed return values of CGROUP EGRESS BPF programs are:
 *   0: drop packet
 *   1: keep packet
 *   2: drop packet and cn
 *   3: keep packet and cn
 *
 * This macro then converts it to one of the NET_XMIT or an error
 * code that is then interpreted as drop packet (and no cn):
 *   0: NET_XMIT_SUCCESS  skb should be transmitted
 *   1: NET_XMIT_DROP     skb should be dropped and cn
 *   2: NET_XMIT_CN       skb should be transmitted and cn
 *   3: -EPERM            skb should be dropped
 */
#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func)		\
	({								\
		u32 _flags = 0;						\
		bool _cn;						\
		u32 _ret;						\
		_ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(array, ctx, func, &_flags); \
		_cn = _flags & BPF_RET_SET_CN;				\
		if (_ret)						\
			_ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS);	\
		else							\
			_ret = (_cn ? NET_XMIT_DROP : -EPERM);		\
		_ret;							\
	})

#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
extern struct mutex bpf_stats_enabled_mutex;

/*
 * Block execution of BPF programs attached to instrumentation (perf,
 * kprobes, tracepoints) to prevent deadlocks on map operations as any of
 * these events can happen inside a region which holds a map bucket lock
 * and can deadlock on it.
 */
static inline void bpf_disable_instrumentation(void)
{
	migrate_disable();
	this_cpu_inc(bpf_prog_active);
}

static inline void bpf_enable_instrumentation(void)
{
	this_cpu_dec(bpf_prog_active);
	migrate_enable();
}

extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;
extern const struct file_operations bpf_iter_fops;

#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	extern const struct bpf_prog_ops _name ## _prog_ops; \
	extern const struct bpf_verifier_ops _name ## _verifier_ops;
#define BPF_MAP_TYPE(_id, _ops) \
	extern const struct bpf_map_ops _ops;
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE

extern const struct bpf_prog_ops bpf_offload_prog_ops;
extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
extern const struct bpf_verifier_ops xdp_analyzer_ops;

struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv);
void bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
void bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);

struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
void bpf_map_inc(struct bpf_map *map);
void bpf_map_inc_with_uref(struct bpf_map *map);
struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
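/*
 * Illustrative sketch (assumption, not kernel code): bpf_disable_instrumentation()
 * and bpf_enable_instrumentation() above are paired around syscall-side map
 * updates that take bucket locks, so a kprobe/tracepoint program firing in
 * between cannot recurse into the same map and deadlock:
 *
 *	bpf_disable_instrumentation();
 *	rcu_read_lock();
 *	err = map->ops->map_update_elem(map, key, value, flags);
 *	rcu_read_unlock();
 *	bpf_enable_instrumentation();
 */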
void *bpf_map_area_alloc(u64 size, int numa_node);
void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
void bpf_map_area_free(void *base);
bool bpf_map_write_active(const struct bpf_map *map);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
int generic_map_lookup_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr);
int generic_map_update_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr);
int generic_map_delete_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr);
struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);

#ifdef CONFIG_MEMCG_KMEM
void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
			   int node);
void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
				    size_t align, gfp_t flags);
#else
static inline void *
bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
		     int node)
{
	return kmalloc_node(size, flags, node);
}

static inline void *
bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
{
	return kzalloc(size, flags);
}

static inline void __percpu *
bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
		     gfp_t flags)
{
	return __alloc_percpu_gfp(size, align, flags);
}
#endif

extern int sysctl_unprivileged_bpf_disabled;

static inline bool bpf_allow_ptr_leaks(void)
{
	return perfmon_capable();
}

static inline bool bpf_allow_uninit_stack(void)
{
	return perfmon_capable();
}

static inline bool bpf_allow_ptr_to_map_access(void)
{
	return perfmon_capable();
}

static inline bool bpf_bypass_spec_v1(void)
{
	return perfmon_capable();
}

static inline bool bpf_bypass_spec_v4(void)
{
	return perfmon_capable();
}

int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);

void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
		   const struct bpf_link_ops *ops, struct bpf_prog *prog);
int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
int bpf_link_settle(struct bpf_link_primer *primer);
void bpf_link_cleanup(struct bpf_link_primer *primer);
void bpf_link_inc(struct bpf_link *link);
void bpf_link_put(struct bpf_link *link);
int bpf_link_new_fd(struct bpf_link *link);
struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd);
struct bpf_link *bpf_link_get_from_fd(u32 ufd);

int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
int bpf_obj_get_user(const char __user *pathname, int flags);

#define BPF_ITER_FUNC_PREFIX "bpf_iter_"
#define DEFINE_BPF_ITER_FUNC(target, args...)			\
	extern int bpf_iter_ ## target(args);			\
	int __init bpf_iter_ ## target(args) { return 0; }

struct bpf_iter_aux_info {
	struct bpf_map *map;
};

typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog,
					union bpf_iter_link_info *linfo,
					struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux,
					struct seq_file *seq);
typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux,
					 struct bpf_link_info *info);
typedef const struct bpf_func_proto *
(*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id,
			     const struct bpf_prog *prog);

enum bpf_iter_feature {
	BPF_ITER_RESCHED	= BIT(0),
};

#define BPF_ITER_CTX_ARG_MAX 2
struct bpf_iter_reg {
	const char *target;
	bpf_iter_attach_target_t attach_target;
	bpf_iter_detach_target_t detach_target;
	bpf_iter_show_fdinfo_t show_fdinfo;
	bpf_iter_fill_link_info_t fill_link_info;
	bpf_iter_get_func_proto_t get_func_proto;
	u32 ctx_arg_info_size;
	u32 feature;
	struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
	const struct bpf_iter_seq_info *seq_info;
};

struct bpf_iter_meta {
	__bpf_md_ptr(struct seq_file *, seq);
	u64 session_id;
	u64 seq_num;
};

struct bpf_iter__bpf_map_elem {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct bpf_map *, map);
	__bpf_md_ptr(void *, key);
	__bpf_md_ptr(void *, value);
};

int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
bool bpf_iter_prog_supported(struct bpf_prog *prog);
const struct bpf_func_proto *
bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog);
int bpf_iter_new_fd(struct bpf_link *link);
bool bpf_link_is_iter(struct bpf_link *link);
struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop);
int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx);
void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux,
			      struct seq_file *seq);
int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux,
				struct bpf_link_info *info);

int map_set_for_each_callback_args(struct bpf_verifier_env *env,
				   struct bpf_func_state *caller,
				   struct bpf_func_state *callee);

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 flags);
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 flags);

int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);

int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags);
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags);
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);

int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size,
			     size_t actual_size);

/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
 * forced to use 'long' read/writes to try to atomically copy long counters.
 * Best-effort only. No barriers here, since it _will_ race with concurrent
 * updates from BPF programs. Called from bpf syscall and mostly used with
 * size 8 or 16 bytes, so ask compiler to inline it.
 */
static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
{
	const long *lsrc = src;
	long *ldst = dst;

	size /= sizeof(long);
	while (size--)
		*ldst++ = *lsrc++;
}

/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr);

#ifndef CONFIG_BPF_JIT_ALWAYS_ON
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
#endif

struct btf *bpf_get_btf_vmlinux(void);

/* Map specifics */
struct xdp_frame;
struct sk_buff;
struct bpf_dtab_netdev;
struct bpf_cpu_map_entry;

void __dev_flush(void);
int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
		    struct net_device *dev_rx);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
		    struct net_device *dev_rx);
int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
			  struct bpf_map *map, bool exclude_ingress);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog);
int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
			   struct bpf_prog *xdp_prog, struct bpf_map *map,
			   bool exclude_ingress);

void __cpu_map_flush(void);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
		    struct net_device *dev_rx);
int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
			     struct sk_buff *skb);

/* Return map's numa specified by userspace */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
{
	return (attr->map_flags & BPF_F_NUMA_NODE) ?
		attr->numa_node : NUMA_NO_NODE;
}
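
/* Illustrative sketch only: the intended use of bpf_long_memcpy() above is
 * copying small, 8-byte aligned counter values (e.g. per-CPU map values
 * gathered by the bpf syscall) long-at-a-time.  "pptr", "buf" and
 * "value_size" below are hypothetical; the real users are the per-CPU map
 * copy helpers declared earlier (bpf_percpu_hash_copy() and friends).
 */
#if 0
static void copy_percpu_value(void __percpu *pptr, void *buf, u32 value_size)
{
	/* round_up() keeps each per-CPU slot 8-byte aligned, matching the
	 * alignment assumption documented for bpf_long_memcpy().
	 */
	u32 size = round_up(value_size, 8);
	int cpu, off = 0;

	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(buf + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
}
#endif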

struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
int array_map_alloc_check(union bpf_attr *attr);

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr);
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr);
int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
			     const union bpf_attr *kattr,
			     union bpf_attr __user *uattr);
int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
				const union bpf_attr *kattr,
				union bpf_attr __user *uattr);
bool bpf_prog_test_check_kfunc_call(u32 kfunc_id, struct module *owner);
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
		    const struct bpf_prog *prog,
		    struct bpf_insn_access_aux *info);

static inline bool bpf_tracing_ctx_access(int off, int size,
					  enum bpf_access_type type)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return true;
}

static inline bool bpf_tracing_btf_ctx_access(int off, int size,
					      enum bpf_access_type type,
					      const struct bpf_prog *prog,
					      struct bpf_insn_access_aux *info)
{
	if (!bpf_tracing_ctx_access(off, size, type))
		return false;
	return btf_ctx_access(off, size, type, prog, info);
}

int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
		      const struct btf_type *t, int off, int size,
		      enum bpf_access_type atype,
		      u32 *next_btf_id);
bool btf_struct_ids_match(struct bpf_verifier_log *log,
			  const struct btf *btf, u32 id, int off,
			  const struct btf *need_btf, u32 need_type_id);

int btf_distill_func_proto(struct bpf_verifier_log *log,
			   struct btf *btf,
			   const struct btf_type *func_proto,
			   const char *func_name,
			   struct btf_func_model *m);

struct bpf_reg_state;
int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
				struct bpf_reg_state *regs);
int btf_check_kfunc_arg_match(struct bpf_verifier_env *env,
			      const struct btf *btf, u32 func_id,
			      struct bpf_reg_state *regs);
int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
			  struct bpf_reg_state *reg);
int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
			 struct btf *btf, const struct btf_type *t);

struct bpf_prog *bpf_prog_by_id(u32 id);
struct bpf_link *bpf_link_by_id(u32 id);

const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id);
void bpf_task_storage_free(struct task_struct *task);
bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog);
const struct btf_func_model *
bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
			 const struct bpf_insn *insn);
struct bpf_core_ctx {
	struct bpf_verifier_log *log;
	const struct btf *btf;
};

int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
		   int relo_idx, void *insn);
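
/* Illustrative sketch only: program types that expose raw tracing arguments
 * typically implement their ->is_valid_access() verifier callback by
 * delegating to bpf_tracing_btf_ctx_access() defined above, which
 * bounds-checks the ctx offset and then defers to BTF for the argument
 * type.  "foo_prog_is_valid_access" is a hypothetical name, and real
 * program types may add further checks before delegating.
 */
#if 0
static bool foo_prog_is_valid_access(int off, int size,
				     enum bpf_access_type type,
				     const struct bpf_prog *prog,
				     struct bpf_insn_access_aux *info)
{
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}
#endif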
#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
						     enum bpf_prog_type type,
						     bool attach_drv)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_prog_add(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_put(struct bpf_prog *prog)
{
}

static inline void bpf_prog_inc(struct bpf_prog *prog)
{
}

static inline struct bpf_prog *__must_check
bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
				 const struct bpf_link_ops *ops,
				 struct bpf_prog *prog)
{
}

static inline int bpf_link_prime(struct bpf_link *link,
				 struct bpf_link_primer *primer)
{
	return -EOPNOTSUPP;
}

static inline int bpf_link_settle(struct bpf_link_primer *primer)
{
	return -EOPNOTSUPP;
}

static inline void bpf_link_cleanup(struct bpf_link_primer *primer)
{
}

static inline void bpf_link_inc(struct bpf_link *link)
{
}

static inline void bpf_link_put(struct bpf_link *link)
{
}

static inline int bpf_obj_get_user(const char __user *pathname, int flags)
{
	return -EOPNOTSUPP;
}

static inline bool dev_map_can_have_prog(struct bpf_map *map)
{
	return false;
}

static inline void __dev_flush(void)
{
}

struct xdp_frame;
struct bpf_dtab_netdev;
struct bpf_cpu_map_entry;

static inline
int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
		    struct net_device *dev_rx)
{
	return 0;
}

static inline
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
		    struct net_device *dev_rx)
{
	return 0;
}

static inline
int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
			  struct bpf_map *map, bool exclude_ingress)
{
	return 0;
}

struct sk_buff;

static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
					   struct sk_buff *skb,
					   struct bpf_prog *xdp_prog)
{
	return 0;
}

static inline
int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
			   struct bpf_prog *xdp_prog, struct bpf_map *map,
			   bool exclude_ingress)
{
	return 0;
}

static inline void __cpu_map_flush(void)
{
}

static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
				  struct xdp_frame *xdpf,
				  struct net_device *dev_rx)
{
	return 0;
}

static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
					   struct sk_buff *skb)
{
	return -EOPNOTSUPP;
}

static inline bool cpu_map_prog_allowed(struct bpf_map *map)
{
	return false;
}

static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
						      enum bpf_prog_type type)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}
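
/* Illustrative sketch only: the !CONFIG_BPF_SYSCALL stubs above return
 * ERR_PTR(-EOPNOTSUPP) for pointer-returning APIs (or a plain error code),
 * so code written against the real API keeps compiling and failing
 * gracefully when BPF is disabled.  "foo_attach" and "foo_fd" are
 * hypothetical caller-side names.
 */
#if 0
static int foo_attach(u32 foo_fd)
{
	struct bpf_prog *prog = bpf_prog_get(foo_fd);

	if (IS_ERR(prog))
		return PTR_ERR(prog);	/* -EOPNOTSUPP when BPF_SYSCALL is off */
	/* ... use prog ... */
	bpf_prog_put(prog);
	return 0;
}
#endif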

static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog,
					    const union bpf_attr *kattr,
					    union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
						    const union bpf_attr *kattr,
						    union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
					      const union bpf_attr *kattr,
					      union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline bool bpf_prog_test_check_kfunc_call(u32 kfunc_id,
						  struct module *owner)
{
	return false;
}

static inline void bpf_map_put(struct bpf_map *map)
{
}

static inline struct bpf_prog *bpf_prog_by_id(u32 id)
{
	return ERR_PTR(-ENOTSUPP);
}

static inline const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
	return NULL;
}

static inline void bpf_task_storage_free(struct task_struct *task)
{
}

static inline bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
{
	return false;
}

static inline const struct btf_func_model *
bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
			 const struct bpf_insn *insn)
{
	return NULL;
}
#endif /* CONFIG_BPF_SYSCALL */

void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
			  struct btf_mod_pair *used_btfs, u32 len);

static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
						 enum bpf_prog_type type)
{
	return bpf_prog_get_type_dev(ufd, type, false);
}

void __bpf_free_used_maps(struct bpf_prog_aux *aux,
			  struct bpf_map **used_maps, u32 len);

bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);

int bpf_prog_offload_compile(struct bpf_prog *prog);
void bpf_prog_offload_destroy(struct bpf_prog *prog);
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog);

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags);
int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
int bpf_map_offload_get_next_key(struct bpf_map *map,
				 void *key, void *next_key);

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);

struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev);
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev);
bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);

#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
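
/* Illustrative sketch only: how a hypothetical offload-capable driver
 * ("foo") might use the bpf_offload_dev_* API declared above to make its
 * netdevs eligible for device-bound programs.  "struct foo_priv" and
 * "foo_bpf_dev_ops" (a driver-provided const struct bpf_prog_offload_ops)
 * are hypothetical, and error handling is trimmed.
 */
#if 0
static int foo_bpf_offload_setup(struct foo_priv *priv)
{
	priv->bpf_dev = bpf_offload_dev_create(&foo_bpf_dev_ops, priv);
	if (IS_ERR(priv->bpf_dev))
		return PTR_ERR(priv->bpf_dev);

	return bpf_offload_dev_netdev_register(priv->bpf_dev, priv->netdev);
}

static void foo_bpf_offload_teardown(struct foo_priv *priv)
{
	bpf_offload_dev_netdev_unregister(priv->bpf_dev, priv->netdev);
	bpf_offload_dev_destroy(priv->bpf_dev);
}
#endif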
static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
	return aux->offload_requested;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return unlikely(map->ops == &bpf_map_offload_ops);
}

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
void bpf_map_offload_map_free(struct bpf_map *map);
int bpf_prog_test_run_syscall(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr);

int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
void sock_map_unhash(struct sock *sk);
void sock_map_close(struct sock *sk, long timeout);
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
					union bpf_attr *attr)
{
	return -EOPNOTSUPP;
}

static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
{
	return false;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return false;
}

static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_map_offload_map_free(struct bpf_map *map)
{
}

static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog,
					    const union bpf_attr *kattr,
					    union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

#ifdef CONFIG_BPF_SYSCALL
static inline int sock_map_get_from_fd(const union bpf_attr *attr,
				       struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int sock_map_prog_detach(const union bpf_attr *attr,
				       enum bpf_prog_type ptype)
{
	return -EOPNOTSUPP;
}

static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
					   u64 flags)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */

#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags);
#else
static inline void bpf_sk_reuseport_detach(struct sock *sk)
{
}

#ifdef CONFIG_BPF_SYSCALL
static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
						     void *key, void *value)
{
	return -EOPNOTSUPP;
}

static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
						     void *key, void *value,
						     u64 map_flags)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */

/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_map_push_elem_proto;
extern const struct bpf_func_proto bpf_map_pop_elem_proto;
extern const struct bpf_func_proto bpf_map_peek_elem_proto;

extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_get_stack_proto;
extern const struct bpf_func_proto bpf_get_task_stack_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto_pe;
extern const struct bpf_func_proto bpf_get_stack_proto_pe;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
extern const struct bpf_func_proto bpf_spin_lock_proto;
extern const struct bpf_func_proto bpf_spin_unlock_proto;
extern const struct bpf_func_proto bpf_get_local_storage_proto;
extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;
extern const struct bpf_func_proto bpf_tcp_sock_proto;
extern const struct bpf_func_proto bpf_jiffies64_proto;
extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_event_output_data_proto;
extern const struct bpf_func_proto bpf_ringbuf_output_proto;
extern const struct bpf_func_proto bpf_ringbuf_reserve_proto;
extern const struct bpf_func_proto bpf_ringbuf_submit_proto;
extern const struct bpf_func_proto bpf_ringbuf_discard_proto;
extern const struct bpf_func_proto bpf_ringbuf_query_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_unix_sock_proto;
extern const struct bpf_func_proto bpf_copy_from_user_proto;
extern const struct bpf_func_proto bpf_snprintf_btf_proto;
extern const struct bpf_func_proto bpf_snprintf_proto;
extern const struct bpf_func_proto bpf_per_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto;
extern const struct bpf_func_proto bpf_sock_from_file_proto;
extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
extern const struct bpf_func_proto bpf_task_storage_get_proto;
extern const struct bpf_func_proto bpf_task_storage_delete_proto;
extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
extern const struct bpf_func_proto bpf_kallsyms_lookup_name_proto;
extern const struct bpf_func_proto bpf_find_vma_proto;
extern const struct bpf_func_proto bpf_loop_proto;
extern const struct bpf_func_proto bpf_strncmp_proto;

const struct bpf_func_proto *tracing_prog_func_proto(
	enum bpf_func_id func_id, const struct bpf_prog *prog);

/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

#if defined(CONFIG_NET)
bool bpf_sock_common_is_valid_access(int off, int size,
				     enum bpf_access_type type,
				     struct bpf_insn_access_aux *info);
bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
			      struct bpf_insn_access_aux *info);
u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
				const struct bpf_insn *si,
				struct bpf_insn *insn_buf,
				struct bpf_prog *prog,
				u32 *target_size);
#else
static inline bool bpf_sock_common_is_valid_access(int off, int size,
						   enum bpf_access_type type,
						   struct bpf_insn_access_aux *info)
{
	return false;
}
static inline bool bpf_sock_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    struct bpf_insn_access_aux *info)
{
	return false;
}
static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
					      const struct bpf_insn *si,
					      struct bpf_insn *insn_buf,
					      struct bpf_prog *prog,
					      u32 *target_size)
{
	return 0;
}
#endif

#ifdef CONFIG_INET
struct sk_reuseport_kern {
	struct sk_buff *skb;
	struct sock *sk;
	struct sock *selected_sk;
	struct sock *migrating_sk;
	void *data_end;
	u32 hash;
	u32 reuseport_id;
	bool bind_inany;
};
bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);

bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);
#else
static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
						   const struct bpf_insn *si,
						   struct bpf_insn *insn_buf,
						   struct bpf_prog *prog,
						   u32 *target_size)
{
	return 0;
}
static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
						   const struct bpf_insn *si,
						   struct bpf_insn *insn_buf,
						   struct bpf_prog *prog,
						   u32 *target_size)
{
	return 0;
}
#endif /* CONFIG_INET */

enum bpf_text_poke_type {
	BPF_MOD_CALL,
	BPF_MOD_JUMP,
};

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *addr1, void *addr2);

struct btf_id_set;
bool btf_id_set_contains(const struct btf_id_set *set, u32 id);

#define MAX_BPRINTF_VARARGS	12

int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
			u32 **bin_buf, u32 num_args);
void bpf_bprintf_cleanup(void);

#endif /* _LINUX_BPF_H */