/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1

#include <uapi/linux/bpf.h>

#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/mm_types.h>
#include <linux/wait.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/capability.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/bpfptr.h>

struct bpf_verifier_env;
struct bpf_verifier_log;
struct perf_event;
struct bpf_prog;
struct bpf_prog_aux;
struct bpf_map;
struct sock;
struct seq_file;
struct btf;
struct btf_type;
struct exception_table_entry;
struct seq_operations;
struct bpf_iter_aux_info;
struct bpf_local_storage;
struct bpf_local_storage_map;
struct kobject;
struct mem_cgroup;
struct module;
struct bpf_func_state;

extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
extern struct kobject *btf_kobj;

typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
					struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
struct bpf_iter_seq_info {
	const struct seq_operations *seq_ops;
	bpf_iter_init_seq_priv_t init_seq_private;
	bpf_iter_fini_seq_priv_t fini_seq_private;
	u32 seq_priv_size;
};

/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
	/* funcs callable from userspace (via syscall) */
	int (*map_alloc_check)(union bpf_attr *attr);
	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
	void (*map_release)(struct bpf_map *map, struct file *map_file);
	void (*map_free)(struct bpf_map *map);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
	void (*map_release_uref)(struct bpf_map *map);
	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
	int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key,
					  void *value, u64 flags);
	int (*map_lookup_and_delete_batch)(struct bpf_map *map,
					   const union bpf_attr *attr,
					   union bpf_attr __user *uattr);
	int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);

	/* funcs callable from userspace and from eBPF programs */
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_map *map, void *key);
	int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
	int (*map_pop_elem)(struct bpf_map *map, void *value);
	int (*map_peek_elem)(struct bpf_map *map, void *value);

	/* funcs called by prog_array and perf_event_array map */
	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
				int fd);
	void (*map_fd_put_ptr)(void *ptr);
	int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
	u32 (*map_fd_sys_lookup_elem)(void *ptr);
	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
				  struct seq_file *m);
	int (*map_check_btf)(const struct bpf_map *map,
			     const struct btf *btf,
			     const struct btf_type *key_type,
			     const struct btf_type *value_type);

	/* Prog poke tracking helpers. */
	int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
			     struct bpf_prog *new);

	/* Direct value access helpers. */
	int (*map_direct_value_addr)(const struct bpf_map *map,
				     u64 *imm, u32 off);
	int (*map_direct_value_meta)(const struct bpf_map *map,
				     u64 imm, u32 *off);
	int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
	__poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
			     struct poll_table_struct *pts);

	/* Functions called by bpf_local_storage maps */
	int (*map_local_storage_charge)(struct bpf_local_storage_map *smap,
					void *owner, u32 size);
	void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap,
					   void *owner, u32 size);
	struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);

	/* Misc helpers. */
	int (*map_redirect)(struct bpf_map *map, u32 ifindex, u64 flags);

	/* map_meta_equal must be implemented for maps that can be
	 * used as an inner map.  It is a runtime check to ensure
	 * that an inner map can be inserted into an outer map.
	 *
	 * Some properties of the inner map have been used during
	 * verification.  When inserting an inner map at runtime,
	 * map_meta_equal has to ensure that the map being inserted
	 * has the same properties the verifier has used earlier.
	 */
	bool (*map_meta_equal)(const struct bpf_map *meta0,
			       const struct bpf_map *meta1);

	int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
					      struct bpf_func_state *caller,
					      struct bpf_func_state *callee);
	int (*map_for_each_callback)(struct bpf_map *map, void *callback_fn,
				     void *callback_ctx, u64 flags);

	/* BTF name and id of struct allocated by map_alloc */
	const char * const map_btf_name;
	int *map_btf_id;

	/* bpf_iter info used to open a seq_file */
	const struct bpf_iter_seq_info *iter_seq_info;
};
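
/* A map type publishes a const ops table wiring a subset of the callbacks
 * above.  A condensed sketch, loosely modeled on the plain array map (the
 * example_* names are illustrative, not the array map's exact set):
 *
 *	const struct bpf_map_ops example_array_map_ops = {
 *		.map_meta_equal   = bpf_map_meta_equal,
 *		.map_alloc_check  = example_map_alloc_check,
 *		.map_alloc        = example_map_alloc,
 *		.map_free         = example_map_free,
 *		.map_get_next_key = example_map_get_next_key,
 *		.map_lookup_elem  = example_map_lookup_elem,
 *		.map_update_elem  = example_map_update_elem,
 *		.map_delete_elem  = example_map_delete_elem,
 *	};
 */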

struct bpf_map {
	/* The first two cachelines with read-mostly members of which some
	 * are also accessed in fast-path (e.g. ops, max_entries).
	 */
	const struct bpf_map_ops *ops ____cacheline_aligned;
	struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	enum bpf_map_type map_type;
	u32 key_size;
	u32 value_size;
	u32 max_entries;
	u32 map_flags;
	int spin_lock_off; /* >=0 valid offset, <0 error */
	int timer_off; /* >=0 valid offset, <0 error */
	u32 id;
	int numa_node;
	u32 btf_key_type_id;
	u32 btf_value_type_id;
	struct btf *btf;
#ifdef CONFIG_MEMCG_KMEM
	struct mem_cgroup *memcg;
#endif
	char name[BPF_OBJ_NAME_LEN];
	u32 btf_vmlinux_value_type_id;
	bool bypass_spec_v1;
	bool frozen; /* write-once; write-protected by freeze_mutex */
	/* 22 bytes hole */

	/* The 3rd and 4th cacheline with misc members to avoid false sharing
	 * particularly with refcounting.
	 */
	atomic64_t refcnt ____cacheline_aligned;
	atomic64_t usercnt;
	struct work_struct work;
	struct mutex freeze_mutex;
	u64 writecnt; /* writable mmap cnt; protected by freeze_mutex */
};

static inline bool map_value_has_spin_lock(const struct bpf_map *map)
{
	return map->spin_lock_off >= 0;
}

static inline bool map_value_has_timer(const struct bpf_map *map)
{
	return map->timer_off >= 0;
}

static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
{
	if (unlikely(map_value_has_spin_lock(map)))
		*(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
			(struct bpf_spin_lock){};
	if (unlikely(map_value_has_timer(map)))
		*(struct bpf_timer *)(dst + map->timer_off) =
			(struct bpf_timer){};
}

/* copy everything but bpf_spin_lock and bpf_timer. There could be one of each. */
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
	u32 s_off = 0, s_sz = 0, t_off = 0, t_sz = 0;

	if (unlikely(map_value_has_spin_lock(map))) {
		s_off = map->spin_lock_off;
		s_sz = sizeof(struct bpf_spin_lock);
	}
	if (unlikely(map_value_has_timer(map))) {
		t_off = map->timer_off;
		t_sz = sizeof(struct bpf_timer);
	}

	if (unlikely(s_sz || t_sz)) {
		/* Make (s_off, s_sz) the later of the two fields so the
		 * three copies below proceed low to high.
		 */
		if (s_off < t_off || !s_sz) {
			swap(s_off, t_off);
			swap(s_sz, t_sz);
		}
		memcpy(dst, src, t_off);
		memcpy(dst + t_off + t_sz,
		       src + t_off + t_sz,
		       s_off - t_off - t_sz);
		memcpy(dst + s_off + s_sz,
		       src + s_off + s_sz,
		       map->value_size - s_off - s_sz);
	} else {
		memcpy(dst, src, map->value_size);
	}
}
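
/* A worked example of the split copy above, with sizes as in the current
 * UAPI (sizeof(struct bpf_spin_lock) == 4, sizeof(struct bpf_timer) == 16)
 * and offsets made up for illustration:
 *
 *	value_size = 96, spin_lock_off = 16, timer_off = 48
 *
 * After the swap, (t_off, t_sz) = (16, 4) and (s_off, s_sz) = (48, 16),
 * so the three memcpy() calls copy bytes [0, 16), [20, 48) and [64, 96),
 * leaving dst's spin lock at [16, 20) and timer at [48, 64) untouched.
 */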

void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src);
void bpf_timer_cancel_and_free(void *timer);
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);

struct bpf_offload_dev;
struct bpf_offloaded_map;

struct bpf_map_dev_ops {
	int (*map_get_next_key)(struct bpf_offloaded_map *map,
				void *key, void *next_key);
	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value);
	int (*map_update_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
};

struct bpf_offloaded_map {
	struct bpf_map map;
	struct net_device *netdev;
	const struct bpf_map_dev_ops *dev_ops;
	void *dev_priv;
	struct list_head offloads;
};

static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
{
	return container_of(map, struct bpf_offloaded_map, map);
}

static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
	return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
		map->ops->map_seq_show_elem;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type);

bool bpf_map_meta_equal(const struct bpf_map *meta0,
			const struct bpf_map *meta1);

extern const struct bpf_map_ops bpf_map_offload_ops;

/* function argument constraints */
enum bpf_arg_type {
	ARG_DONTCARE = 0,	/* unused argument in helper function */

	/* the following constraints used to prototype
	 * bpf_map_lookup/update/delete_elem() functions
	 */
	ARG_CONST_MAP_PTR,	/* const argument used as pointer to bpf_map */
	ARG_PTR_TO_MAP_KEY,	/* pointer to stack used as map key */
	ARG_PTR_TO_MAP_VALUE,	/* pointer to stack used as map value */
	ARG_PTR_TO_UNINIT_MAP_VALUE,	/* pointer to valid memory used to store a map value */
	ARG_PTR_TO_MAP_VALUE_OR_NULL,	/* pointer to stack used as map value or NULL */

	/* the following constraints used to prototype bpf_memcmp() and other
	 * functions that access data on eBPF program stack
	 */
	ARG_PTR_TO_MEM,		/* pointer to valid memory (stack, packet, map value) */
	ARG_PTR_TO_MEM_OR_NULL,	/* pointer to valid memory or NULL */
	ARG_PTR_TO_UNINIT_MEM,	/* pointer to memory does not need to be initialized,
				 * helper function must fill all bytes or clear
				 * them in error case.
				 */

	ARG_CONST_SIZE,		/* number of bytes accessed from memory */
	ARG_CONST_SIZE_OR_ZERO,	/* number of bytes accessed from memory or 0 */

	ARG_PTR_TO_CTX,		/* pointer to context */
	ARG_PTR_TO_CTX_OR_NULL,	/* pointer to context or NULL */
	ARG_ANYTHING,		/* any (initialized) argument is ok */
	ARG_PTR_TO_SPIN_LOCK,	/* pointer to bpf_spin_lock */
	ARG_PTR_TO_SOCK_COMMON,	/* pointer to sock_common */
	ARG_PTR_TO_INT,		/* pointer to int */
	ARG_PTR_TO_LONG,	/* pointer to long */
	ARG_PTR_TO_SOCKET,	/* pointer to bpf_sock (fullsock) */
	ARG_PTR_TO_SOCKET_OR_NULL,	/* pointer to bpf_sock (fullsock) or NULL */
	ARG_PTR_TO_BTF_ID,	/* pointer to in-kernel struct */
	ARG_PTR_TO_ALLOC_MEM,	/* pointer to dynamically allocated memory */
	ARG_PTR_TO_ALLOC_MEM_OR_NULL,	/* pointer to dynamically allocated memory or NULL */
	ARG_CONST_ALLOC_SIZE_OR_ZERO,	/* number of allocated bytes requested */
	ARG_PTR_TO_BTF_ID_SOCK_COMMON,	/* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */
	ARG_PTR_TO_PERCPU_BTF_ID,	/* pointer to in-kernel percpu type */
	ARG_PTR_TO_FUNC,	/* pointer to a bpf program function */
	ARG_PTR_TO_STACK_OR_NULL,	/* pointer to stack or NULL */
	ARG_PTR_TO_CONST_STR,	/* pointer to a null terminated read-only string */
	ARG_PTR_TO_TIMER,	/* pointer to bpf_timer */
	__BPF_ARG_TYPE_MAX,
};

/* type of values returned from helper functions */
enum bpf_return_type {
	RET_INTEGER,			/* function returns integer */
	RET_VOID,			/* function doesn't return anything */
	RET_PTR_TO_MAP_VALUE,		/* returns a pointer to map elem value */
	RET_PTR_TO_MAP_VALUE_OR_NULL,	/* returns a pointer to map elem value or NULL */
	RET_PTR_TO_SOCKET_OR_NULL,	/* returns a pointer to a socket or NULL */
	RET_PTR_TO_TCP_SOCK_OR_NULL,	/* returns a pointer to a tcp_sock or NULL */
	RET_PTR_TO_SOCK_COMMON_OR_NULL,	/* returns a pointer to a sock_common or NULL */
	RET_PTR_TO_ALLOC_MEM_OR_NULL,	/* returns a pointer to dynamically allocated memory or NULL */
	RET_PTR_TO_BTF_ID_OR_NULL,	/* returns a pointer to a btf_id or NULL */
	RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL, /* returns a pointer to a valid memory or a btf_id or NULL */
	RET_PTR_TO_MEM_OR_BTF_ID,	/* returns a pointer to a valid memory or a btf_id */
	RET_PTR_TO_BTF_ID,		/* returns a pointer to a btf_id */
};

/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
 * instructions after verifying
 */
struct bpf_func_proto {
	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
	bool gpl_only;
	bool pkt_access;
	enum bpf_return_type ret_type;
	union {
		struct {
			enum bpf_arg_type arg1_type;
			enum bpf_arg_type arg2_type;
			enum bpf_arg_type arg3_type;
			enum bpf_arg_type arg4_type;
			enum bpf_arg_type arg5_type;
		};
		enum bpf_arg_type arg_type[5];
	};
	union {
		struct {
			u32 *arg1_btf_id;
			u32 *arg2_btf_id;
			u32 *arg3_btf_id;
			u32 *arg4_btf_id;
			u32 *arg5_btf_id;
		};
		u32 *arg_btf_id[5];
	};
	int *ret_btf_id; /* return value btf_id */
	bool (*allowed)(const struct bpf_prog *prog);
};
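
/* A helper's proto ties its C implementation to the constraints above.
 * A sketch of the usual declaration pattern (cf. kernel/bpf/helpers.c;
 * the bpf_example_* names are illustrative):
 *
 *	const struct bpf_func_proto bpf_example_lookup_proto = {
 *		.func		= bpf_example_lookup,
 *		.gpl_only	= false,
 *		.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_KEY,
 *	};
 */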

/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
 * the first argument to eBPF programs.
 * For socket filters: 'struct bpf_context *' == 'struct __sk_buff *'
 */
struct bpf_context;

enum bpf_access_type {
	BPF_READ = 1,
	BPF_WRITE = 2
};

/* types of values stored in eBPF registers */
/* Pointer types represent:
 * pointer
 * pointer + imm
 * pointer + (u16) var
 * pointer + (u16) var + imm
 * if (range > 0) then [ptr, ptr + range - off) is safe to access
 * if (id > 0) means that some 'var' was added
 * if (off > 0) means that 'imm' was added
 */
enum bpf_reg_type {
	NOT_INIT = 0,		 /* nothing was written into register */
	SCALAR_VALUE,		 /* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		 /* reg points to bpf_context */
	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
	PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
	PTR_TO_STACK,		 /* reg == frame_pointer + offset */
	PTR_TO_PACKET_META,	 /* skb->data - meta_len */
	PTR_TO_PACKET,		 /* reg points to skb->data */
	PTR_TO_PACKET_END,	 /* skb->data + headlen */
	PTR_TO_FLOW_KEYS,	 /* reg points to bpf_flow_keys */
	PTR_TO_SOCKET,		 /* reg points to struct bpf_sock */
	PTR_TO_SOCKET_OR_NULL,	 /* reg points to struct bpf_sock or NULL */
	PTR_TO_SOCK_COMMON,	 /* reg points to sock_common */
	PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */
	PTR_TO_TCP_SOCK,	 /* reg points to struct tcp_sock */
	PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
	PTR_TO_TP_BUFFER,	 /* reg points to a writable raw tp's buffer */
	PTR_TO_XDP_SOCK,	 /* reg points to struct xdp_sock */
	/* PTR_TO_BTF_ID points to a kernel struct that does not need
	 * to be null checked by the BPF program. This does not imply the
	 * pointer is _not_ null and in practice this can easily be a null
	 * pointer when reading pointer chains. The assumption is program
	 * context will handle null pointer dereference typically via fault
	 * handling. The verifier must keep this in mind and can make no
	 * assumptions about null or non-null when doing branch analysis.
	 * Further, when passed into helpers the helpers can not, without
	 * additional context, assume the value is non-null.
	 */
	PTR_TO_BTF_ID,
	/* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not
	 * been checked for null. Used primarily to inform the verifier
	 * an explicit null check is required for this struct.
	 */
	PTR_TO_BTF_ID_OR_NULL,
	PTR_TO_MEM,		 /* reg points to valid memory region */
	PTR_TO_MEM_OR_NULL,	 /* reg points to valid memory region or NULL */
	PTR_TO_RDONLY_BUF,	 /* reg points to a readonly buffer */
	PTR_TO_RDONLY_BUF_OR_NULL, /* reg points to a readonly buffer or NULL */
	PTR_TO_RDWR_BUF,	 /* reg points to a read/write buffer */
	PTR_TO_RDWR_BUF_OR_NULL, /* reg points to a read/write buffer or NULL */
	PTR_TO_PERCPU_BTF_ID,	 /* reg points to a percpu kernel variable */
	PTR_TO_FUNC,		 /* reg points to a bpf program function */
	PTR_TO_MAP_KEY,		 /* reg points to a map element key */
	__BPF_REG_TYPE_MAX,
};

/* The information passed from prog-specific *_is_valid_access
 * back to the verifier.
 */
struct bpf_insn_access_aux {
	enum bpf_reg_type reg_type;
	union {
		int ctx_field_size;
		struct {
			struct btf *btf;
			u32 btf_id;
		};
	};
	struct bpf_verifier_log *log; /* for verbose logs */
};

static inline void
bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
{
	aux->ctx_field_size = size;
}

struct bpf_prog_ops {
	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
			union bpf_attr __user *uattr);
};

struct bpf_verifier_ops {
	/* return eBPF function prototype for verification */
	const struct bpf_func_proto *
	(*get_func_proto)(enum bpf_func_id func_id,
			  const struct bpf_prog *prog);

	/* return true if 'size' wide access at offset 'off' within bpf_context
	 * with 'type' (read or write) is allowed
	 */
	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info);
	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
			    const struct bpf_prog *prog);
	int (*gen_ld_abs)(const struct bpf_insn *orig,
			  struct bpf_insn *insn_buf);
	u32 (*convert_ctx_access)(enum bpf_access_type type,
				  const struct bpf_insn *src,
				  struct bpf_insn *dst,
				  struct bpf_prog *prog, u32 *target_size);
	int (*btf_struct_access)(struct bpf_verifier_log *log,
				 const struct btf *btf,
				 const struct btf_type *t, int off, int size,
				 enum bpf_access_type atype,
				 u32 *next_btf_id);
	bool (*check_kfunc_call)(u32 kfunc_btf_id);
};

struct bpf_prog_offload_ops {
	/* verifier basic callbacks */
	int (*insn_hook)(struct bpf_verifier_env *env,
			 int insn_idx, int prev_insn_idx);
	int (*finalize)(struct bpf_verifier_env *env);
	/* verifier optimization callbacks (called after .finalize) */
	int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
			    struct bpf_insn *insn);
	int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
	/* program management callbacks */
	int (*prepare)(struct bpf_prog *prog);
	int (*translate)(struct bpf_prog *prog);
	void (*destroy)(struct bpf_prog *prog);
};

struct bpf_prog_offload {
	struct bpf_prog		*prog;
	struct net_device	*netdev;
	struct bpf_offload_dev	*offdev;
	void			*dev_priv;
	struct list_head	offloads;
	bool			dev_state;
	bool			opt_failed;
	void			*jited_image;
	u32			jited_len;
};

enum bpf_cgroup_storage_type {
	BPF_CGROUP_STORAGE_SHARED,
	BPF_CGROUP_STORAGE_PERCPU,
	__BPF_CGROUP_STORAGE_MAX
};

#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX

/* The longest tracepoint has 12 args.
 * See include/trace/bpf_probe.h
 */
#define MAX_BPF_FUNC_ARGS 12

/* The maximum number of arguments passed through registers
 * a single function may have.
 */
#define MAX_BPF_FUNC_REG_ARGS 5

struct btf_func_model {
	u8 ret_size;
	u8 nr_args;
	u8 arg_size[MAX_BPF_FUNC_ARGS];
};

/* Restore arguments before returning from trampoline to let original function
 * continue executing. This flag is used for fentry progs when there are no
 * fexit progs.
 */
#define BPF_TRAMP_F_RESTORE_REGS	BIT(0)
/* Call original function after fentry progs, but before fexit progs.
 * Makes sense for fentry/fexit, normal calls and indirect calls.
 */
#define BPF_TRAMP_F_CALL_ORIG		BIT(1)
/* Skip current frame and return to parent.  Makes sense for fentry/fexit
 * programs only. Should not be used with normal calls and indirect calls.
 */
#define BPF_TRAMP_F_SKIP_FRAME		BIT(2)

/* Store IP address of the caller on the trampoline stack,
 * so it's available for trampoline's programs.
 */
#define BPF_TRAMP_F_IP_ARG		BIT(3)

/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
 * bytes on x86.  Pick a number to fit into BPF_IMAGE_SIZE / 2
 */
#define BPF_MAX_TRAMP_PROGS 38

struct bpf_tramp_progs {
	struct bpf_prog *progs[BPF_MAX_TRAMP_PROGS];
	int nr_progs;
};

/* Different use cases for BPF trampoline:
 * 1. replace nop at the function entry (kprobe equivalent)
 *    flags = BPF_TRAMP_F_RESTORE_REGS
 *    fentry = a set of programs to run before returning from trampoline
 *
 * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
 *    flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
 *    orig_call = fentry_ip + MCOUNT_INSN_SIZE
 *    fentry = a set of programs to run before calling original function
 *    fexit = a set of programs to run after original function
 *
 * 3. replace direct call instruction anywhere in the function body
 *    or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid)
 *    With flags = 0
 *      fentry = a set of programs to run before returning from trampoline
 *    With flags = BPF_TRAMP_F_CALL_ORIG
 *      orig_call = original callback addr or direct function addr
 *      fentry = a set of programs to run before calling original function
 *      fexit = a set of programs to run after original function
 */
struct bpf_tramp_image;
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_progs *tprogs,
				void *orig_call);
/* these two functions are called from generated trampoline */
u64 notrace __bpf_prog_enter(struct bpf_prog *prog);
void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog);
void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start);
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);

struct bpf_ksym {
	unsigned long		 start;
	unsigned long		 end;
	char			 name[KSYM_NAME_LEN];
	struct list_head	 lnode;
	struct latch_tree_node	 tnode;
	bool			 prog;
};

enum bpf_tramp_prog_type {
	BPF_TRAMP_FENTRY,
	BPF_TRAMP_FEXIT,
	BPF_TRAMP_MODIFY_RETURN,
	BPF_TRAMP_MAX,
	BPF_TRAMP_REPLACE, /* more than MAX */
};

struct bpf_tramp_image {
	void *image;
	struct bpf_ksym ksym;
	struct percpu_ref pcref;
	void *ip_after_call;
	void *ip_epilogue;
	union {
		struct rcu_head rcu;
		struct work_struct work;
	};
};

struct bpf_trampoline {
	/* hlist for trampoline_table */
	struct hlist_node hlist;
	/* serializes access to fields of this trampoline */
	struct mutex mutex;
	refcount_t refcnt;
	u64 key;
	struct {
		struct btf_func_model model;
		void *addr;
		bool ftrace_managed;
	} func;
	/* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF
	 * program by replacing one of its functions. func.addr is the address
	 * of the function it replaced.
	 */
	struct bpf_prog *extension_prog;
	/* list of BPF programs using this trampoline */
	struct hlist_head progs_hlist[BPF_TRAMP_MAX];
	/* Number of attached programs. A counter per kind. */
	int progs_cnt[BPF_TRAMP_MAX];
	/* Executable image of trampoline */
	struct bpf_tramp_image *cur_image;
	u64 selector;
	struct module *mod;
};

struct bpf_attach_target_info {
	struct btf_func_model fmodel;
	long tgt_addr;
	const char *tgt_name;
	const struct btf_type *tgt_type;
};

#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */

struct bpf_dispatcher_prog {
	struct bpf_prog *prog;
	refcount_t users;
};

struct bpf_dispatcher {
	/* dispatcher mutex */
	struct mutex mutex;
	void *func;
	struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
	int num_progs;
	void *image;
	u32 image_off;
	struct bpf_ksym ksym;
};

static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func(
	const void *ctx,
	const struct bpf_insn *insnsi,
	unsigned int (*bpf_func)(const void *,
				 const struct bpf_insn *))
{
	return bpf_func(ctx, insnsi);
}
#ifdef CONFIG_BPF_JIT
int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
struct bpf_trampoline *bpf_trampoline_get(u64 key,
					  struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
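
/* Typical attach flow for an fentry/fexit program, sketched (error
 * handling elided; key and tgt_info come from the verifier via
 * bpf_check_attach_target()):
 *
 *	tr = bpf_trampoline_get(key, &tgt_info);
 *	if (!tr)
 *		return -ENOMEM;
 *	err = bpf_trampoline_link_prog(prog, tr);
 *	...
 *	WARN_ON(bpf_trampoline_unlink_prog(prog, tr));
 *	bpf_trampoline_put(tr);
 */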

#define BPF_DISPATCHER_INIT(_name) {				\
	.mutex = __MUTEX_INITIALIZER(_name.mutex),		\
	.func = &_name##_func,					\
	.progs = {},						\
	.num_progs = 0,						\
	.image = NULL,						\
	.image_off = 0,						\
	.ksym = {						\
		.name  = #_name,				\
		.lnode = LIST_HEAD_INIT(_name.ksym.lnode),	\
	},							\
}

#define DEFINE_BPF_DISPATCHER(name)					\
	noinline __nocfi unsigned int bpf_dispatcher_##name##_func(	\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		unsigned int (*bpf_func)(const void *,			\
					 const struct bpf_insn *))	\
	{								\
		return bpf_func(ctx, insnsi);				\
	}								\
	EXPORT_SYMBOL(bpf_dispatcher_##name##_func);			\
	struct bpf_dispatcher bpf_dispatcher_##name =			\
		BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
#define DECLARE_BPF_DISPATCHER(name)					\
	unsigned int bpf_dispatcher_##name##_func(			\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		unsigned int (*bpf_func)(const void *,			\
					 const struct bpf_insn *));	\
	extern struct bpf_dispatcher bpf_dispatcher_##name;
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
				struct bpf_prog *to);
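
/* Dispatcher usage splits into a definition site and call sites.  A sketch
 * ("mydisp" is illustrative; the "xdp" dispatcher in net/core/filter.c
 * follows this pattern):
 *
 *	DEFINE_BPF_DISPATCHER(mydisp)
 *
 *	// run a program through the dispatcher trampoline
 *	ret = BPF_DISPATCHER_FUNC(mydisp)(ctx, prog->insnsi, prog->bpf_func);
 *
 *	// re-JIT the dispatch table when the attached program set changes
 *	bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(mydisp), old, new);
 */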

/* Called only from JIT-enabled code, so there's no need for stubs. */
void *bpf_jit_alloc_exec_page(void);
void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym);
int bpf_jit_charge_modmem(u32 pages);
void bpf_jit_uncharge_modmem(u32 pages);
#else
static inline int bpf_trampoline_link_prog(struct bpf_prog *prog,
					   struct bpf_trampoline *tr)
{
	return -ENOTSUPP;
}
static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog,
					     struct bpf_trampoline *tr)
{
	return -ENOTSUPP;
}
static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
							struct bpf_attach_target_info *tgt_info)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
#define DECLARE_BPF_DISPATCHER(name)
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func
#define BPF_DISPATCHER_PTR(name) NULL
static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
					      struct bpf_prog *from,
					      struct bpf_prog *to) {}
static inline bool is_bpf_image_address(unsigned long address)
{
	return false;
}
#endif

struct bpf_func_info_aux {
	u16 linkage;
	bool unreliable;
};

enum bpf_jit_poke_reason {
	BPF_POKE_REASON_TAIL_CALL,
};

/* Descriptor of pokes pointing /into/ the JITed image. */
struct bpf_jit_poke_descriptor {
	void *tailcall_target;
	void *tailcall_bypass;
	void *bypass_addr;
	void *aux;
	union {
		struct {
			struct bpf_map *map;
			u32 key;
		} tail_call;
	};
	bool tailcall_target_stable;
	u8 adj_off;
	u16 reason;
	u32 insn_idx;
};

/* reg_type info for ctx arguments */
struct bpf_ctx_arg_aux {
	u32 offset;
	enum bpf_reg_type reg_type;
	u32 btf_id;
};

struct btf_mod_pair {
	struct btf *btf;
	struct module *module;
};

struct bpf_kfunc_desc_tab;

struct bpf_prog_aux {
	atomic64_t refcnt;
	u32 used_map_cnt;
	u32 used_btf_cnt;
	u32 max_ctx_offset;
	u32 max_pkt_offset;
	u32 max_tp_access;
	u32 stack_depth;
	u32 id;
	u32 func_cnt; /* used by non-func prog as the number of func progs */
	u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
	u32 attach_btf_id; /* in-kernel BTF type id to attach to */
	u32 ctx_arg_info_size;
	u32 max_rdonly_access;
	u32 max_rdwr_access;
	struct btf *attach_btf;
	const struct bpf_ctx_arg_aux *ctx_arg_info;
	struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */
	struct bpf_prog *dst_prog;
	struct bpf_trampoline *dst_trampoline;
	enum bpf_prog_type saved_dst_prog_type;
	enum bpf_attach_type saved_dst_attach_type;
	bool verifier_zext; /* Zero extensions have been inserted by verifier. */
	bool offload_requested;
	bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
	bool func_proto_unreliable;
	bool sleepable;
	bool tail_call_reachable;
	struct hlist_node tramp_hlist;
	/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
	const struct btf_type *attach_func_proto;
	/* function name for valid attach_btf_id */
	const char *attach_func_name;
	struct bpf_prog **func;
	void *jit_data; /* JIT specific data. arch dependent */
	struct bpf_jit_poke_descriptor *poke_tab;
	struct bpf_kfunc_desc_tab *kfunc_tab;
	u32 size_poke_tab;
	struct bpf_ksym ksym;
	const struct bpf_prog_ops *ops;
	struct bpf_map **used_maps;
	struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */
	struct btf_mod_pair *used_btfs;
	struct bpf_prog *prog;
	struct user_struct *user;
	u64 load_time; /* ns since boottime */
	struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
	void *security;
#endif
	struct bpf_prog_offload *offload;
	struct btf *btf;
	struct bpf_func_info *func_info;
	struct bpf_func_info_aux *func_info_aux;
	/* bpf_line_info loaded from userspace. linfo->insn_off
	 * has the xlated insn offset.
	 * Both the main and sub prog share the same linfo.
	 * The subprog can access its first linfo by
	 * using the linfo_idx.
	 */
	struct bpf_line_info *linfo;
	/* jited_linfo is the jited addr of the linfo. It has a
	 * one to one mapping to linfo:
	 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
	 * Both the main and sub prog share the same jited_linfo.
	 * The subprog can access its first jited_linfo by
	 * using the linfo_idx.
	 */
	void **jited_linfo;
	u32 func_info_cnt;
	u32 nr_linfo;
	/* subprog can use linfo_idx to access its first linfo and
	 * jited_linfo.
	 * main prog always has linfo_idx == 0
	 */
	u32 linfo_idx;
	u32 num_exentries;
	struct exception_table_entry *extable;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};

struct bpf_array_aux {
	/* 'Ownership' of prog array is claimed by the first program that
	 * is going to use this map or by the first program whose FD is
	 * stored in the map to make sure that all callers and callees have
	 * the same prog type and JITed flag.
	 */
	enum bpf_prog_type type;
	bool jited;
	/* Programs with direct jumps into programs part of this array. */
	struct list_head poke_progs;
	struct bpf_map *map;
	struct mutex poke_mutex;
	struct work_struct work;
};

struct bpf_link {
	atomic64_t refcnt;
	u32 id;
	enum bpf_link_type type;
	const struct bpf_link_ops *ops;
	struct bpf_prog *prog;
	struct work_struct work;
};

struct bpf_link_ops {
	void (*release)(struct bpf_link *link);
	void (*dealloc)(struct bpf_link *link);
	int (*detach)(struct bpf_link *link);
	int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
			   struct bpf_prog *old_prog);
	void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
	int (*fill_link_info)(const struct bpf_link *link,
			      struct bpf_link_info *info);
};

struct bpf_link_primer {
	struct bpf_link *link;
	struct file *file;
	int fd;
	u32 id;
};

struct bpf_struct_ops_value;
struct btf_member;

#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
struct bpf_struct_ops {
	const struct bpf_verifier_ops *verifier_ops;
	int (*init)(struct btf *btf);
	int (*check_member)(const struct btf_type *t,
			    const struct btf_member *member);
	int (*init_member)(const struct btf_type *t,
			   const struct btf_member *member,
			   void *kdata, const void *udata);
	int (*reg)(void *kdata);
	void (*unreg)(void *kdata);
	const struct btf_type *type;
	const struct btf_type *value_type;
	const char *name;
	struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
	u32 type_id;
	u32 value_id;
};

#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log);
bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		return bpf_struct_ops_get(data);
	else
		return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		bpf_struct_ops_put(data);
	else
		module_put(owner);
}
#else
static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
	return NULL;
}
static inline void bpf_struct_ops_init(struct btf *btf,
				       struct bpf_verifier_log *log)
{
}
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	module_put(owner);
}
static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
						     void *key,
						     void *value)
{
	return -EINVAL;
}
#endif

struct bpf_array {
	struct bpf_map map;
	u32 elem_size;
	u32 index_mask;
	struct bpf_array_aux *aux;
	union {
		char value[0] __aligned(8);
		void *ptrs[0] __aligned(8);
		void __percpu *pptrs[0] __aligned(8);
	};
};

#define BPF_COMPLEXITY_LIMIT_INSNS	1000000 /* yes. 1M insns */
#define MAX_TAIL_CALL_CNT 32

#define BPF_F_ACCESS_MASK	(BPF_F_RDONLY |		\
				 BPF_F_RDONLY_PROG |	\
				 BPF_F_WRONLY |		\
				 BPF_F_WRONLY_PROG)

#define BPF_MAP_CAN_READ  BIT(0)
#define BPF_MAP_CAN_WRITE BIT(1)

static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
{
	u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);

	/* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
	 * not possible.
	 */
	if (access_flags & BPF_F_RDONLY_PROG)
		return BPF_MAP_CAN_READ;
	else if (access_flags & BPF_F_WRONLY_PROG)
		return BPF_MAP_CAN_WRITE;
	else
		return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
}

static inline bool bpf_map_flags_access_ok(u32 access_flags)
{
	return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
		(BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
}
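
/* For example, a map created with BPF_F_RDONLY_PROG yields BPF_MAP_CAN_READ
 * (programs may only read it; the syscall side may still write), no prog-side
 * access flags yield both capabilities, and bpf_map_flags_access_ok() rejects
 * the contradictory BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG combination.
 */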

struct bpf_event_entry {
	struct perf_event *event;
	struct file *perf_file;
	struct file *map_file;
	struct rcu_head rcu;
};

bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);

const struct bpf_func_proto *bpf_get_trace_printk_proto(void);

typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
					unsigned long off, unsigned long len);
typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
					const struct bpf_insn *src,
					struct bpf_insn *dst,
					struct bpf_prog *prog,
					u32 *target_size);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);

/* an array of programs to be executed under rcu_lock.
 *
 * Typical usage:
 * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN);
 *
 * the structure returned by bpf_prog_array_alloc() should be populated
 * with program pointers and the last pointer must be NULL.
 * The user has to keep refcnt on the program and make sure the program
 * is removed from the array before bpf_prog_put().
 * The 'struct bpf_prog_array *' should only be replaced with xchg()
 * since other cpus are walking the array of pointers in parallel.
 */
struct bpf_prog_array_item {
	struct bpf_prog *prog;
	struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array {
	struct rcu_head rcu;
	struct bpf_prog_array_item items[];
};

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array *progs);
int bpf_prog_array_length(struct bpf_prog_array *progs);
bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
				__u32 __user *prog_ids, u32 cnt);

void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
				struct bpf_prog *old_prog);
int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index);
int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
			     struct bpf_prog *prog);
int bpf_prog_array_copy_info(struct bpf_prog_array *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			struct bpf_prog_array **new_array);

struct bpf_run_ctx {};

struct bpf_cg_run_ctx {
	struct bpf_run_ctx run_ctx;
	struct bpf_prog_array_item *prog_item;
};

/* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */
#define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE	(1 << 0)
/* BPF program asks to set CN on the packet. */
#define BPF_RET_SET_CN				(1 << 0)

#define BPF_PROG_RUN_ARRAY_FLAGS(array, ctx, func, ret_flags)		\
	({								\
		struct bpf_prog_array_item *_item;			\
		struct bpf_prog *_prog;					\
		struct bpf_prog_array *_array;				\
		struct bpf_run_ctx *old_run_ctx;			\
		struct bpf_cg_run_ctx run_ctx;				\
		u32 _ret = 1;						\
		u32 func_ret;						\
		migrate_disable();					\
		rcu_read_lock();					\
		_array = rcu_dereference(array);			\
		_item = &_array->items[0];				\
		old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);	\
		while ((_prog = READ_ONCE(_item->prog))) {		\
			run_ctx.prog_item = _item;			\
			func_ret = func(_prog, ctx);			\
			_ret &= (func_ret & 1);				\
			*(ret_flags) |= (func_ret >> 1);		\
			_item++;					\
		}							\
		bpf_reset_run_ctx(old_run_ctx);				\
		rcu_read_unlock();					\
		migrate_enable();					\
		_ret;							\
	})

#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null, set_cg_storage)	\
	({						\
		struct bpf_prog_array_item *_item;	\
		struct bpf_prog *_prog;			\
		struct bpf_prog_array *_array;		\
		struct bpf_run_ctx *old_run_ctx;	\
		struct bpf_cg_run_ctx run_ctx;		\
		u32 _ret = 1;				\
		migrate_disable();			\
		rcu_read_lock();			\
		_array = rcu_dereference(array);	\
		if (unlikely(check_non_null && !_array))\
			goto _out;			\
		_item = &_array->items[0];		\
		old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);\
		while ((_prog = READ_ONCE(_item->prog))) {	\
			run_ctx.prog_item = _item;	\
			_ret &= func(_prog, ctx);	\
			_item++;			\
		}					\
		bpf_reset_run_ctx(old_run_ctx);		\
_out:							\
		rcu_read_unlock();			\
		migrate_enable();			\
		_ret;					\
	})

/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs
 * so BPF programs can request cwr for TCP packets.
 *
 * Current cgroup skb programs can only return 0 or 1 (0 to drop the
 * packet).  This macro changes the behavior so the low order bit
 * indicates whether the packet should be dropped (0) or not (1)
 * and the next bit is a congestion notification bit. This could be
 * used by TCP to call tcp_enter_cwr()
 *
 * Hence, new allowed return values of CGROUP EGRESS BPF programs are:
 *   0: drop packet
 *   1: keep packet
 *   2: drop packet and cn
 *   3: keep packet and cn
 *
 * This macro then converts it to one of the NET_XMIT or an error
 * code that is then interpreted as drop packet (and no cn):
 *   0: NET_XMIT_SUCCESS  skb should be transmitted
 *   1: NET_XMIT_DROP     skb should be dropped and cn
 *   2: NET_XMIT_CN       skb should be transmitted and cn
 *   3: -EPERM            skb should be dropped
 */
#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func)		\
	({								\
		u32 _flags = 0;						\
		bool _cn;						\
		u32 _ret;						\
		_ret = BPF_PROG_RUN_ARRAY_FLAGS(array, ctx, func, &_flags); \
		_cn = _flags & BPF_RET_SET_CN;				\
		if (_ret)						\
			_ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS);	\
		else							\
			_ret = (_cn ? NET_XMIT_DROP : -EPERM);		\
		_ret;							\
	})

#define BPF_PROG_RUN_ARRAY(array, ctx, func)		\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, false, true)

#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func)	\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, true, false)
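
/* A typical caller, sketched (cgroup egress instead goes through the
 * BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY wrapper above):
 *
 *	struct bpf_prog_array __rcu *progs = ...;
 *	u32 ret;
 *
 *	ret = BPF_PROG_RUN_ARRAY(progs, skb, BPF_PROG_RUN);
 *	if (!ret)
 *		return -EPERM;	// some program in the array said "drop"
 */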

#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
extern struct mutex bpf_stats_enabled_mutex;

/*
 * Block execution of BPF programs attached to instrumentation (perf,
 * kprobes, tracepoints) to prevent deadlocks on map operations as any of
 * these events can happen inside a region which holds a map bucket lock
 * and can deadlock on it.
 *
 * Use the preemption safe inc/dec variants on RT because migrate disable
 * is preemptible on RT and preemption in the middle of the RMW operation
 * might lead to inconsistent state. Use the raw variants for non RT
 * kernels as migrate_disable() maps to preempt_disable() so the slightly
 * more expensive save operation can be avoided.
 */
static inline void bpf_disable_instrumentation(void)
{
	migrate_disable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		this_cpu_inc(bpf_prog_active);
	else
		__this_cpu_inc(bpf_prog_active);
}

static inline void bpf_enable_instrumentation(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		this_cpu_dec(bpf_prog_active);
	else
		__this_cpu_dec(bpf_prog_active);
	migrate_enable();
}
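
/* The syscall-side map code brackets sections that must not be re-entered
 * by tracing programs with this pair, e.g. (a sketch of the pattern in
 * kernel/bpf/syscall.c):
 *
 *	bpf_disable_instrumentation();
 *	err = bpf_map_update_value(map, f, key, value, attr->flags);
 *	bpf_enable_instrumentation();
 */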

static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx)
{
	struct bpf_run_ctx *old_ctx;

	old_ctx = current->bpf_ctx;
	current->bpf_ctx = new_ctx;
	return old_ctx;
}

static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
{
	current->bpf_ctx = old_ctx;
}

extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;
extern const struct file_operations bpf_iter_fops;

#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	extern const struct bpf_prog_ops _name ## _prog_ops; \
	extern const struct bpf_verifier_ops _name ## _verifier_ops;
#define BPF_MAP_TYPE(_id, _ops) \
	extern const struct bpf_map_ops _ops;
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE

extern const struct bpf_prog_ops bpf_offload_prog_ops;
extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
extern const struct bpf_verifier_ops xdp_analyzer_ops;

struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv);
void bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
void bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);

struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
void bpf_map_inc(struct bpf_map *map);
void bpf_map_inc_with_uref(struct bpf_map *map);
struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
void *bpf_map_area_alloc(u64 size, int numa_node);
void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
void bpf_map_area_free(void *base);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
int  generic_map_lookup_batch(struct bpf_map *map,
			      const union bpf_attr *attr,
			      union bpf_attr __user *uattr);
int  generic_map_update_batch(struct bpf_map *map,
			      const union bpf_attr *attr,
			      union bpf_attr __user *uattr);
int  generic_map_delete_batch(struct bpf_map *map,
			      const union bpf_attr *attr,
			      union bpf_attr __user *uattr);
struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);

#ifdef CONFIG_MEMCG_KMEM
void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
			   int node);
void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
				    size_t align, gfp_t flags);
#else
static inline void *
bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
		     int node)
{
	return kmalloc_node(size, flags, node);
}

static inline void *
bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
{
	return kzalloc(size, flags);
}

static inline void __percpu *
bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
		     gfp_t flags)
{
	return __alloc_percpu_gfp(size, align, flags);
}
#endif

extern int sysctl_unprivileged_bpf_disabled;

static inline bool bpf_allow_ptr_leaks(void)
{
	return perfmon_capable();
}

static inline bool bpf_allow_uninit_stack(void)
{
	return perfmon_capable();
}

static inline bool bpf_allow_ptr_to_map_access(void)
{
	return perfmon_capable();
}

static inline bool bpf_bypass_spec_v1(void)
{
	return perfmon_capable();
}

static inline bool bpf_bypass_spec_v4(void)
{
	return perfmon_capable();
}

int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);

void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
		   const struct bpf_link_ops *ops, struct bpf_prog *prog);
int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
int bpf_link_settle(struct bpf_link_primer *primer);
void bpf_link_cleanup(struct bpf_link_primer *primer);
void bpf_link_inc(struct bpf_link *link);
void bpf_link_put(struct bpf_link *link);
int bpf_link_new_fd(struct bpf_link *link);
struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd);
struct bpf_link *bpf_link_get_from_fd(u32 ufd);

int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
int bpf_obj_get_user(const char __user *pathname, int flags);

#define BPF_ITER_FUNC_PREFIX "bpf_iter_"
#define DEFINE_BPF_ITER_FUNC(target, args...)			\
	extern int bpf_iter_ ## target(args);			\
	int __init bpf_iter_ ## target(args) { return 0; }

struct bpf_iter_aux_info {
	struct bpf_map *map;
};

typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog,
					union bpf_iter_link_info *linfo,
					struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux,
					struct seq_file *seq);
typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux,
					 struct bpf_link_info *info);
typedef const struct bpf_func_proto *
(*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id,
			     const struct bpf_prog *prog);

enum bpf_iter_feature {
	BPF_ITER_RESCHED	= BIT(0),
};

#define BPF_ITER_CTX_ARG_MAX 2
struct bpf_iter_reg {
	const char *target;
	bpf_iter_attach_target_t attach_target;
	bpf_iter_detach_target_t detach_target;
	bpf_iter_show_fdinfo_t show_fdinfo;
	bpf_iter_fill_link_info_t fill_link_info;
	bpf_iter_get_func_proto_t get_func_proto;
	u32 ctx_arg_info_size;
	u32 feature;
	struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
	const struct bpf_iter_seq_info *seq_info;
};
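
/* An iterator target fills one of these and registers it from an initcall.
 * A sketch along the lines of the task iterator in kernel/bpf/task_iter.c
 * (fields shown are illustrative, not exhaustive):
 *
 *	static struct bpf_iter_reg task_reg_info = {
 *		.target			= "task",
 *		.feature		= BPF_ITER_RESCHED,
 *		.ctx_arg_info_size	= 1,
 *		.ctx_arg_info		= {
 *			{ offsetof(struct bpf_iter__task, task),
 *			  PTR_TO_BTF_ID_OR_NULL },
 *		},
 *		.seq_info		= &task_seq_info,
 *	};
 *
 *	static int __init task_iter_init(void)
 *	{
 *		return bpf_iter_reg_target(&task_reg_info);
 *	}
 */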

struct bpf_iter_meta {
	__bpf_md_ptr(struct seq_file *, seq);
	u64 session_id;
	u64 seq_num;
};

struct bpf_iter__bpf_map_elem {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct bpf_map *, map);
	__bpf_md_ptr(void *, key);
	__bpf_md_ptr(void *, value);
};

int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
bool bpf_iter_prog_supported(struct bpf_prog *prog);
const struct bpf_func_proto *
bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog);
int bpf_iter_new_fd(struct bpf_link *link);
bool bpf_link_is_iter(struct bpf_link *link);
struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop);
int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx);
void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux,
			      struct seq_file *seq);
int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux,
				struct bpf_link_info *info);

int map_set_for_each_callback_args(struct bpf_verifier_env *env,
				   struct bpf_func_state *caller,
				   struct bpf_func_state *callee);

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 flags);
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 flags);

int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);

int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags);
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags);
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);

int bpf_get_file_flag(int flags);
int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size,
			     size_t actual_size);

/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
 * forced to use 'long' read/writes to try to atomically copy long counters.
 * Best-effort only.  No barriers here, since it _will_ race with concurrent
 * updates from BPF programs. Called from bpf syscall and mostly used with
 * size 8 or 16 bytes, so ask compiler to inline it.
 */
static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
{
	const long *lsrc = src;
	long *ldst = dst;

	size /= sizeof(long);
	while (size--)
		*ldst++ = *lsrc++;
}
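
/* E.g. the per-CPU hash map uses this to snapshot each CPU's copy of a
 * value into the output buffer; for a pair of u64 counters this boils
 * down to (a sketch):
 *
 *	struct { u64 packets; u64 bytes; } snap;
 *
 *	bpf_long_memcpy(&snap, per_cpu_ptr(pptr, cpu), sizeof(snap));
 *
 * Each 8-byte word is copied with a single long load/store, so an
 * individual counter is never seen torn, though the two counters may be
 * mutually inconsistent.
 */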

/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr);

#ifndef CONFIG_BPF_JIT_ALWAYS_ON
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
#endif

struct btf *bpf_get_btf_vmlinux(void);

/* Map specifics */
struct xdp_buff;
struct sk_buff;
struct bpf_dtab_netdev;
struct bpf_cpu_map_entry;

void __dev_flush(void);
int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
		    struct net_device *dev_rx);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx);
int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx,
			  struct bpf_map *map, bool exclude_ingress);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog);
int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
			   struct bpf_prog *xdp_prog, struct bpf_map *map,
			   bool exclude_ingress);

void __cpu_map_flush(void);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
		    struct net_device *dev_rx);
int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
			     struct sk_buff *skb);

/* Return map's numa specified by userspace */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
{
	return (attr->map_flags & BPF_F_NUMA_NODE) ?
		attr->numa_node : NUMA_NO_NODE;
}

struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
int array_map_alloc_check(union bpf_attr *attr);

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr);
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr);
int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
			     const union bpf_attr *kattr,
			     union bpf_attr __user *uattr);
int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
				const union bpf_attr *kattr,
				union bpf_attr __user *uattr);
bool bpf_prog_test_check_kfunc_call(u32 kfunc_id);
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
		    const struct bpf_prog *prog,
		    struct bpf_insn_access_aux *info);
int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
		      const struct btf_type *t, int off, int size,
		      enum bpf_access_type atype,
		      u32 *next_btf_id);
bool btf_struct_ids_match(struct bpf_verifier_log *log,
			  const struct btf *btf, u32 id, int off,
			  const struct btf *need_btf, u32 need_type_id);

int btf_distill_func_proto(struct bpf_verifier_log *log,
			   struct btf *btf,
			   const struct btf_type *func_proto,
			   const char *func_name,
			   struct btf_func_model *m);

struct bpf_reg_state;
int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
				struct bpf_reg_state *regs);
int btf_check_kfunc_arg_match(struct bpf_verifier_env *env,
			      const struct btf *btf, u32 func_id,
			      struct bpf_reg_state *regs);
int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
			  struct bpf_reg_state *reg);
int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
			 struct btf *btf, const struct btf_type *t);

struct bpf_prog *bpf_prog_by_id(u32 id);
struct bpf_link *bpf_link_by_id(u32 id);

const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id);
void bpf_task_storage_free(struct task_struct *task);
bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog);
const struct btf_func_model *
bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
			 const struct bpf_insn *insn);
#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
						     enum bpf_prog_type type,
						     bool attach_drv)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_prog_add(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_put(struct bpf_prog *prog)
{
}

static inline void bpf_prog_inc(struct bpf_prog *prog)
{
}

static inline struct bpf_prog *__must_check
bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
				 const struct bpf_link_ops *ops,
				 struct bpf_prog *prog)
{
}

static inline int bpf_link_prime(struct bpf_link *link,
				 struct bpf_link_primer *primer)
{
	return -EOPNOTSUPP;
}

static inline int bpf_link_settle(struct bpf_link_primer *primer)
{
	return -EOPNOTSUPP;
}

static inline void bpf_link_cleanup(struct bpf_link_primer *primer)
{
}

static inline void bpf_link_inc(struct bpf_link *link)
{
}

static inline void bpf_link_put(struct bpf_link *link)
{
}

static inline int bpf_obj_get_user(const char __user *pathname, int flags)
{
	return -EOPNOTSUPP;
}

static inline bool dev_map_can_have_prog(struct bpf_map *map)
{
	return false;
}

static inline void __dev_flush(void)
{
}

struct xdp_buff;
struct bpf_dtab_netdev;
struct bpf_cpu_map_entry;

static inline
int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return 0;
}

static inline
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return 0;
}

static inline
int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx,
			  struct bpf_map *map, bool exclude_ingress)
{
	return 0;
}
static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
				 const struct bpf_link_ops *ops,
				 struct bpf_prog *prog)
{
}

static inline int bpf_link_prime(struct bpf_link *link,
				 struct bpf_link_primer *primer)
{
	return -EOPNOTSUPP;
}

static inline int bpf_link_settle(struct bpf_link_primer *primer)
{
	return -EOPNOTSUPP;
}

static inline void bpf_link_cleanup(struct bpf_link_primer *primer)
{
}

static inline void bpf_link_inc(struct bpf_link *link)
{
}

static inline void bpf_link_put(struct bpf_link *link)
{
}

static inline int bpf_obj_get_user(const char __user *pathname, int flags)
{
	return -EOPNOTSUPP;
}

static inline bool dev_map_can_have_prog(struct bpf_map *map)
{
	return false;
}

static inline void __dev_flush(void)
{
}

struct xdp_buff;
struct bpf_dtab_netdev;
struct bpf_cpu_map_entry;

static inline
int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return 0;
}

static inline
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return 0;
}

static inline
int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx,
			  struct bpf_map *map, bool exclude_ingress)
{
	return 0;
}

struct sk_buff;

static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
					   struct sk_buff *skb,
					   struct bpf_prog *xdp_prog)
{
	return 0;
}

static inline
int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
			   struct bpf_prog *xdp_prog, struct bpf_map *map,
			   bool exclude_ingress)
{
	return 0;
}

static inline void __cpu_map_flush(void)
{
}

static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
				  struct xdp_buff *xdp,
				  struct net_device *dev_rx)
{
	return 0;
}

static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
					   struct sk_buff *skb)
{
	return -EOPNOTSUPP;
}

static inline bool cpu_map_prog_allowed(struct bpf_map *map)
{
	return false;
}

static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
						      enum bpf_prog_type type)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog,
					    const union bpf_attr *kattr,
					    union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
						   const union bpf_attr *kattr,
						   union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
					      const union bpf_attr *kattr,
					      union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline bool bpf_prog_test_check_kfunc_call(u32 kfunc_id)
{
	return false;
}
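
/*
 * Illustrative sketch of the link setup sequence these stubs mirror
 * (hypothetical attach path; "my_link_ops" is a placeholder, assuming
 * CONFIG_BPF_SYSCALL=y):
 *
 *	struct bpf_link_primer primer;
 *	int err;
 *
 *	bpf_link_init(&link->link, BPF_LINK_TYPE_UNSPEC, &my_link_ops, prog);
 *	err = bpf_link_prime(&link->link, &primer);
 *	if (err) {
 *		kfree(link);
 *		return err;
 *	}
 *	...attach to the target; on failure, bpf_link_cleanup(&primer)...
 *	return bpf_link_settle(&primer);	(returns the new link FD)
 */
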
static inline void bpf_map_put(struct bpf_map *map)
{
}

static inline struct bpf_prog *bpf_prog_by_id(u32 id)
{
	return ERR_PTR(-ENOTSUPP);
}

static inline const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
	return NULL;
}

static inline void bpf_task_storage_free(struct task_struct *task)
{
}

static inline bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
{
	return false;
}

static inline const struct btf_func_model *
bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
			 const struct bpf_insn *insn)
{
	return NULL;
}
#endif /* CONFIG_BPF_SYSCALL */

void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
			  struct btf_mod_pair *used_btfs, u32 len);

static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
						 enum bpf_prog_type type)
{
	return bpf_prog_get_type_dev(ufd, type, false);
}

void __bpf_free_used_maps(struct bpf_prog_aux *aux,
			  struct bpf_map **used_maps, u32 len);

bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);

int bpf_prog_offload_compile(struct bpf_prog *prog);
void bpf_prog_offload_destroy(struct bpf_prog *prog);
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog);

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags);
int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
int bpf_map_offload_get_next_key(struct bpf_map *map,
				 void *key, void *next_key);

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);

struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev);
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev);
bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);
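
/*
 * Illustrative sketch of the driver-side offload pattern (names such as
 * "my_offload_ops" are placeholders): a driver creates one offload device
 * and registers each of its netdevs against it:
 *
 *	struct bpf_offload_dev *bpf_dev;
 *
 *	bpf_dev = bpf_offload_dev_create(&my_offload_ops, priv);
 *	if (IS_ERR(bpf_dev))
 *		return PTR_ERR(bpf_dev);
 *	err = bpf_offload_dev_netdev_register(bpf_dev, netdev);
 *	...
 *	bpf_offload_dev_netdev_unregister(bpf_dev, netdev);
 *	bpf_offload_dev_destroy(bpf_dev);
 */
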
#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);

static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
	return aux->offload_requested;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return unlikely(map->ops == &bpf_map_offload_ops);
}

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
void bpf_map_offload_map_free(struct bpf_map *map);
int bpf_prog_test_run_syscall(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr);

int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
void sock_map_unhash(struct sock *sk);
void sock_map_close(struct sock *sk, long timeout);
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
					union bpf_attr *attr)
{
	return -EOPNOTSUPP;
}

/* Keep the signature const-qualified to match the CONFIG_NET variant above. */
static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
	return false;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return false;
}

static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_map_offload_map_free(struct bpf_map *map)
{
}

static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog,
					    const union bpf_attr *kattr,
					    union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

#ifdef CONFIG_BPF_SYSCALL
static inline int sock_map_get_from_fd(const union bpf_attr *attr,
				       struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int sock_map_prog_detach(const union bpf_attr *attr,
				       enum bpf_prog_type ptype)
{
	return -EOPNOTSUPP;
}

static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
					   u64 flags)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */

#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags);
#else
static inline void bpf_sk_reuseport_detach(struct sock *sk)
{
}

#ifdef CONFIG_BPF_SYSCALL
static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
						     void *key, void *value)
{
	return -EOPNOTSUPP;
}

static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
						     void *key, void *value,
						     u64 map_flags)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */
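
/*
 * Each *_proto object declared below describes one helper to the
 * verifier: its kernel entry point, return type and argument types. As a
 * rough sketch (the authoritative definitions live next to the helper
 * bodies, e.g. in kernel/bpf/helpers.c):
 *
 *	const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 *		.func		= bpf_map_lookup_elem,
 *		.gpl_only	= false,
 *		.pkt_access	= true,
 *		.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_KEY,
 *	};
 */
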
/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_map_push_elem_proto;
extern const struct bpf_func_proto bpf_map_pop_elem_proto;
extern const struct bpf_func_proto bpf_map_peek_elem_proto;

extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_get_stack_proto;
extern const struct bpf_func_proto bpf_get_task_stack_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto_pe;
extern const struct bpf_func_proto bpf_get_stack_proto_pe;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
extern const struct bpf_func_proto bpf_spin_lock_proto;
extern const struct bpf_func_proto bpf_spin_unlock_proto;
extern const struct bpf_func_proto bpf_get_local_storage_proto;
extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;
extern const struct bpf_func_proto bpf_tcp_sock_proto;
extern const struct bpf_func_proto bpf_jiffies64_proto;
extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_event_output_data_proto;
extern const struct bpf_func_proto bpf_ringbuf_output_proto;
extern const struct bpf_func_proto bpf_ringbuf_reserve_proto;
extern const struct bpf_func_proto bpf_ringbuf_submit_proto;
extern const struct bpf_func_proto bpf_ringbuf_discard_proto;
extern const struct bpf_func_proto bpf_ringbuf_query_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto;
extern const struct bpf_func_proto bpf_copy_from_user_proto;
extern const struct bpf_func_proto bpf_snprintf_btf_proto;
extern const struct bpf_func_proto bpf_snprintf_proto;
extern const struct bpf_func_proto bpf_per_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto;
extern const struct bpf_func_proto bpf_sock_from_file_proto;
extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
extern const struct bpf_func_proto bpf_task_storage_get_proto;
extern const struct bpf_func_proto bpf_task_storage_delete_proto;
extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
extern const struct bpf_func_proto bpf_sk_getsockopt_proto;

const struct bpf_func_proto *bpf_tracing_func_proto(
	enum bpf_func_id func_id, const struct bpf_prog *prog);

const struct bpf_func_proto *tracing_prog_func_proto(
	enum bpf_func_id func_id, const struct bpf_prog *prog);
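
/*
 * Illustrative sketch (hypothetical callback): a program type's
 * get_func_proto hook typically handles its own helpers and falls back
 * to bpf_base_func_proto() for the generic ones:
 *
 *	static const struct bpf_func_proto *
 *	my_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_map_lookup_elem:
 *			return &bpf_map_lookup_elem_proto;
 *		default:
 *			return bpf_base_func_proto(func_id);
 *		}
 *	}
 */
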
/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

#if defined(CONFIG_NET)
bool bpf_sock_common_is_valid_access(int off, int size,
				     enum bpf_access_type type,
				     struct bpf_insn_access_aux *info);
bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
			      struct bpf_insn_access_aux *info);
u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
				const struct bpf_insn *si,
				struct bpf_insn *insn_buf,
				struct bpf_prog *prog,
				u32 *target_size);
#else
static inline bool bpf_sock_common_is_valid_access(int off, int size,
						   enum bpf_access_type type,
						   struct bpf_insn_access_aux *info)
{
	return false;
}
static inline bool bpf_sock_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    struct bpf_insn_access_aux *info)
{
	return false;
}
static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
					      const struct bpf_insn *si,
					      struct bpf_insn *insn_buf,
					      struct bpf_prog *prog,
					      u32 *target_size)
{
	return 0;
}
#endif

#ifdef CONFIG_INET
struct sk_reuseport_kern {
	struct sk_buff *skb;
	struct sock *sk;
	struct sock *selected_sk;
	struct sock *migrating_sk;
	void *data_end;
	u32 hash;
	u32 reuseport_id;
	bool bind_inany;
};
bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);

bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);
#else
static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
#endif /* CONFIG_INET */

enum bpf_text_poke_type {
	BPF_MOD_CALL,
	BPF_MOD_JUMP,
};

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *addr1, void *addr2);

struct btf_id_set;
bool btf_id_set_contains(const struct btf_id_set *set, u32 id);

int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
			u32 **bin_buf, u32 num_args);
void bpf_bprintf_cleanup(void);

#endif /* _LINUX_BPF_H */