1 /* SPDX-License-Identifier: GPL-2.0-only */ 2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com 3 */ 4 #ifndef _LINUX_BPF_H 5 #define _LINUX_BPF_H 1 6 7 #include <uapi/linux/bpf.h> 8 9 #include <linux/workqueue.h> 10 #include <linux/file.h> 11 #include <linux/percpu.h> 12 #include <linux/err.h> 13 #include <linux/rbtree_latch.h> 14 #include <linux/numa.h> 15 #include <linux/mm_types.h> 16 #include <linux/wait.h> 17 #include <linux/refcount.h> 18 #include <linux/mutex.h> 19 #include <linux/module.h> 20 #include <linux/kallsyms.h> 21 #include <linux/capability.h> 22 #include <linux/sched/mm.h> 23 #include <linux/slab.h> 24 #include <linux/percpu-refcount.h> 25 #include <linux/bpfptr.h> 26 27 struct bpf_verifier_env; 28 struct bpf_verifier_log; 29 struct perf_event; 30 struct bpf_prog; 31 struct bpf_prog_aux; 32 struct bpf_map; 33 struct sock; 34 struct seq_file; 35 struct btf; 36 struct btf_type; 37 struct exception_table_entry; 38 struct seq_operations; 39 struct bpf_iter_aux_info; 40 struct bpf_local_storage; 41 struct bpf_local_storage_map; 42 struct kobject; 43 struct mem_cgroup; 44 struct module; 45 struct bpf_func_state; 46 47 extern struct idr btf_idr; 48 extern spinlock_t btf_idr_lock; 49 extern struct kobject *btf_kobj; 50 51 typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64); 52 typedef int (*bpf_iter_init_seq_priv_t)(void *private_data, 53 struct bpf_iter_aux_info *aux); 54 typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data); 55 struct bpf_iter_seq_info { 56 const struct seq_operations *seq_ops; 57 bpf_iter_init_seq_priv_t init_seq_private; 58 bpf_iter_fini_seq_priv_t fini_seq_private; 59 u32 seq_priv_size; 60 }; 61 62 /* map is generic key/value storage optionally accessible by eBPF programs */ 63 struct bpf_map_ops { 64 /* funcs callable from userspace (via syscall) */ 65 int (*map_alloc_check)(union bpf_attr *attr); 66 struct bpf_map *(*map_alloc)(union bpf_attr *attr); 67 void (*map_release)(struct bpf_map *map, struct file *map_file); 68 void (*map_free)(struct bpf_map *map); 69 int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key); 70 void (*map_release_uref)(struct bpf_map *map); 71 void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key); 72 int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr, 73 union bpf_attr __user *uattr); 74 int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key, 75 void *value, u64 flags); 76 int (*map_lookup_and_delete_batch)(struct bpf_map *map, 77 const union bpf_attr *attr, 78 union bpf_attr __user *uattr); 79 int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr, 80 union bpf_attr __user *uattr); 81 int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr, 82 union bpf_attr __user *uattr); 83 84 /* funcs callable from userspace and from eBPF programs */ 85 void *(*map_lookup_elem)(struct bpf_map *map, void *key); 86 int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags); 87 int (*map_delete_elem)(struct bpf_map *map, void *key); 88 int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags); 89 int (*map_pop_elem)(struct bpf_map *map, void *value); 90 int (*map_peek_elem)(struct bpf_map *map, void *value); 91 92 /* funcs called by prog_array and perf_event_array map */ 93 void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file, 94 int fd); 95 void (*map_fd_put_ptr)(void *ptr); 96 int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf); 97 u32 (*map_fd_sys_lookup_elem)(void *ptr); 98 void (*map_seq_show_elem)(struct bpf_map *map, void *key, 99 struct seq_file *m); 100 int (*map_check_btf)(const struct bpf_map *map, 101 const struct btf *btf, 102 const struct btf_type *key_type, 103 const struct btf_type *value_type); 104 105 /* Prog poke tracking helpers. */ 106 int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux); 107 void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux); 108 void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old, 109 struct bpf_prog *new); 110 111 /* Direct value access helpers. */ 112 int (*map_direct_value_addr)(const struct bpf_map *map, 113 u64 *imm, u32 off); 114 int (*map_direct_value_meta)(const struct bpf_map *map, 115 u64 imm, u32 *off); 116 int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma); 117 __poll_t (*map_poll)(struct bpf_map *map, struct file *filp, 118 struct poll_table_struct *pts); 119 120 /* Functions called by bpf_local_storage maps */ 121 int (*map_local_storage_charge)(struct bpf_local_storage_map *smap, 122 void *owner, u32 size); 123 void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap, 124 void *owner, u32 size); 125 struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner); 126 127 /* Misc helpers.*/ 128 int (*map_redirect)(struct bpf_map *map, u32 ifindex, u64 flags); 129 130 /* map_meta_equal must be implemented for maps that can be 131 * used as an inner map. It is a runtime check to ensure 132 * an inner map can be inserted to an outer map. 133 * 134 * Some properties of the inner map has been used during the 135 * verification time. When inserting an inner map at the runtime, 136 * map_meta_equal has to ensure the inserting map has the same 137 * properties that the verifier has used earlier. 138 */ 139 bool (*map_meta_equal)(const struct bpf_map *meta0, 140 const struct bpf_map *meta1); 141 142 143 int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env, 144 struct bpf_func_state *caller, 145 struct bpf_func_state *callee); 146 int (*map_for_each_callback)(struct bpf_map *map, 147 bpf_callback_t callback_fn, 148 void *callback_ctx, u64 flags); 149 150 /* BTF name and id of struct allocated by map_alloc */ 151 const char * const map_btf_name; 152 int *map_btf_id; 153 154 /* bpf_iter info used to open a seq_file */ 155 const struct bpf_iter_seq_info *iter_seq_info; 156 }; 157 158 struct bpf_map { 159 /* The first two cachelines with read-mostly members of which some 160 * are also accessed in fast-path (e.g. ops, max_entries). 161 */ 162 const struct bpf_map_ops *ops ____cacheline_aligned; 163 struct bpf_map *inner_map_meta; 164 #ifdef CONFIG_SECURITY 165 void *security; 166 #endif 167 enum bpf_map_type map_type; 168 u32 key_size; 169 u32 value_size; 170 u32 max_entries; 171 u64 map_extra; /* any per-map-type extra fields */ 172 u32 map_flags; 173 int spin_lock_off; /* >=0 valid offset, <0 error */ 174 int timer_off; /* >=0 valid offset, <0 error */ 175 u32 id; 176 int numa_node; 177 u32 btf_key_type_id; 178 u32 btf_value_type_id; 179 u32 btf_vmlinux_value_type_id; 180 struct btf *btf; 181 #ifdef CONFIG_MEMCG_KMEM 182 struct mem_cgroup *memcg; 183 #endif 184 char name[BPF_OBJ_NAME_LEN]; 185 bool bypass_spec_v1; 186 bool frozen; /* write-once; write-protected by freeze_mutex */ 187 /* 14 bytes hole */ 188 189 /* The 3rd and 4th cacheline with misc members to avoid false sharing 190 * particularly with refcounting. 191 */ 192 atomic64_t refcnt ____cacheline_aligned; 193 atomic64_t usercnt; 194 struct work_struct work; 195 struct mutex freeze_mutex; 196 atomic64_t writecnt; 197 }; 198 199 static inline bool map_value_has_spin_lock(const struct bpf_map *map) 200 { 201 return map->spin_lock_off >= 0; 202 } 203 204 static inline bool map_value_has_timer(const struct bpf_map *map) 205 { 206 return map->timer_off >= 0; 207 } 208 209 static inline void check_and_init_map_value(struct bpf_map *map, void *dst) 210 { 211 if (unlikely(map_value_has_spin_lock(map))) 212 *(struct bpf_spin_lock *)(dst + map->spin_lock_off) = 213 (struct bpf_spin_lock){}; 214 if (unlikely(map_value_has_timer(map))) 215 *(struct bpf_timer *)(dst + map->timer_off) = 216 (struct bpf_timer){}; 217 } 218 219 /* copy everything but bpf_spin_lock and bpf_timer. There could be one of each. */ 220 static inline void copy_map_value(struct bpf_map *map, void *dst, void *src) 221 { 222 u32 s_off = 0, s_sz = 0, t_off = 0, t_sz = 0; 223 224 if (unlikely(map_value_has_spin_lock(map))) { 225 s_off = map->spin_lock_off; 226 s_sz = sizeof(struct bpf_spin_lock); 227 } else if (unlikely(map_value_has_timer(map))) { 228 t_off = map->timer_off; 229 t_sz = sizeof(struct bpf_timer); 230 } 231 232 if (unlikely(s_sz || t_sz)) { 233 if (s_off < t_off || !s_sz) { 234 swap(s_off, t_off); 235 swap(s_sz, t_sz); 236 } 237 memcpy(dst, src, t_off); 238 memcpy(dst + t_off + t_sz, 239 src + t_off + t_sz, 240 s_off - t_off - t_sz); 241 memcpy(dst + s_off + s_sz, 242 src + s_off + s_sz, 243 map->value_size - s_off - s_sz); 244 } else { 245 memcpy(dst, src, map->value_size); 246 } 247 } 248 void copy_map_value_locked(struct bpf_map *map, void *dst, void *src, 249 bool lock_src); 250 void bpf_timer_cancel_and_free(void *timer); 251 int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size); 252 253 struct bpf_offload_dev; 254 struct bpf_offloaded_map; 255 256 struct bpf_map_dev_ops { 257 int (*map_get_next_key)(struct bpf_offloaded_map *map, 258 void *key, void *next_key); 259 int (*map_lookup_elem)(struct bpf_offloaded_map *map, 260 void *key, void *value); 261 int (*map_update_elem)(struct bpf_offloaded_map *map, 262 void *key, void *value, u64 flags); 263 int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key); 264 }; 265 266 struct bpf_offloaded_map { 267 struct bpf_map map; 268 struct net_device *netdev; 269 const struct bpf_map_dev_ops *dev_ops; 270 void *dev_priv; 271 struct list_head offloads; 272 }; 273 274 static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map) 275 { 276 return container_of(map, struct bpf_offloaded_map, map); 277 } 278 279 static inline bool bpf_map_offload_neutral(const struct bpf_map *map) 280 { 281 return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY; 282 } 283 284 static inline bool bpf_map_support_seq_show(const struct bpf_map *map) 285 { 286 return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) && 287 map->ops->map_seq_show_elem; 288 } 289 290 int map_check_no_btf(const struct bpf_map *map, 291 const struct btf *btf, 292 const struct btf_type *key_type, 293 const struct btf_type *value_type); 294 295 bool bpf_map_meta_equal(const struct bpf_map *meta0, 296 const struct bpf_map *meta1); 297 298 extern const struct bpf_map_ops bpf_map_offload_ops; 299 300 /* function argument constraints */ 301 enum bpf_arg_type { 302 ARG_DONTCARE = 0, /* unused argument in helper function */ 303 304 /* the following constraints used to prototype 305 * bpf_map_lookup/update/delete_elem() functions 306 */ 307 ARG_CONST_MAP_PTR, /* const argument used as pointer to bpf_map */ 308 ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */ 309 ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */ 310 ARG_PTR_TO_UNINIT_MAP_VALUE, /* pointer to valid memory used to store a map value */ 311 ARG_PTR_TO_MAP_VALUE_OR_NULL, /* pointer to stack used as map value or NULL */ 312 313 /* the following constraints used to prototype bpf_memcmp() and other 314 * functions that access data on eBPF program stack 315 */ 316 ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */ 317 ARG_PTR_TO_MEM_OR_NULL, /* pointer to valid memory or NULL */ 318 ARG_PTR_TO_UNINIT_MEM, /* pointer to memory does not need to be initialized, 319 * helper function must fill all bytes or clear 320 * them in error case. 321 */ 322 323 ARG_CONST_SIZE, /* number of bytes accessed from memory */ 324 ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */ 325 326 ARG_PTR_TO_CTX, /* pointer to context */ 327 ARG_PTR_TO_CTX_OR_NULL, /* pointer to context or NULL */ 328 ARG_ANYTHING, /* any (initialized) argument is ok */ 329 ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */ 330 ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */ 331 ARG_PTR_TO_INT, /* pointer to int */ 332 ARG_PTR_TO_LONG, /* pointer to long */ 333 ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */ 334 ARG_PTR_TO_SOCKET_OR_NULL, /* pointer to bpf_sock (fullsock) or NULL */ 335 ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */ 336 ARG_PTR_TO_ALLOC_MEM, /* pointer to dynamically allocated memory */ 337 ARG_PTR_TO_ALLOC_MEM_OR_NULL, /* pointer to dynamically allocated memory or NULL */ 338 ARG_CONST_ALLOC_SIZE_OR_ZERO, /* number of allocated bytes requested */ 339 ARG_PTR_TO_BTF_ID_SOCK_COMMON, /* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */ 340 ARG_PTR_TO_PERCPU_BTF_ID, /* pointer to in-kernel percpu type */ 341 ARG_PTR_TO_FUNC, /* pointer to a bpf program function */ 342 ARG_PTR_TO_STACK_OR_NULL, /* pointer to stack or NULL */ 343 ARG_PTR_TO_CONST_STR, /* pointer to a null terminated read-only string */ 344 ARG_PTR_TO_TIMER, /* pointer to bpf_timer */ 345 __BPF_ARG_TYPE_MAX, 346 }; 347 348 /* type of values returned from helper functions */ 349 enum bpf_return_type { 350 RET_INTEGER, /* function returns integer */ 351 RET_VOID, /* function doesn't return anything */ 352 RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */ 353 RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */ 354 RET_PTR_TO_SOCKET_OR_NULL, /* returns a pointer to a socket or NULL */ 355 RET_PTR_TO_TCP_SOCK_OR_NULL, /* returns a pointer to a tcp_sock or NULL */ 356 RET_PTR_TO_SOCK_COMMON_OR_NULL, /* returns a pointer to a sock_common or NULL */ 357 RET_PTR_TO_ALLOC_MEM_OR_NULL, /* returns a pointer to dynamically allocated memory or NULL */ 358 RET_PTR_TO_BTF_ID_OR_NULL, /* returns a pointer to a btf_id or NULL */ 359 RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL, /* returns a pointer to a valid memory or a btf_id or NULL */ 360 RET_PTR_TO_MEM_OR_BTF_ID, /* returns a pointer to a valid memory or a btf_id */ 361 RET_PTR_TO_BTF_ID, /* returns a pointer to a btf_id */ 362 }; 363 364 /* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs 365 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL 366 * instructions after verifying 367 */ 368 struct bpf_func_proto { 369 u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 370 bool gpl_only; 371 bool pkt_access; 372 enum bpf_return_type ret_type; 373 union { 374 struct { 375 enum bpf_arg_type arg1_type; 376 enum bpf_arg_type arg2_type; 377 enum bpf_arg_type arg3_type; 378 enum bpf_arg_type arg4_type; 379 enum bpf_arg_type arg5_type; 380 }; 381 enum bpf_arg_type arg_type[5]; 382 }; 383 union { 384 struct { 385 u32 *arg1_btf_id; 386 u32 *arg2_btf_id; 387 u32 *arg3_btf_id; 388 u32 *arg4_btf_id; 389 u32 *arg5_btf_id; 390 }; 391 u32 *arg_btf_id[5]; 392 }; 393 int *ret_btf_id; /* return value btf_id */ 394 bool (*allowed)(const struct bpf_prog *prog); 395 }; 396 397 /* bpf_context is intentionally undefined structure. Pointer to bpf_context is 398 * the first argument to eBPF programs. 399 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *' 400 */ 401 struct bpf_context; 402 403 enum bpf_access_type { 404 BPF_READ = 1, 405 BPF_WRITE = 2 406 }; 407 408 /* types of values stored in eBPF registers */ 409 /* Pointer types represent: 410 * pointer 411 * pointer + imm 412 * pointer + (u16) var 413 * pointer + (u16) var + imm 414 * if (range > 0) then [ptr, ptr + range - off) is safe to access 415 * if (id > 0) means that some 'var' was added 416 * if (off > 0) means that 'imm' was added 417 */ 418 enum bpf_reg_type { 419 NOT_INIT = 0, /* nothing was written into register */ 420 SCALAR_VALUE, /* reg doesn't contain a valid pointer */ 421 PTR_TO_CTX, /* reg points to bpf_context */ 422 CONST_PTR_TO_MAP, /* reg points to struct bpf_map */ 423 PTR_TO_MAP_VALUE, /* reg points to map element value */ 424 PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */ 425 PTR_TO_STACK, /* reg == frame_pointer + offset */ 426 PTR_TO_PACKET_META, /* skb->data - meta_len */ 427 PTR_TO_PACKET, /* reg points to skb->data */ 428 PTR_TO_PACKET_END, /* skb->data + headlen */ 429 PTR_TO_FLOW_KEYS, /* reg points to bpf_flow_keys */ 430 PTR_TO_SOCKET, /* reg points to struct bpf_sock */ 431 PTR_TO_SOCKET_OR_NULL, /* reg points to struct bpf_sock or NULL */ 432 PTR_TO_SOCK_COMMON, /* reg points to sock_common */ 433 PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */ 434 PTR_TO_TCP_SOCK, /* reg points to struct tcp_sock */ 435 PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */ 436 PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */ 437 PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */ 438 /* PTR_TO_BTF_ID points to a kernel struct that does not need 439 * to be null checked by the BPF program. This does not imply the 440 * pointer is _not_ null and in practice this can easily be a null 441 * pointer when reading pointer chains. The assumption is program 442 * context will handle null pointer dereference typically via fault 443 * handling. The verifier must keep this in mind and can make no 444 * assumptions about null or non-null when doing branch analysis. 445 * Further, when passed into helpers the helpers can not, without 446 * additional context, assume the value is non-null. 447 */ 448 PTR_TO_BTF_ID, 449 /* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not 450 * been checked for null. Used primarily to inform the verifier 451 * an explicit null check is required for this struct. 452 */ 453 PTR_TO_BTF_ID_OR_NULL, 454 PTR_TO_MEM, /* reg points to valid memory region */ 455 PTR_TO_MEM_OR_NULL, /* reg points to valid memory region or NULL */ 456 PTR_TO_RDONLY_BUF, /* reg points to a readonly buffer */ 457 PTR_TO_RDONLY_BUF_OR_NULL, /* reg points to a readonly buffer or NULL */ 458 PTR_TO_RDWR_BUF, /* reg points to a read/write buffer */ 459 PTR_TO_RDWR_BUF_OR_NULL, /* reg points to a read/write buffer or NULL */ 460 PTR_TO_PERCPU_BTF_ID, /* reg points to a percpu kernel variable */ 461 PTR_TO_FUNC, /* reg points to a bpf program function */ 462 PTR_TO_MAP_KEY, /* reg points to a map element key */ 463 __BPF_REG_TYPE_MAX, 464 }; 465 466 /* The information passed from prog-specific *_is_valid_access 467 * back to the verifier. 468 */ 469 struct bpf_insn_access_aux { 470 enum bpf_reg_type reg_type; 471 union { 472 int ctx_field_size; 473 struct { 474 struct btf *btf; 475 u32 btf_id; 476 }; 477 }; 478 struct bpf_verifier_log *log; /* for verbose logs */ 479 }; 480 481 static inline void 482 bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size) 483 { 484 aux->ctx_field_size = size; 485 } 486 487 static inline bool bpf_pseudo_func(const struct bpf_insn *insn) 488 { 489 return insn->code == (BPF_LD | BPF_IMM | BPF_DW) && 490 insn->src_reg == BPF_PSEUDO_FUNC; 491 } 492 493 struct bpf_prog_ops { 494 int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr, 495 union bpf_attr __user *uattr); 496 }; 497 498 struct bpf_verifier_ops { 499 /* return eBPF function prototype for verification */ 500 const struct bpf_func_proto * 501 (*get_func_proto)(enum bpf_func_id func_id, 502 const struct bpf_prog *prog); 503 504 /* return true if 'size' wide access at offset 'off' within bpf_context 505 * with 'type' (read or write) is allowed 506 */ 507 bool (*is_valid_access)(int off, int size, enum bpf_access_type type, 508 const struct bpf_prog *prog, 509 struct bpf_insn_access_aux *info); 510 int (*gen_prologue)(struct bpf_insn *insn, bool direct_write, 511 const struct bpf_prog *prog); 512 int (*gen_ld_abs)(const struct bpf_insn *orig, 513 struct bpf_insn *insn_buf); 514 u32 (*convert_ctx_access)(enum bpf_access_type type, 515 const struct bpf_insn *src, 516 struct bpf_insn *dst, 517 struct bpf_prog *prog, u32 *target_size); 518 int (*btf_struct_access)(struct bpf_verifier_log *log, 519 const struct btf *btf, 520 const struct btf_type *t, int off, int size, 521 enum bpf_access_type atype, 522 u32 *next_btf_id); 523 bool (*check_kfunc_call)(u32 kfunc_btf_id, struct module *owner); 524 }; 525 526 struct bpf_prog_offload_ops { 527 /* verifier basic callbacks */ 528 int (*insn_hook)(struct bpf_verifier_env *env, 529 int insn_idx, int prev_insn_idx); 530 int (*finalize)(struct bpf_verifier_env *env); 531 /* verifier optimization callbacks (called after .finalize) */ 532 int (*replace_insn)(struct bpf_verifier_env *env, u32 off, 533 struct bpf_insn *insn); 534 int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt); 535 /* program management callbacks */ 536 int (*prepare)(struct bpf_prog *prog); 537 int (*translate)(struct bpf_prog *prog); 538 void (*destroy)(struct bpf_prog *prog); 539 }; 540 541 struct bpf_prog_offload { 542 struct bpf_prog *prog; 543 struct net_device *netdev; 544 struct bpf_offload_dev *offdev; 545 void *dev_priv; 546 struct list_head offloads; 547 bool dev_state; 548 bool opt_failed; 549 void *jited_image; 550 u32 jited_len; 551 }; 552 553 enum bpf_cgroup_storage_type { 554 BPF_CGROUP_STORAGE_SHARED, 555 BPF_CGROUP_STORAGE_PERCPU, 556 __BPF_CGROUP_STORAGE_MAX 557 }; 558 559 #define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX 560 561 /* The longest tracepoint has 12 args. 562 * See include/trace/bpf_probe.h 563 */ 564 #define MAX_BPF_FUNC_ARGS 12 565 566 /* The maximum number of arguments passed through registers 567 * a single function may have. 568 */ 569 #define MAX_BPF_FUNC_REG_ARGS 5 570 571 struct btf_func_model { 572 u8 ret_size; 573 u8 nr_args; 574 u8 arg_size[MAX_BPF_FUNC_ARGS]; 575 }; 576 577 /* Restore arguments before returning from trampoline to let original function 578 * continue executing. This flag is used for fentry progs when there are no 579 * fexit progs. 580 */ 581 #define BPF_TRAMP_F_RESTORE_REGS BIT(0) 582 /* Call original function after fentry progs, but before fexit progs. 583 * Makes sense for fentry/fexit, normal calls and indirect calls. 584 */ 585 #define BPF_TRAMP_F_CALL_ORIG BIT(1) 586 /* Skip current frame and return to parent. Makes sense for fentry/fexit 587 * programs only. Should not be used with normal calls and indirect calls. 588 */ 589 #define BPF_TRAMP_F_SKIP_FRAME BIT(2) 590 /* Store IP address of the caller on the trampoline stack, 591 * so it's available for trampoline's programs. 592 */ 593 #define BPF_TRAMP_F_IP_ARG BIT(3) 594 /* Return the return value of fentry prog. Only used by bpf_struct_ops. */ 595 #define BPF_TRAMP_F_RET_FENTRY_RET BIT(4) 596 597 /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50 598 * bytes on x86. Pick a number to fit into BPF_IMAGE_SIZE / 2 599 */ 600 #define BPF_MAX_TRAMP_PROGS 38 601 602 struct bpf_tramp_progs { 603 struct bpf_prog *progs[BPF_MAX_TRAMP_PROGS]; 604 int nr_progs; 605 }; 606 607 /* Different use cases for BPF trampoline: 608 * 1. replace nop at the function entry (kprobe equivalent) 609 * flags = BPF_TRAMP_F_RESTORE_REGS 610 * fentry = a set of programs to run before returning from trampoline 611 * 612 * 2. replace nop at the function entry (kprobe + kretprobe equivalent) 613 * flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME 614 * orig_call = fentry_ip + MCOUNT_INSN_SIZE 615 * fentry = a set of program to run before calling original function 616 * fexit = a set of program to run after original function 617 * 618 * 3. replace direct call instruction anywhere in the function body 619 * or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid) 620 * With flags = 0 621 * fentry = a set of programs to run before returning from trampoline 622 * With flags = BPF_TRAMP_F_CALL_ORIG 623 * orig_call = original callback addr or direct function addr 624 * fentry = a set of program to run before calling original function 625 * fexit = a set of program to run after original function 626 */ 627 struct bpf_tramp_image; 628 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end, 629 const struct btf_func_model *m, u32 flags, 630 struct bpf_tramp_progs *tprogs, 631 void *orig_call); 632 /* these two functions are called from generated trampoline */ 633 u64 notrace __bpf_prog_enter(struct bpf_prog *prog); 634 void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start); 635 u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog); 636 void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start); 637 void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr); 638 void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr); 639 640 struct bpf_ksym { 641 unsigned long start; 642 unsigned long end; 643 char name[KSYM_NAME_LEN]; 644 struct list_head lnode; 645 struct latch_tree_node tnode; 646 bool prog; 647 }; 648 649 enum bpf_tramp_prog_type { 650 BPF_TRAMP_FENTRY, 651 BPF_TRAMP_FEXIT, 652 BPF_TRAMP_MODIFY_RETURN, 653 BPF_TRAMP_MAX, 654 BPF_TRAMP_REPLACE, /* more than MAX */ 655 }; 656 657 struct bpf_tramp_image { 658 void *image; 659 struct bpf_ksym ksym; 660 struct percpu_ref pcref; 661 void *ip_after_call; 662 void *ip_epilogue; 663 union { 664 struct rcu_head rcu; 665 struct work_struct work; 666 }; 667 }; 668 669 struct bpf_trampoline { 670 /* hlist for trampoline_table */ 671 struct hlist_node hlist; 672 /* serializes access to fields of this trampoline */ 673 struct mutex mutex; 674 refcount_t refcnt; 675 u64 key; 676 struct { 677 struct btf_func_model model; 678 void *addr; 679 bool ftrace_managed; 680 } func; 681 /* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF 682 * program by replacing one of its functions. func.addr is the address 683 * of the function it replaced. 684 */ 685 struct bpf_prog *extension_prog; 686 /* list of BPF programs using this trampoline */ 687 struct hlist_head progs_hlist[BPF_TRAMP_MAX]; 688 /* Number of attached programs. A counter per kind. */ 689 int progs_cnt[BPF_TRAMP_MAX]; 690 /* Executable image of trampoline */ 691 struct bpf_tramp_image *cur_image; 692 u64 selector; 693 struct module *mod; 694 }; 695 696 struct bpf_attach_target_info { 697 struct btf_func_model fmodel; 698 long tgt_addr; 699 const char *tgt_name; 700 const struct btf_type *tgt_type; 701 }; 702 703 #define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */ 704 705 struct bpf_dispatcher_prog { 706 struct bpf_prog *prog; 707 refcount_t users; 708 }; 709 710 struct bpf_dispatcher { 711 /* dispatcher mutex */ 712 struct mutex mutex; 713 void *func; 714 struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX]; 715 int num_progs; 716 void *image; 717 u32 image_off; 718 struct bpf_ksym ksym; 719 }; 720 721 static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func( 722 const void *ctx, 723 const struct bpf_insn *insnsi, 724 unsigned int (*bpf_func)(const void *, 725 const struct bpf_insn *)) 726 { 727 return bpf_func(ctx, insnsi); 728 } 729 #ifdef CONFIG_BPF_JIT 730 int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr); 731 int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr); 732 struct bpf_trampoline *bpf_trampoline_get(u64 key, 733 struct bpf_attach_target_info *tgt_info); 734 void bpf_trampoline_put(struct bpf_trampoline *tr); 735 int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs); 736 #define BPF_DISPATCHER_INIT(_name) { \ 737 .mutex = __MUTEX_INITIALIZER(_name.mutex), \ 738 .func = &_name##_func, \ 739 .progs = {}, \ 740 .num_progs = 0, \ 741 .image = NULL, \ 742 .image_off = 0, \ 743 .ksym = { \ 744 .name = #_name, \ 745 .lnode = LIST_HEAD_INIT(_name.ksym.lnode), \ 746 }, \ 747 } 748 749 #define DEFINE_BPF_DISPATCHER(name) \ 750 noinline __nocfi unsigned int bpf_dispatcher_##name##_func( \ 751 const void *ctx, \ 752 const struct bpf_insn *insnsi, \ 753 unsigned int (*bpf_func)(const void *, \ 754 const struct bpf_insn *)) \ 755 { \ 756 return bpf_func(ctx, insnsi); \ 757 } \ 758 EXPORT_SYMBOL(bpf_dispatcher_##name##_func); \ 759 struct bpf_dispatcher bpf_dispatcher_##name = \ 760 BPF_DISPATCHER_INIT(bpf_dispatcher_##name); 761 #define DECLARE_BPF_DISPATCHER(name) \ 762 unsigned int bpf_dispatcher_##name##_func( \ 763 const void *ctx, \ 764 const struct bpf_insn *insnsi, \ 765 unsigned int (*bpf_func)(const void *, \ 766 const struct bpf_insn *)); \ 767 extern struct bpf_dispatcher bpf_dispatcher_##name; 768 #define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func 769 #define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name) 770 void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from, 771 struct bpf_prog *to); 772 /* Called only from JIT-enabled code, so there's no need for stubs. */ 773 void *bpf_jit_alloc_exec_page(void); 774 void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym); 775 void bpf_image_ksym_del(struct bpf_ksym *ksym); 776 void bpf_ksym_add(struct bpf_ksym *ksym); 777 void bpf_ksym_del(struct bpf_ksym *ksym); 778 int bpf_jit_charge_modmem(u32 pages); 779 void bpf_jit_uncharge_modmem(u32 pages); 780 #else 781 static inline int bpf_trampoline_link_prog(struct bpf_prog *prog, 782 struct bpf_trampoline *tr) 783 { 784 return -ENOTSUPP; 785 } 786 static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog, 787 struct bpf_trampoline *tr) 788 { 789 return -ENOTSUPP; 790 } 791 static inline struct bpf_trampoline *bpf_trampoline_get(u64 key, 792 struct bpf_attach_target_info *tgt_info) 793 { 794 return ERR_PTR(-EOPNOTSUPP); 795 } 796 static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {} 797 #define DEFINE_BPF_DISPATCHER(name) 798 #define DECLARE_BPF_DISPATCHER(name) 799 #define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func 800 #define BPF_DISPATCHER_PTR(name) NULL 801 static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, 802 struct bpf_prog *from, 803 struct bpf_prog *to) {} 804 static inline bool is_bpf_image_address(unsigned long address) 805 { 806 return false; 807 } 808 #endif 809 810 struct bpf_func_info_aux { 811 u16 linkage; 812 bool unreliable; 813 }; 814 815 enum bpf_jit_poke_reason { 816 BPF_POKE_REASON_TAIL_CALL, 817 }; 818 819 /* Descriptor of pokes pointing /into/ the JITed image. */ 820 struct bpf_jit_poke_descriptor { 821 void *tailcall_target; 822 void *tailcall_bypass; 823 void *bypass_addr; 824 void *aux; 825 union { 826 struct { 827 struct bpf_map *map; 828 u32 key; 829 } tail_call; 830 }; 831 bool tailcall_target_stable; 832 u8 adj_off; 833 u16 reason; 834 u32 insn_idx; 835 }; 836 837 /* reg_type info for ctx arguments */ 838 struct bpf_ctx_arg_aux { 839 u32 offset; 840 enum bpf_reg_type reg_type; 841 u32 btf_id; 842 }; 843 844 struct btf_mod_pair { 845 struct btf *btf; 846 struct module *module; 847 }; 848 849 struct bpf_kfunc_desc_tab; 850 851 struct bpf_prog_aux { 852 atomic64_t refcnt; 853 u32 used_map_cnt; 854 u32 used_btf_cnt; 855 u32 max_ctx_offset; 856 u32 max_pkt_offset; 857 u32 max_tp_access; 858 u32 stack_depth; 859 u32 id; 860 u32 func_cnt; /* used by non-func prog as the number of func progs */ 861 u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */ 862 u32 attach_btf_id; /* in-kernel BTF type id to attach to */ 863 u32 ctx_arg_info_size; 864 u32 max_rdonly_access; 865 u32 max_rdwr_access; 866 struct btf *attach_btf; 867 const struct bpf_ctx_arg_aux *ctx_arg_info; 868 struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */ 869 struct bpf_prog *dst_prog; 870 struct bpf_trampoline *dst_trampoline; 871 enum bpf_prog_type saved_dst_prog_type; 872 enum bpf_attach_type saved_dst_attach_type; 873 bool verifier_zext; /* Zero extensions has been inserted by verifier. */ 874 bool offload_requested; 875 bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */ 876 bool func_proto_unreliable; 877 bool sleepable; 878 bool tail_call_reachable; 879 struct hlist_node tramp_hlist; 880 /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */ 881 const struct btf_type *attach_func_proto; 882 /* function name for valid attach_btf_id */ 883 const char *attach_func_name; 884 struct bpf_prog **func; 885 void *jit_data; /* JIT specific data. arch dependent */ 886 struct bpf_jit_poke_descriptor *poke_tab; 887 struct bpf_kfunc_desc_tab *kfunc_tab; 888 struct bpf_kfunc_btf_tab *kfunc_btf_tab; 889 u32 size_poke_tab; 890 struct bpf_ksym ksym; 891 const struct bpf_prog_ops *ops; 892 struct bpf_map **used_maps; 893 struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */ 894 struct btf_mod_pair *used_btfs; 895 struct bpf_prog *prog; 896 struct user_struct *user; 897 u64 load_time; /* ns since boottime */ 898 u32 verified_insns; 899 struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; 900 char name[BPF_OBJ_NAME_LEN]; 901 #ifdef CONFIG_SECURITY 902 void *security; 903 #endif 904 struct bpf_prog_offload *offload; 905 struct btf *btf; 906 struct bpf_func_info *func_info; 907 struct bpf_func_info_aux *func_info_aux; 908 /* bpf_line_info loaded from userspace. linfo->insn_off 909 * has the xlated insn offset. 910 * Both the main and sub prog share the same linfo. 911 * The subprog can access its first linfo by 912 * using the linfo_idx. 913 */ 914 struct bpf_line_info *linfo; 915 /* jited_linfo is the jited addr of the linfo. It has a 916 * one to one mapping to linfo: 917 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off. 918 * Both the main and sub prog share the same jited_linfo. 919 * The subprog can access its first jited_linfo by 920 * using the linfo_idx. 921 */ 922 void **jited_linfo; 923 u32 func_info_cnt; 924 u32 nr_linfo; 925 /* subprog can use linfo_idx to access its first linfo and 926 * jited_linfo. 927 * main prog always has linfo_idx == 0 928 */ 929 u32 linfo_idx; 930 u32 num_exentries; 931 struct exception_table_entry *extable; 932 union { 933 struct work_struct work; 934 struct rcu_head rcu; 935 }; 936 }; 937 938 struct bpf_array_aux { 939 /* 'Ownership' of prog array is claimed by the first program that 940 * is going to use this map or by the first program which FD is 941 * stored in the map to make sure that all callers and callees have 942 * the same prog type and JITed flag. 943 */ 944 struct { 945 spinlock_t lock; 946 enum bpf_prog_type type; 947 bool jited; 948 } owner; 949 /* Programs with direct jumps into programs part of this array. */ 950 struct list_head poke_progs; 951 struct bpf_map *map; 952 struct mutex poke_mutex; 953 struct work_struct work; 954 }; 955 956 struct bpf_link { 957 atomic64_t refcnt; 958 u32 id; 959 enum bpf_link_type type; 960 const struct bpf_link_ops *ops; 961 struct bpf_prog *prog; 962 struct work_struct work; 963 }; 964 965 struct bpf_link_ops { 966 void (*release)(struct bpf_link *link); 967 void (*dealloc)(struct bpf_link *link); 968 int (*detach)(struct bpf_link *link); 969 int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog, 970 struct bpf_prog *old_prog); 971 void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq); 972 int (*fill_link_info)(const struct bpf_link *link, 973 struct bpf_link_info *info); 974 }; 975 976 struct bpf_link_primer { 977 struct bpf_link *link; 978 struct file *file; 979 int fd; 980 u32 id; 981 }; 982 983 struct bpf_struct_ops_value; 984 struct btf_member; 985 986 #define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64 987 struct bpf_struct_ops { 988 const struct bpf_verifier_ops *verifier_ops; 989 int (*init)(struct btf *btf); 990 int (*check_member)(const struct btf_type *t, 991 const struct btf_member *member); 992 int (*init_member)(const struct btf_type *t, 993 const struct btf_member *member, 994 void *kdata, const void *udata); 995 int (*reg)(void *kdata); 996 void (*unreg)(void *kdata); 997 const struct btf_type *type; 998 const struct btf_type *value_type; 999 const char *name; 1000 struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS]; 1001 u32 type_id; 1002 u32 value_id; 1003 }; 1004 1005 #if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL) 1006 #define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA)) 1007 const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id); 1008 void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log); 1009 bool bpf_struct_ops_get(const void *kdata); 1010 void bpf_struct_ops_put(const void *kdata); 1011 int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, 1012 void *value); 1013 int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_progs *tprogs, 1014 struct bpf_prog *prog, 1015 const struct btf_func_model *model, 1016 void *image, void *image_end); 1017 static inline bool bpf_try_module_get(const void *data, struct module *owner) 1018 { 1019 if (owner == BPF_MODULE_OWNER) 1020 return bpf_struct_ops_get(data); 1021 else 1022 return try_module_get(owner); 1023 } 1024 static inline void bpf_module_put(const void *data, struct module *owner) 1025 { 1026 if (owner == BPF_MODULE_OWNER) 1027 bpf_struct_ops_put(data); 1028 else 1029 module_put(owner); 1030 } 1031 1032 #ifdef CONFIG_NET 1033 /* Define it here to avoid the use of forward declaration */ 1034 struct bpf_dummy_ops_state { 1035 int val; 1036 }; 1037 1038 struct bpf_dummy_ops { 1039 int (*test_1)(struct bpf_dummy_ops_state *cb); 1040 int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2, 1041 char a3, unsigned long a4); 1042 }; 1043 1044 int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr, 1045 union bpf_attr __user *uattr); 1046 #endif 1047 #else 1048 static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id) 1049 { 1050 return NULL; 1051 } 1052 static inline void bpf_struct_ops_init(struct btf *btf, 1053 struct bpf_verifier_log *log) 1054 { 1055 } 1056 static inline bool bpf_try_module_get(const void *data, struct module *owner) 1057 { 1058 return try_module_get(owner); 1059 } 1060 static inline void bpf_module_put(const void *data, struct module *owner) 1061 { 1062 module_put(owner); 1063 } 1064 static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, 1065 void *key, 1066 void *value) 1067 { 1068 return -EINVAL; 1069 } 1070 #endif 1071 1072 struct bpf_array { 1073 struct bpf_map map; 1074 u32 elem_size; 1075 u32 index_mask; 1076 struct bpf_array_aux *aux; 1077 union { 1078 char value[0] __aligned(8); 1079 void *ptrs[0] __aligned(8); 1080 void __percpu *pptrs[0] __aligned(8); 1081 }; 1082 }; 1083 1084 #define BPF_COMPLEXITY_LIMIT_INSNS 1000000 /* yes. 1M insns */ 1085 #define MAX_TAIL_CALL_CNT 32 1086 1087 #define BPF_F_ACCESS_MASK (BPF_F_RDONLY | \ 1088 BPF_F_RDONLY_PROG | \ 1089 BPF_F_WRONLY | \ 1090 BPF_F_WRONLY_PROG) 1091 1092 #define BPF_MAP_CAN_READ BIT(0) 1093 #define BPF_MAP_CAN_WRITE BIT(1) 1094 1095 static inline u32 bpf_map_flags_to_cap(struct bpf_map *map) 1096 { 1097 u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG); 1098 1099 /* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is 1100 * not possible. 1101 */ 1102 if (access_flags & BPF_F_RDONLY_PROG) 1103 return BPF_MAP_CAN_READ; 1104 else if (access_flags & BPF_F_WRONLY_PROG) 1105 return BPF_MAP_CAN_WRITE; 1106 else 1107 return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE; 1108 } 1109 1110 static inline bool bpf_map_flags_access_ok(u32 access_flags) 1111 { 1112 return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) != 1113 (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG); 1114 } 1115 1116 struct bpf_event_entry { 1117 struct perf_event *event; 1118 struct file *perf_file; 1119 struct file *map_file; 1120 struct rcu_head rcu; 1121 }; 1122 1123 bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp); 1124 int bpf_prog_calc_tag(struct bpf_prog *fp); 1125 1126 const struct bpf_func_proto *bpf_get_trace_printk_proto(void); 1127 const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void); 1128 1129 typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src, 1130 unsigned long off, unsigned long len); 1131 typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type, 1132 const struct bpf_insn *src, 1133 struct bpf_insn *dst, 1134 struct bpf_prog *prog, 1135 u32 *target_size); 1136 1137 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, 1138 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy); 1139 1140 /* an array of programs to be executed under rcu_lock. 1141 * 1142 * Typical usage: 1143 * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, bpf_prog_run); 1144 * 1145 * the structure returned by bpf_prog_array_alloc() should be populated 1146 * with program pointers and the last pointer must be NULL. 1147 * The user has to keep refcnt on the program and make sure the program 1148 * is removed from the array before bpf_prog_put(). 1149 * The 'struct bpf_prog_array *' should only be replaced with xchg() 1150 * since other cpus are walking the array of pointers in parallel. 1151 */ 1152 struct bpf_prog_array_item { 1153 struct bpf_prog *prog; 1154 union { 1155 struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; 1156 u64 bpf_cookie; 1157 }; 1158 }; 1159 1160 struct bpf_prog_array { 1161 struct rcu_head rcu; 1162 struct bpf_prog_array_item items[]; 1163 }; 1164 1165 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags); 1166 void bpf_prog_array_free(struct bpf_prog_array *progs); 1167 int bpf_prog_array_length(struct bpf_prog_array *progs); 1168 bool bpf_prog_array_is_empty(struct bpf_prog_array *array); 1169 int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs, 1170 __u32 __user *prog_ids, u32 cnt); 1171 1172 void bpf_prog_array_delete_safe(struct bpf_prog_array *progs, 1173 struct bpf_prog *old_prog); 1174 int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index); 1175 int bpf_prog_array_update_at(struct bpf_prog_array *array, int index, 1176 struct bpf_prog *prog); 1177 int bpf_prog_array_copy_info(struct bpf_prog_array *array, 1178 u32 *prog_ids, u32 request_cnt, 1179 u32 *prog_cnt); 1180 int bpf_prog_array_copy(struct bpf_prog_array *old_array, 1181 struct bpf_prog *exclude_prog, 1182 struct bpf_prog *include_prog, 1183 u64 bpf_cookie, 1184 struct bpf_prog_array **new_array); 1185 1186 struct bpf_run_ctx {}; 1187 1188 struct bpf_cg_run_ctx { 1189 struct bpf_run_ctx run_ctx; 1190 const struct bpf_prog_array_item *prog_item; 1191 }; 1192 1193 struct bpf_trace_run_ctx { 1194 struct bpf_run_ctx run_ctx; 1195 u64 bpf_cookie; 1196 }; 1197 1198 static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx) 1199 { 1200 struct bpf_run_ctx *old_ctx = NULL; 1201 1202 #ifdef CONFIG_BPF_SYSCALL 1203 old_ctx = current->bpf_ctx; 1204 current->bpf_ctx = new_ctx; 1205 #endif 1206 return old_ctx; 1207 } 1208 1209 static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx) 1210 { 1211 #ifdef CONFIG_BPF_SYSCALL 1212 current->bpf_ctx = old_ctx; 1213 #endif 1214 } 1215 1216 /* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */ 1217 #define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE (1 << 0) 1218 /* BPF program asks to set CN on the packet. */ 1219 #define BPF_RET_SET_CN (1 << 0) 1220 1221 typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx); 1222 1223 static __always_inline u32 1224 BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu, 1225 const void *ctx, bpf_prog_run_fn run_prog, 1226 u32 *ret_flags) 1227 { 1228 const struct bpf_prog_array_item *item; 1229 const struct bpf_prog *prog; 1230 const struct bpf_prog_array *array; 1231 struct bpf_run_ctx *old_run_ctx; 1232 struct bpf_cg_run_ctx run_ctx; 1233 u32 ret = 1; 1234 u32 func_ret; 1235 1236 migrate_disable(); 1237 rcu_read_lock(); 1238 array = rcu_dereference(array_rcu); 1239 item = &array->items[0]; 1240 old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); 1241 while ((prog = READ_ONCE(item->prog))) { 1242 run_ctx.prog_item = item; 1243 func_ret = run_prog(prog, ctx); 1244 ret &= (func_ret & 1); 1245 *(ret_flags) |= (func_ret >> 1); 1246 item++; 1247 } 1248 bpf_reset_run_ctx(old_run_ctx); 1249 rcu_read_unlock(); 1250 migrate_enable(); 1251 return ret; 1252 } 1253 1254 static __always_inline u32 1255 BPF_PROG_RUN_ARRAY_CG(const struct bpf_prog_array __rcu *array_rcu, 1256 const void *ctx, bpf_prog_run_fn run_prog) 1257 { 1258 const struct bpf_prog_array_item *item; 1259 const struct bpf_prog *prog; 1260 const struct bpf_prog_array *array; 1261 struct bpf_run_ctx *old_run_ctx; 1262 struct bpf_cg_run_ctx run_ctx; 1263 u32 ret = 1; 1264 1265 migrate_disable(); 1266 rcu_read_lock(); 1267 array = rcu_dereference(array_rcu); 1268 item = &array->items[0]; 1269 old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); 1270 while ((prog = READ_ONCE(item->prog))) { 1271 run_ctx.prog_item = item; 1272 ret &= run_prog(prog, ctx); 1273 item++; 1274 } 1275 bpf_reset_run_ctx(old_run_ctx); 1276 rcu_read_unlock(); 1277 migrate_enable(); 1278 return ret; 1279 } 1280 1281 static __always_inline u32 1282 BPF_PROG_RUN_ARRAY(const struct bpf_prog_array __rcu *array_rcu, 1283 const void *ctx, bpf_prog_run_fn run_prog) 1284 { 1285 const struct bpf_prog_array_item *item; 1286 const struct bpf_prog *prog; 1287 const struct bpf_prog_array *array; 1288 struct bpf_run_ctx *old_run_ctx; 1289 struct bpf_trace_run_ctx run_ctx; 1290 u32 ret = 1; 1291 1292 migrate_disable(); 1293 rcu_read_lock(); 1294 array = rcu_dereference(array_rcu); 1295 if (unlikely(!array)) 1296 goto out; 1297 old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); 1298 item = &array->items[0]; 1299 while ((prog = READ_ONCE(item->prog))) { 1300 run_ctx.bpf_cookie = item->bpf_cookie; 1301 ret &= run_prog(prog, ctx); 1302 item++; 1303 } 1304 bpf_reset_run_ctx(old_run_ctx); 1305 out: 1306 rcu_read_unlock(); 1307 migrate_enable(); 1308 return ret; 1309 } 1310 1311 /* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs 1312 * so BPF programs can request cwr for TCP packets. 1313 * 1314 * Current cgroup skb programs can only return 0 or 1 (0 to drop the 1315 * packet. This macro changes the behavior so the low order bit 1316 * indicates whether the packet should be dropped (0) or not (1) 1317 * and the next bit is a congestion notification bit. This could be 1318 * used by TCP to call tcp_enter_cwr() 1319 * 1320 * Hence, new allowed return values of CGROUP EGRESS BPF programs are: 1321 * 0: drop packet 1322 * 1: keep packet 1323 * 2: drop packet and cn 1324 * 3: keep packet and cn 1325 * 1326 * This macro then converts it to one of the NET_XMIT or an error 1327 * code that is then interpreted as drop packet (and no cn): 1328 * 0: NET_XMIT_SUCCESS skb should be transmitted 1329 * 1: NET_XMIT_DROP skb should be dropped and cn 1330 * 2: NET_XMIT_CN skb should be transmitted and cn 1331 * 3: -EPERM skb should be dropped 1332 */ 1333 #define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func) \ 1334 ({ \ 1335 u32 _flags = 0; \ 1336 bool _cn; \ 1337 u32 _ret; \ 1338 _ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(array, ctx, func, &_flags); \ 1339 _cn = _flags & BPF_RET_SET_CN; \ 1340 if (_ret) \ 1341 _ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \ 1342 else \ 1343 _ret = (_cn ? NET_XMIT_DROP : -EPERM); \ 1344 _ret; \ 1345 }) 1346 1347 #ifdef CONFIG_BPF_SYSCALL 1348 DECLARE_PER_CPU(int, bpf_prog_active); 1349 extern struct mutex bpf_stats_enabled_mutex; 1350 1351 /* 1352 * Block execution of BPF programs attached to instrumentation (perf, 1353 * kprobes, tracepoints) to prevent deadlocks on map operations as any of 1354 * these events can happen inside a region which holds a map bucket lock 1355 * and can deadlock on it. 1356 */ 1357 static inline void bpf_disable_instrumentation(void) 1358 { 1359 migrate_disable(); 1360 this_cpu_inc(bpf_prog_active); 1361 } 1362 1363 static inline void bpf_enable_instrumentation(void) 1364 { 1365 this_cpu_dec(bpf_prog_active); 1366 migrate_enable(); 1367 } 1368 1369 extern const struct file_operations bpf_map_fops; 1370 extern const struct file_operations bpf_prog_fops; 1371 extern const struct file_operations bpf_iter_fops; 1372 1373 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ 1374 extern const struct bpf_prog_ops _name ## _prog_ops; \ 1375 extern const struct bpf_verifier_ops _name ## _verifier_ops; 1376 #define BPF_MAP_TYPE(_id, _ops) \ 1377 extern const struct bpf_map_ops _ops; 1378 #define BPF_LINK_TYPE(_id, _name) 1379 #include <linux/bpf_types.h> 1380 #undef BPF_PROG_TYPE 1381 #undef BPF_MAP_TYPE 1382 #undef BPF_LINK_TYPE 1383 1384 extern const struct bpf_prog_ops bpf_offload_prog_ops; 1385 extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops; 1386 extern const struct bpf_verifier_ops xdp_analyzer_ops; 1387 1388 struct bpf_prog *bpf_prog_get(u32 ufd); 1389 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, 1390 bool attach_drv); 1391 void bpf_prog_add(struct bpf_prog *prog, int i); 1392 void bpf_prog_sub(struct bpf_prog *prog, int i); 1393 void bpf_prog_inc(struct bpf_prog *prog); 1394 struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog); 1395 void bpf_prog_put(struct bpf_prog *prog); 1396 1397 void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock); 1398 void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock); 1399 1400 struct bpf_map *bpf_map_get(u32 ufd); 1401 struct bpf_map *bpf_map_get_with_uref(u32 ufd); 1402 struct bpf_map *__bpf_map_get(struct fd f); 1403 void bpf_map_inc(struct bpf_map *map); 1404 void bpf_map_inc_with_uref(struct bpf_map *map); 1405 struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map); 1406 void bpf_map_put_with_uref(struct bpf_map *map); 1407 void bpf_map_put(struct bpf_map *map); 1408 void *bpf_map_area_alloc(u64 size, int numa_node); 1409 void *bpf_map_area_mmapable_alloc(u64 size, int numa_node); 1410 void bpf_map_area_free(void *base); 1411 bool bpf_map_write_active(const struct bpf_map *map); 1412 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr); 1413 int generic_map_lookup_batch(struct bpf_map *map, 1414 const union bpf_attr *attr, 1415 union bpf_attr __user *uattr); 1416 int generic_map_update_batch(struct bpf_map *map, 1417 const union bpf_attr *attr, 1418 union bpf_attr __user *uattr); 1419 int generic_map_delete_batch(struct bpf_map *map, 1420 const union bpf_attr *attr, 1421 union bpf_attr __user *uattr); 1422 struct bpf_map *bpf_map_get_curr_or_next(u32 *id); 1423 struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id); 1424 1425 #ifdef CONFIG_MEMCG_KMEM 1426 void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags, 1427 int node); 1428 void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags); 1429 void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, 1430 size_t align, gfp_t flags); 1431 #else 1432 static inline void * 1433 bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags, 1434 int node) 1435 { 1436 return kmalloc_node(size, flags, node); 1437 } 1438 1439 static inline void * 1440 bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags) 1441 { 1442 return kzalloc(size, flags); 1443 } 1444 1445 static inline void __percpu * 1446 bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align, 1447 gfp_t flags) 1448 { 1449 return __alloc_percpu_gfp(size, align, flags); 1450 } 1451 #endif 1452 1453 extern int sysctl_unprivileged_bpf_disabled; 1454 1455 static inline bool bpf_allow_ptr_leaks(void) 1456 { 1457 return perfmon_capable(); 1458 } 1459 1460 static inline bool bpf_allow_uninit_stack(void) 1461 { 1462 return perfmon_capable(); 1463 } 1464 1465 static inline bool bpf_allow_ptr_to_map_access(void) 1466 { 1467 return perfmon_capable(); 1468 } 1469 1470 static inline bool bpf_bypass_spec_v1(void) 1471 { 1472 return perfmon_capable(); 1473 } 1474 1475 static inline bool bpf_bypass_spec_v4(void) 1476 { 1477 return perfmon_capable(); 1478 } 1479 1480 int bpf_map_new_fd(struct bpf_map *map, int flags); 1481 int bpf_prog_new_fd(struct bpf_prog *prog); 1482 1483 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, 1484 const struct bpf_link_ops *ops, struct bpf_prog *prog); 1485 int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer); 1486 int bpf_link_settle(struct bpf_link_primer *primer); 1487 void bpf_link_cleanup(struct bpf_link_primer *primer); 1488 void bpf_link_inc(struct bpf_link *link); 1489 void bpf_link_put(struct bpf_link *link); 1490 int bpf_link_new_fd(struct bpf_link *link); 1491 struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd); 1492 struct bpf_link *bpf_link_get_from_fd(u32 ufd); 1493 1494 int bpf_obj_pin_user(u32 ufd, const char __user *pathname); 1495 int bpf_obj_get_user(const char __user *pathname, int flags); 1496 1497 #define BPF_ITER_FUNC_PREFIX "bpf_iter_" 1498 #define DEFINE_BPF_ITER_FUNC(target, args...) \ 1499 extern int bpf_iter_ ## target(args); \ 1500 int __init bpf_iter_ ## target(args) { return 0; } 1501 1502 struct bpf_iter_aux_info { 1503 struct bpf_map *map; 1504 }; 1505 1506 typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog, 1507 union bpf_iter_link_info *linfo, 1508 struct bpf_iter_aux_info *aux); 1509 typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux); 1510 typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux, 1511 struct seq_file *seq); 1512 typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux, 1513 struct bpf_link_info *info); 1514 typedef const struct bpf_func_proto * 1515 (*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id, 1516 const struct bpf_prog *prog); 1517 1518 enum bpf_iter_feature { 1519 BPF_ITER_RESCHED = BIT(0), 1520 }; 1521 1522 #define BPF_ITER_CTX_ARG_MAX 2 1523 struct bpf_iter_reg { 1524 const char *target; 1525 bpf_iter_attach_target_t attach_target; 1526 bpf_iter_detach_target_t detach_target; 1527 bpf_iter_show_fdinfo_t show_fdinfo; 1528 bpf_iter_fill_link_info_t fill_link_info; 1529 bpf_iter_get_func_proto_t get_func_proto; 1530 u32 ctx_arg_info_size; 1531 u32 feature; 1532 struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX]; 1533 const struct bpf_iter_seq_info *seq_info; 1534 }; 1535 1536 struct bpf_iter_meta { 1537 __bpf_md_ptr(struct seq_file *, seq); 1538 u64 session_id; 1539 u64 seq_num; 1540 }; 1541 1542 struct bpf_iter__bpf_map_elem { 1543 __bpf_md_ptr(struct bpf_iter_meta *, meta); 1544 __bpf_md_ptr(struct bpf_map *, map); 1545 __bpf_md_ptr(void *, key); 1546 __bpf_md_ptr(void *, value); 1547 }; 1548 1549 int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info); 1550 void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info); 1551 bool bpf_iter_prog_supported(struct bpf_prog *prog); 1552 const struct bpf_func_proto * 1553 bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog); 1554 int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog); 1555 int bpf_iter_new_fd(struct bpf_link *link); 1556 bool bpf_link_is_iter(struct bpf_link *link); 1557 struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop); 1558 int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx); 1559 void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux, 1560 struct seq_file *seq); 1561 int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux, 1562 struct bpf_link_info *info); 1563 1564 int map_set_for_each_callback_args(struct bpf_verifier_env *env, 1565 struct bpf_func_state *caller, 1566 struct bpf_func_state *callee); 1567 1568 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value); 1569 int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value); 1570 int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value, 1571 u64 flags); 1572 int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value, 1573 u64 flags); 1574 1575 int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value); 1576 1577 int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file, 1578 void *key, void *value, u64 map_flags); 1579 int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); 1580 int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file, 1581 void *key, void *value, u64 map_flags); 1582 int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); 1583 1584 int bpf_get_file_flag(int flags); 1585 int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size, 1586 size_t actual_size); 1587 1588 /* memcpy that is used with 8-byte aligned pointers, power-of-8 size and 1589 * forced to use 'long' read/writes to try to atomically copy long counters. 1590 * Best-effort only. No barriers here, since it _will_ race with concurrent 1591 * updates from BPF programs. Called from bpf syscall and mostly used with 1592 * size 8 or 16 bytes, so ask compiler to inline it. 1593 */ 1594 static inline void bpf_long_memcpy(void *dst, const void *src, u32 size) 1595 { 1596 const long *lsrc = src; 1597 long *ldst = dst; 1598 1599 size /= sizeof(long); 1600 while (size--) 1601 *ldst++ = *lsrc++; 1602 } 1603 1604 /* verify correctness of eBPF program */ 1605 int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr); 1606 1607 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 1608 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth); 1609 #endif 1610 1611 struct btf *bpf_get_btf_vmlinux(void); 1612 1613 /* Map specifics */ 1614 struct xdp_buff; 1615 struct sk_buff; 1616 struct bpf_dtab_netdev; 1617 struct bpf_cpu_map_entry; 1618 1619 void __dev_flush(void); 1620 int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp, 1621 struct net_device *dev_rx); 1622 int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, 1623 struct net_device *dev_rx); 1624 int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx, 1625 struct bpf_map *map, bool exclude_ingress); 1626 int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, 1627 struct bpf_prog *xdp_prog); 1628 int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb, 1629 struct bpf_prog *xdp_prog, struct bpf_map *map, 1630 bool exclude_ingress); 1631 1632 void __cpu_map_flush(void); 1633 int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp, 1634 struct net_device *dev_rx); 1635 int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu, 1636 struct sk_buff *skb); 1637 1638 /* Return map's numa specified by userspace */ 1639 static inline int bpf_map_attr_numa_node(const union bpf_attr *attr) 1640 { 1641 return (attr->map_flags & BPF_F_NUMA_NODE) ? 1642 attr->numa_node : NUMA_NO_NODE; 1643 } 1644 1645 struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type); 1646 int array_map_alloc_check(union bpf_attr *attr); 1647 1648 int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, 1649 union bpf_attr __user *uattr); 1650 int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, 1651 union bpf_attr __user *uattr); 1652 int bpf_prog_test_run_tracing(struct bpf_prog *prog, 1653 const union bpf_attr *kattr, 1654 union bpf_attr __user *uattr); 1655 int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, 1656 const union bpf_attr *kattr, 1657 union bpf_attr __user *uattr); 1658 int bpf_prog_test_run_raw_tp(struct bpf_prog *prog, 1659 const union bpf_attr *kattr, 1660 union bpf_attr __user *uattr); 1661 int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, 1662 const union bpf_attr *kattr, 1663 union bpf_attr __user *uattr); 1664 bool bpf_prog_test_check_kfunc_call(u32 kfunc_id, struct module *owner); 1665 bool btf_ctx_access(int off, int size, enum bpf_access_type type, 1666 const struct bpf_prog *prog, 1667 struct bpf_insn_access_aux *info); 1668 1669 static inline bool bpf_tracing_ctx_access(int off, int size, 1670 enum bpf_access_type type) 1671 { 1672 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS) 1673 return false; 1674 if (type != BPF_READ) 1675 return false; 1676 if (off % size != 0) 1677 return false; 1678 return true; 1679 } 1680 1681 static inline bool bpf_tracing_btf_ctx_access(int off, int size, 1682 enum bpf_access_type type, 1683 const struct bpf_prog *prog, 1684 struct bpf_insn_access_aux *info) 1685 { 1686 if (!bpf_tracing_ctx_access(off, size, type)) 1687 return false; 1688 return btf_ctx_access(off, size, type, prog, info); 1689 } 1690 1691 int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf, 1692 const struct btf_type *t, int off, int size, 1693 enum bpf_access_type atype, 1694 u32 *next_btf_id); 1695 bool btf_struct_ids_match(struct bpf_verifier_log *log, 1696 const struct btf *btf, u32 id, int off, 1697 const struct btf *need_btf, u32 need_type_id); 1698 1699 int btf_distill_func_proto(struct bpf_verifier_log *log, 1700 struct btf *btf, 1701 const struct btf_type *func_proto, 1702 const char *func_name, 1703 struct btf_func_model *m); 1704 1705 struct bpf_reg_state; 1706 int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog, 1707 struct bpf_reg_state *regs); 1708 int btf_check_kfunc_arg_match(struct bpf_verifier_env *env, 1709 const struct btf *btf, u32 func_id, 1710 struct bpf_reg_state *regs); 1711 int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog, 1712 struct bpf_reg_state *reg); 1713 int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog, 1714 struct btf *btf, const struct btf_type *t); 1715 1716 struct bpf_prog *bpf_prog_by_id(u32 id); 1717 struct bpf_link *bpf_link_by_id(u32 id); 1718 1719 const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id); 1720 void bpf_task_storage_free(struct task_struct *task); 1721 bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog); 1722 const struct btf_func_model * 1723 bpf_jit_find_kfunc_model(const struct bpf_prog *prog, 1724 const struct bpf_insn *insn); 1725 #else /* !CONFIG_BPF_SYSCALL */ 1726 static inline struct bpf_prog *bpf_prog_get(u32 ufd) 1727 { 1728 return ERR_PTR(-EOPNOTSUPP); 1729 } 1730 1731 static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, 1732 enum bpf_prog_type type, 1733 bool attach_drv) 1734 { 1735 return ERR_PTR(-EOPNOTSUPP); 1736 } 1737 1738 static inline void bpf_prog_add(struct bpf_prog *prog, int i) 1739 { 1740 } 1741 1742 static inline void bpf_prog_sub(struct bpf_prog *prog, int i) 1743 { 1744 } 1745 1746 static inline void bpf_prog_put(struct bpf_prog *prog) 1747 { 1748 } 1749 1750 static inline void bpf_prog_inc(struct bpf_prog *prog) 1751 { 1752 } 1753 1754 static inline struct bpf_prog *__must_check 1755 bpf_prog_inc_not_zero(struct bpf_prog *prog) 1756 { 1757 return ERR_PTR(-EOPNOTSUPP); 1758 } 1759 1760 static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, 1761 const struct bpf_link_ops *ops, 1762 struct bpf_prog *prog) 1763 { 1764 } 1765 1766 static inline int bpf_link_prime(struct bpf_link *link, 1767 struct bpf_link_primer *primer) 1768 { 1769 return -EOPNOTSUPP; 1770 } 1771 1772 static inline int bpf_link_settle(struct bpf_link_primer *primer) 1773 { 1774 return -EOPNOTSUPP; 1775 } 1776 1777 static inline void bpf_link_cleanup(struct bpf_link_primer *primer) 1778 { 1779 } 1780 1781 static inline void bpf_link_inc(struct bpf_link *link) 1782 { 1783 } 1784 1785 static inline void bpf_link_put(struct bpf_link *link) 1786 { 1787 } 1788 1789 static inline int bpf_obj_get_user(const char __user *pathname, int flags) 1790 { 1791 return -EOPNOTSUPP; 1792 } 1793 1794 static inline bool dev_map_can_have_prog(struct bpf_map *map) 1795 { 1796 return false; 1797 } 1798 1799 static inline void __dev_flush(void) 1800 { 1801 } 1802 1803 struct xdp_buff; 1804 struct bpf_dtab_netdev; 1805 struct bpf_cpu_map_entry; 1806 1807 static inline 1808 int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp, 1809 struct net_device *dev_rx) 1810 { 1811 return 0; 1812 } 1813 1814 static inline 1815 int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, 1816 struct net_device *dev_rx) 1817 { 1818 return 0; 1819 } 1820 1821 static inline 1822 int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx, 1823 struct bpf_map *map, bool exclude_ingress) 1824 { 1825 return 0; 1826 } 1827 1828 struct sk_buff; 1829 1830 static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, 1831 struct sk_buff *skb, 1832 struct bpf_prog *xdp_prog) 1833 { 1834 return 0; 1835 } 1836 1837 static inline 1838 int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb, 1839 struct bpf_prog *xdp_prog, struct bpf_map *map, 1840 bool exclude_ingress) 1841 { 1842 return 0; 1843 } 1844 1845 static inline void __cpu_map_flush(void) 1846 { 1847 } 1848 1849 static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, 1850 struct xdp_buff *xdp, 1851 struct net_device *dev_rx) 1852 { 1853 return 0; 1854 } 1855 1856 static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu, 1857 struct sk_buff *skb) 1858 { 1859 return -EOPNOTSUPP; 1860 } 1861 1862 static inline bool cpu_map_prog_allowed(struct bpf_map *map) 1863 { 1864 return false; 1865 } 1866 1867 static inline struct bpf_prog *bpf_prog_get_type_path(const char *name, 1868 enum bpf_prog_type type) 1869 { 1870 return ERR_PTR(-EOPNOTSUPP); 1871 } 1872 1873 static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog, 1874 const union bpf_attr *kattr, 1875 union bpf_attr __user *uattr) 1876 { 1877 return -ENOTSUPP; 1878 } 1879 1880 static inline int bpf_prog_test_run_skb(struct bpf_prog *prog, 1881 const union bpf_attr *kattr, 1882 union bpf_attr __user *uattr) 1883 { 1884 return -ENOTSUPP; 1885 } 1886 1887 static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog, 1888 const union bpf_attr *kattr, 1889 union bpf_attr __user *uattr) 1890 { 1891 return -ENOTSUPP; 1892 } 1893 1894 static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, 1895 const union bpf_attr *kattr, 1896 union bpf_attr __user *uattr) 1897 { 1898 return -ENOTSUPP; 1899 } 1900 1901 static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, 1902 const union bpf_attr *kattr, 1903 union bpf_attr __user *uattr) 1904 { 1905 return -ENOTSUPP; 1906 } 1907 1908 static inline bool bpf_prog_test_check_kfunc_call(u32 kfunc_id, 1909 struct module *owner) 1910 { 1911 return false; 1912 } 1913 1914 static inline void bpf_map_put(struct bpf_map *map) 1915 { 1916 } 1917 1918 static inline struct bpf_prog *bpf_prog_by_id(u32 id) 1919 { 1920 return ERR_PTR(-ENOTSUPP); 1921 } 1922 1923 static inline const struct bpf_func_proto * 1924 bpf_base_func_proto(enum bpf_func_id func_id) 1925 { 1926 return NULL; 1927 } 1928 1929 static inline void bpf_task_storage_free(struct task_struct *task) 1930 { 1931 } 1932 1933 static inline bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog) 1934 { 1935 return false; 1936 } 1937 1938 static inline const struct btf_func_model * 1939 bpf_jit_find_kfunc_model(const struct bpf_prog *prog, 1940 const struct bpf_insn *insn) 1941 { 1942 return NULL; 1943 } 1944 #endif /* CONFIG_BPF_SYSCALL */ 1945 1946 void __bpf_free_used_btfs(struct bpf_prog_aux *aux, 1947 struct btf_mod_pair *used_btfs, u32 len); 1948 1949 static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, 1950 enum bpf_prog_type type) 1951 { 1952 return bpf_prog_get_type_dev(ufd, type, false); 1953 } 1954 1955 void __bpf_free_used_maps(struct bpf_prog_aux *aux, 1956 struct bpf_map **used_maps, u32 len); 1957 1958 bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool); 1959 1960 int bpf_prog_offload_compile(struct bpf_prog *prog); 1961 void bpf_prog_offload_destroy(struct bpf_prog *prog); 1962 int bpf_prog_offload_info_fill(struct bpf_prog_info *info, 1963 struct bpf_prog *prog); 1964 1965 int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map); 1966 1967 int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value); 1968 int bpf_map_offload_update_elem(struct bpf_map *map, 1969 void *key, void *value, u64 flags); 1970 int bpf_map_offload_delete_elem(struct bpf_map *map, void *key); 1971 int bpf_map_offload_get_next_key(struct bpf_map *map, 1972 void *key, void *next_key); 1973 1974 bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map); 1975 1976 struct bpf_offload_dev * 1977 bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv); 1978 void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev); 1979 void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev); 1980 int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev, 1981 struct net_device *netdev); 1982 void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev, 1983 struct net_device *netdev); 1984 bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev); 1985 1986 #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) 1987 int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr); 1988 1989 static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux) 1990 { 1991 return aux->offload_requested; 1992 } 1993 1994 static inline bool bpf_map_is_dev_bound(struct bpf_map *map) 1995 { 1996 return unlikely(map->ops == &bpf_map_offload_ops); 1997 } 1998 1999 struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr); 2000 void bpf_map_offload_map_free(struct bpf_map *map); 2001 int bpf_prog_test_run_syscall(struct bpf_prog *prog, 2002 const union bpf_attr *kattr, 2003 union bpf_attr __user *uattr); 2004 2005 int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog); 2006 int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype); 2007 int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags); 2008 void sock_map_unhash(struct sock *sk); 2009 void sock_map_close(struct sock *sk, long timeout); 2010 #else 2011 static inline int bpf_prog_offload_init(struct bpf_prog *prog, 2012 union bpf_attr *attr) 2013 { 2014 return -EOPNOTSUPP; 2015 } 2016 2017 static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux) 2018 { 2019 return false; 2020 } 2021 2022 static inline bool bpf_map_is_dev_bound(struct bpf_map *map) 2023 { 2024 return false; 2025 } 2026 2027 static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr) 2028 { 2029 return ERR_PTR(-EOPNOTSUPP); 2030 } 2031 2032 static inline void bpf_map_offload_map_free(struct bpf_map *map) 2033 { 2034 } 2035 2036 static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog, 2037 const union bpf_attr *kattr, 2038 union bpf_attr __user *uattr) 2039 { 2040 return -ENOTSUPP; 2041 } 2042 2043 #ifdef CONFIG_BPF_SYSCALL 2044 static inline int sock_map_get_from_fd(const union bpf_attr *attr, 2045 struct bpf_prog *prog) 2046 { 2047 return -EINVAL; 2048 } 2049 2050 static inline int sock_map_prog_detach(const union bpf_attr *attr, 2051 enum bpf_prog_type ptype) 2052 { 2053 return -EOPNOTSUPP; 2054 } 2055 2056 static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, 2057 u64 flags) 2058 { 2059 return -EOPNOTSUPP; 2060 } 2061 #endif /* CONFIG_BPF_SYSCALL */ 2062 #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ 2063 2064 #if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) 2065 void bpf_sk_reuseport_detach(struct sock *sk); 2066 int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, 2067 void *value); 2068 int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key, 2069 void *value, u64 map_flags); 2070 #else 2071 static inline void bpf_sk_reuseport_detach(struct sock *sk) 2072 { 2073 } 2074 2075 #ifdef CONFIG_BPF_SYSCALL 2076 static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, 2077 void *key, void *value) 2078 { 2079 return -EOPNOTSUPP; 2080 } 2081 2082 static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, 2083 void *key, void *value, 2084 u64 map_flags) 2085 { 2086 return -EOPNOTSUPP; 2087 } 2088 #endif /* CONFIG_BPF_SYSCALL */ 2089 #endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */ 2090 2091 /* verifier prototypes for helper functions called from eBPF programs */ 2092 extern const struct bpf_func_proto bpf_map_lookup_elem_proto; 2093 extern const struct bpf_func_proto bpf_map_update_elem_proto; 2094 extern const struct bpf_func_proto bpf_map_delete_elem_proto; 2095 extern const struct bpf_func_proto bpf_map_push_elem_proto; 2096 extern const struct bpf_func_proto bpf_map_pop_elem_proto; 2097 extern const struct bpf_func_proto bpf_map_peek_elem_proto; 2098 2099 extern const struct bpf_func_proto bpf_get_prandom_u32_proto; 2100 extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; 2101 extern const struct bpf_func_proto bpf_get_numa_node_id_proto; 2102 extern const struct bpf_func_proto bpf_tail_call_proto; 2103 extern const struct bpf_func_proto bpf_ktime_get_ns_proto; 2104 extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto; 2105 extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto; 2106 extern const struct bpf_func_proto bpf_get_current_uid_gid_proto; 2107 extern const struct bpf_func_proto bpf_get_current_comm_proto; 2108 extern const struct bpf_func_proto bpf_get_stackid_proto; 2109 extern const struct bpf_func_proto bpf_get_stack_proto; 2110 extern const struct bpf_func_proto bpf_get_task_stack_proto; 2111 extern const struct bpf_func_proto bpf_get_stackid_proto_pe; 2112 extern const struct bpf_func_proto bpf_get_stack_proto_pe; 2113 extern const struct bpf_func_proto bpf_sock_map_update_proto; 2114 extern const struct bpf_func_proto bpf_sock_hash_update_proto; 2115 extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto; 2116 extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto; 2117 extern const struct bpf_func_proto bpf_msg_redirect_hash_proto; 2118 extern const struct bpf_func_proto bpf_msg_redirect_map_proto; 2119 extern const struct bpf_func_proto bpf_sk_redirect_hash_proto; 2120 extern const struct bpf_func_proto bpf_sk_redirect_map_proto; 2121 extern const struct bpf_func_proto bpf_spin_lock_proto; 2122 extern const struct bpf_func_proto bpf_spin_unlock_proto; 2123 extern const struct bpf_func_proto bpf_get_local_storage_proto; 2124 extern const struct bpf_func_proto bpf_strtol_proto; 2125 extern const struct bpf_func_proto bpf_strtoul_proto; 2126 extern const struct bpf_func_proto bpf_tcp_sock_proto; 2127 extern const struct bpf_func_proto bpf_jiffies64_proto; 2128 extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto; 2129 extern const struct bpf_func_proto bpf_event_output_data_proto; 2130 extern const struct bpf_func_proto bpf_ringbuf_output_proto; 2131 extern const struct bpf_func_proto bpf_ringbuf_reserve_proto; 2132 extern const struct bpf_func_proto bpf_ringbuf_submit_proto; 2133 extern const struct bpf_func_proto bpf_ringbuf_discard_proto; 2134 extern const struct bpf_func_proto bpf_ringbuf_query_proto; 2135 extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto; 2136 extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto; 2137 extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto; 2138 extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto; 2139 extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto; 2140 extern const struct bpf_func_proto bpf_skc_to_unix_sock_proto; 2141 extern const struct bpf_func_proto bpf_copy_from_user_proto; 2142 extern const struct bpf_func_proto bpf_snprintf_btf_proto; 2143 extern const struct bpf_func_proto bpf_snprintf_proto; 2144 extern const struct bpf_func_proto bpf_per_cpu_ptr_proto; 2145 extern const struct bpf_func_proto bpf_this_cpu_ptr_proto; 2146 extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto; 2147 extern const struct bpf_func_proto bpf_sock_from_file_proto; 2148 extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto; 2149 extern const struct bpf_func_proto bpf_task_storage_get_proto; 2150 extern const struct bpf_func_proto bpf_task_storage_delete_proto; 2151 extern const struct bpf_func_proto bpf_for_each_map_elem_proto; 2152 extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto; 2153 extern const struct bpf_func_proto bpf_sk_setsockopt_proto; 2154 extern const struct bpf_func_proto bpf_sk_getsockopt_proto; 2155 extern const struct bpf_func_proto bpf_kallsyms_lookup_name_proto; 2156 2157 const struct bpf_func_proto *tracing_prog_func_proto( 2158 enum bpf_func_id func_id, const struct bpf_prog *prog); 2159 2160 /* Shared helpers among cBPF and eBPF. */ 2161 void bpf_user_rnd_init_once(void); 2162 u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 2163 u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 2164 2165 #if defined(CONFIG_NET) 2166 bool bpf_sock_common_is_valid_access(int off, int size, 2167 enum bpf_access_type type, 2168 struct bpf_insn_access_aux *info); 2169 bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type, 2170 struct bpf_insn_access_aux *info); 2171 u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, 2172 const struct bpf_insn *si, 2173 struct bpf_insn *insn_buf, 2174 struct bpf_prog *prog, 2175 u32 *target_size); 2176 #else 2177 static inline bool bpf_sock_common_is_valid_access(int off, int size, 2178 enum bpf_access_type type, 2179 struct bpf_insn_access_aux *info) 2180 { 2181 return false; 2182 } 2183 static inline bool bpf_sock_is_valid_access(int off, int size, 2184 enum bpf_access_type type, 2185 struct bpf_insn_access_aux *info) 2186 { 2187 return false; 2188 } 2189 static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, 2190 const struct bpf_insn *si, 2191 struct bpf_insn *insn_buf, 2192 struct bpf_prog *prog, 2193 u32 *target_size) 2194 { 2195 return 0; 2196 } 2197 #endif 2198 2199 #ifdef CONFIG_INET 2200 struct sk_reuseport_kern { 2201 struct sk_buff *skb; 2202 struct sock *sk; 2203 struct sock *selected_sk; 2204 struct sock *migrating_sk; 2205 void *data_end; 2206 u32 hash; 2207 u32 reuseport_id; 2208 bool bind_inany; 2209 }; 2210 bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type, 2211 struct bpf_insn_access_aux *info); 2212 2213 u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, 2214 const struct bpf_insn *si, 2215 struct bpf_insn *insn_buf, 2216 struct bpf_prog *prog, 2217 u32 *target_size); 2218 2219 bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type, 2220 struct bpf_insn_access_aux *info); 2221 2222 u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, 2223 const struct bpf_insn *si, 2224 struct bpf_insn *insn_buf, 2225 struct bpf_prog *prog, 2226 u32 *target_size); 2227 #else 2228 static inline bool bpf_tcp_sock_is_valid_access(int off, int size, 2229 enum bpf_access_type type, 2230 struct bpf_insn_access_aux *info) 2231 { 2232 return false; 2233 } 2234 2235 static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, 2236 const struct bpf_insn *si, 2237 struct bpf_insn *insn_buf, 2238 struct bpf_prog *prog, 2239 u32 *target_size) 2240 { 2241 return 0; 2242 } 2243 static inline bool bpf_xdp_sock_is_valid_access(int off, int size, 2244 enum bpf_access_type type, 2245 struct bpf_insn_access_aux *info) 2246 { 2247 return false; 2248 } 2249 2250 static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, 2251 const struct bpf_insn *si, 2252 struct bpf_insn *insn_buf, 2253 struct bpf_prog *prog, 2254 u32 *target_size) 2255 { 2256 return 0; 2257 } 2258 #endif /* CONFIG_INET */ 2259 2260 enum bpf_text_poke_type { 2261 BPF_MOD_CALL, 2262 BPF_MOD_JUMP, 2263 }; 2264 2265 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, 2266 void *addr1, void *addr2); 2267 2268 struct btf_id_set; 2269 bool btf_id_set_contains(const struct btf_id_set *set, u32 id); 2270 2271 #define MAX_BPRINTF_VARARGS 12 2272 2273 int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args, 2274 u32 **bin_buf, u32 num_args); 2275 void bpf_bprintf_cleanup(void); 2276 2277 #endif /* _LINUX_BPF_H */ 2278