/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1

#include <uapi/linux/bpf.h>

#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/wait.h>
#include <linux/u64_stats_sync.h>

struct bpf_verifier_env;
struct perf_event;
struct bpf_prog;
struct bpf_map;
struct sock;
struct seq_file;
struct btf;
struct btf_type;

/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
	/* funcs callable from userspace (via syscall) */
	int (*map_alloc_check)(union bpf_attr *attr);
	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
	void (*map_release)(struct bpf_map *map, struct file *map_file);
	void (*map_free)(struct bpf_map *map);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
	void (*map_release_uref)(struct bpf_map *map);
	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);

	/* funcs callable from userspace and from eBPF programs */
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_map *map, void *key);
	int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
	int (*map_pop_elem)(struct bpf_map *map, void *value);
	int (*map_peek_elem)(struct bpf_map *map, void *value);

	/* funcs called by prog_array and perf_event_array map */
	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
				int fd);
	void (*map_fd_put_ptr)(void *ptr);
	u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
	u32 (*map_fd_sys_lookup_elem)(void *ptr);
	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
				  struct seq_file *m);
	int (*map_check_btf)(const struct bpf_map *map,
			     const struct btf *btf,
			     const struct btf_type *key_type,
			     const struct btf_type *value_type);

	/* Direct value access helpers. */
	int (*map_direct_value_addr)(const struct bpf_map *map,
				     u64 *imm, u32 off);
	int (*map_direct_value_meta)(const struct bpf_map *map,
				     u64 imm, u32 *off);
};

struct bpf_map {
	/* The first two cachelines hold read-mostly members, some of which
	 * are also accessed on the fast path (e.g. ops, max_entries).
	 */
	const struct bpf_map_ops *ops ____cacheline_aligned;
	struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	enum bpf_map_type map_type;
	u32 key_size;
	u32 value_size;
	u32 max_entries;
	u32 map_flags;
	int spin_lock_off; /* >=0 valid offset, <0 error */
	u32 id;
	int numa_node;
	u32 btf_key_type_id;
	u32 btf_value_type_id;
	struct btf *btf;
	u32 pages;
	bool unpriv_array;
	bool frozen; /* write-once */
	/* 48 bytes hole */

	/* The 3rd and 4th cachelines hold misc members, to avoid false
	 * sharing, particularly with refcounting.
	 */
	struct user_struct *user ____cacheline_aligned;
	atomic_t refcnt;
	atomic_t usercnt;
	struct work_struct work;
	char name[BPF_OBJ_NAME_LEN];
};

static inline bool map_value_has_spin_lock(const struct bpf_map *map)
{
	return map->spin_lock_off >= 0;
}

static inline void check_and_init_map_lock(struct bpf_map *map, void *dst)
{
	if (likely(!map_value_has_spin_lock(map)))
		return;
	*(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
		(struct bpf_spin_lock){};
}

/* copy everything but the bpf_spin_lock */
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
	if (unlikely(map_value_has_spin_lock(map))) {
		u32 off = map->spin_lock_off;

		memcpy(dst, src, off);
		memcpy(dst + off + sizeof(struct bpf_spin_lock),
		       src + off + sizeof(struct bpf_spin_lock),
		       map->value_size - off - sizeof(struct bpf_spin_lock));
	} else {
		memcpy(dst, src, map->value_size);
	}
}
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src);
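
/* Illustrative sketch, not a real call site: a map implementation whose
 * values may embed a bpf_spin_lock would typically zero the lock when an
 * element is first created and use the lock-aware copy afterwards
 * ('elem' and 'val' are hypothetical locals):
 *
 *	check_and_init_map_lock(map, elem->value);
 *	...
 *	copy_map_value_locked(map, elem->value, val, false);
 */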

struct bpf_offload_dev;
struct bpf_offloaded_map;

struct bpf_map_dev_ops {
	int (*map_get_next_key)(struct bpf_offloaded_map *map,
				void *key, void *next_key);
	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value);
	int (*map_update_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
};

struct bpf_offloaded_map {
	struct bpf_map map;
	struct net_device *netdev;
	const struct bpf_map_dev_ops *dev_ops;
	void *dev_priv;
	struct list_head offloads;
};

static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
{
	return container_of(map, struct bpf_offloaded_map, map);
}

static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
	return map->btf && map->ops->map_seq_show_elem;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type);

extern const struct bpf_map_ops bpf_map_offload_ops;

/* function argument constraints */
enum bpf_arg_type {
	ARG_DONTCARE = 0,	/* unused argument in helper function */

	/* the following constraints are used to prototype the
	 * bpf_map_lookup/update/delete_elem() functions
	 */
	ARG_CONST_MAP_PTR,	/* const argument used as pointer to bpf_map */
	ARG_PTR_TO_MAP_KEY,	/* pointer to stack used as map key */
	ARG_PTR_TO_MAP_VALUE,	/* pointer to stack used as map value */
	ARG_PTR_TO_UNINIT_MAP_VALUE,	/* pointer to valid memory used to store a map value */
	ARG_PTR_TO_MAP_VALUE_OR_NULL,	/* pointer to stack used as map value or NULL */

	/* the following constraints are used to prototype bpf_memcmp() and
	 * other functions that access data on the eBPF program stack
	 */
	ARG_PTR_TO_MEM,		/* pointer to valid memory (stack, packet, map value) */
	ARG_PTR_TO_MEM_OR_NULL, /* pointer to valid memory or NULL */
	ARG_PTR_TO_UNINIT_MEM,	/* pointer to memory that does not need to be
				 * initialized; the helper function must fill
				 * all bytes or clear them in the error case.
				 */

	ARG_CONST_SIZE,		/* number of bytes accessed from memory */
	ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */

	ARG_PTR_TO_CTX,		/* pointer to context */
	ARG_ANYTHING,		/* any (initialized) argument is ok */
	ARG_PTR_TO_SPIN_LOCK,	/* pointer to bpf_spin_lock */
	ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */
	ARG_PTR_TO_INT,		/* pointer to int */
	ARG_PTR_TO_LONG,	/* pointer to long */
	ARG_PTR_TO_SOCKET,	/* pointer to bpf_sock (fullsock) */
};

/* type of values returned from helper functions */
enum bpf_return_type {
	RET_INTEGER,			/* function returns integer */
	RET_VOID,			/* function doesn't return anything */
	RET_PTR_TO_MAP_VALUE,		/* returns a pointer to map elem value */
	RET_PTR_TO_MAP_VALUE_OR_NULL,	/* returns a pointer to map elem value or NULL */
	RET_PTR_TO_SOCKET_OR_NULL,	/* returns a pointer to a socket or NULL */
	RET_PTR_TO_TCP_SOCK_OR_NULL,	/* returns a pointer to a tcp_sock or NULL */
	RET_PTR_TO_SOCK_COMMON_OR_NULL, /* returns a pointer to a sock_common or NULL */
};

/* eBPF function prototype used by the verifier to allow BPF_CALLs from eBPF
 * programs to in-kernel helper functions and for adjusting the imm32 field
 * in BPF_CALL instructions after verification
 */
struct bpf_func_proto {
	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
	bool gpl_only;
	bool pkt_access;
	enum bpf_return_type ret_type;
	enum bpf_arg_type arg1_type;
	enum bpf_arg_type arg2_type;
	enum bpf_arg_type arg3_type;
	enum bpf_arg_type arg4_type;
	enum bpf_arg_type arg5_type;
};
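
/* Example of how the argument and return constraints above combine into a
 * helper prototype (adapted from kernel/bpf/helpers.c; shown here only to
 * illustrate the enum semantics):
 *
 *	const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 *		.func		= bpf_map_lookup_elem,
 *		.gpl_only	= false,
 *		.pkt_access	= true,
 *		.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_KEY,
 *	};
 */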

/* bpf_context is an intentionally undefined structure. A pointer to
 * bpf_context is the first argument to eBPF programs.
 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
 */
struct bpf_context;

enum bpf_access_type {
	BPF_READ = 1,
	BPF_WRITE = 2
};

/* types of values stored in eBPF registers */
/* Pointer types represent:
 * pointer
 * pointer + imm
 * pointer + (u16) var
 * pointer + (u16) var + imm
 * if (range > 0) then [ptr, ptr + range - off) is safe to access
 * if (id > 0) means that some 'var' was added
 * if (off > 0) means that 'imm' was added
 */
enum bpf_reg_type {
	NOT_INIT = 0,		 /* nothing was written into register */
	SCALAR_VALUE,		 /* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		 /* reg points to bpf_context */
	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
	PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
	PTR_TO_STACK,		 /* reg == frame_pointer + offset */
	PTR_TO_PACKET_META,	 /* skb->data - meta_len */
	PTR_TO_PACKET,		 /* reg points to skb->data */
	PTR_TO_PACKET_END,	 /* skb->data + headlen */
	PTR_TO_FLOW_KEYS,	 /* reg points to bpf_flow_keys */
	PTR_TO_SOCKET,		 /* reg points to struct bpf_sock */
	PTR_TO_SOCKET_OR_NULL,	 /* reg points to struct bpf_sock or NULL */
	PTR_TO_SOCK_COMMON,	 /* reg points to sock_common */
	PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */
	PTR_TO_TCP_SOCK,	 /* reg points to struct tcp_sock */
	PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
	PTR_TO_TP_BUFFER,	 /* reg points to a writable raw tp's buffer */
};

/* The information passed from prog-specific *_is_valid_access
 * back to the verifier.
 */
struct bpf_insn_access_aux {
	enum bpf_reg_type reg_type;
	int ctx_field_size;
};

static inline void
bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
{
	aux->ctx_field_size = size;
}

struct bpf_prog_ops {
	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
			union bpf_attr __user *uattr);
};

struct bpf_verifier_ops {
	/* return eBPF function prototype for verification */
	const struct bpf_func_proto *
	(*get_func_proto)(enum bpf_func_id func_id,
			  const struct bpf_prog *prog);

	/* return true if a 'size'-wide access at offset 'off' within
	 * bpf_context with 'type' (read or write) is allowed
	 */
	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info);
	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
			    const struct bpf_prog *prog);
	int (*gen_ld_abs)(const struct bpf_insn *orig,
			  struct bpf_insn *insn_buf);
	u32 (*convert_ctx_access)(enum bpf_access_type type,
				  const struct bpf_insn *src,
				  struct bpf_insn *dst,
				  struct bpf_prog *prog, u32 *target_size);
};

struct bpf_prog_offload_ops {
	/* verifier basic callbacks */
	int (*insn_hook)(struct bpf_verifier_env *env,
			 int insn_idx, int prev_insn_idx);
	int (*finalize)(struct bpf_verifier_env *env);
	/* verifier optimization callbacks (called after .finalize) */
	int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
			    struct bpf_insn *insn);
	int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
	/* program management callbacks */
	int (*prepare)(struct bpf_prog *prog);
	int (*translate)(struct bpf_prog *prog);
	void (*destroy)(struct bpf_prog *prog);
};

struct bpf_prog_offload {
	struct bpf_prog *prog;
	struct net_device *netdev;
	struct bpf_offload_dev *offdev;
	void *dev_priv;
	struct list_head offloads;
	bool dev_state;
	bool opt_failed;
	void *jited_image;
	u32 jited_len;
};

enum bpf_cgroup_storage_type {
	BPF_CGROUP_STORAGE_SHARED,
	BPF_CGROUP_STORAGE_PERCPU,
	__BPF_CGROUP_STORAGE_MAX
};

#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX

struct bpf_prog_stats {
	u64 cnt;
	u64 nsecs;
	struct u64_stats_sync syncp;
};
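
/* Illustrative reader sketch (cf. bpf_prog_get_stats() in
 * kernel/bpf/syscall.c): the per-cpu counters must be sampled under the
 * syncp seqcount so that cnt and nsecs are seen consistently on 32-bit
 * kernels ('st', 'tcnt' and 'tnsecs' are hypothetical locals):
 *
 *	do {
 *		start = u64_stats_fetch_begin_irq(&st->syncp);
 *		tnsecs = st->nsecs;
 *		tcnt = st->cnt;
 *	} while (u64_stats_fetch_retry_irq(&st->syncp, start));
 */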

struct bpf_prog_aux {
	atomic_t refcnt;
	u32 used_map_cnt;
	u32 max_ctx_offset;
	u32 max_pkt_offset;
	u32 max_tp_access;
	u32 stack_depth;
	u32 id;
	u32 func_cnt; /* used by non-func prog as the number of func progs */
	u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
	bool offload_requested;
	struct bpf_prog **func;
	void *jit_data; /* JIT specific data. arch dependent */
	struct latch_tree_node ksym_tnode;
	struct list_head ksym_lnode;
	const struct bpf_prog_ops *ops;
	struct bpf_map **used_maps;
	struct bpf_prog *prog;
	struct user_struct *user;
	u64 load_time; /* ns since boottime */
	struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
	void *security;
#endif
	struct bpf_prog_offload *offload;
	struct btf *btf;
	struct bpf_func_info *func_info;
	/* bpf_line_info loaded from userspace. linfo->insn_off
	 * has the xlated insn offset.
	 * Both the main and sub prog share the same linfo.
	 * The subprog can access its first linfo by
	 * using the linfo_idx.
	 */
	struct bpf_line_info *linfo;
	/* jited_linfo is the jited addr of the linfo. It has a
	 * one-to-one mapping to linfo:
	 * jited_linfo[i] is the jited addr for linfo[i]->insn_off.
	 * Both the main and sub prog share the same jited_linfo.
	 * The subprog can access its first jited_linfo by
	 * using the linfo_idx.
	 */
	void **jited_linfo;
	u32 func_info_cnt;
	u32 nr_linfo;
	/* subprog can use linfo_idx to access its first linfo and
	 * jited_linfo.
	 * main prog always has linfo_idx == 0
	 */
	u32 linfo_idx;
	struct bpf_prog_stats __percpu *stats;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};

struct bpf_array {
	struct bpf_map map;
	u32 elem_size;
	u32 index_mask;
	/* 'ownership' of prog_array is claimed by the first program that
	 * is going to use this map or by the first program whose FD is
	 * stored in the map, to make sure that all callers and callees have
	 * the same prog_type and JITed flag
	 */
	enum bpf_prog_type owner_prog_type;
	bool owner_jited;
	union {
		char value[0] __aligned(8);
		void *ptrs[0] __aligned(8);
		void __percpu *pptrs[0] __aligned(8);
	};
};

#define BPF_COMPLEXITY_LIMIT_INSNS	1000000 /* yes. 1M insns */
#define MAX_TAIL_CALL_CNT 32

#define BPF_F_ACCESS_MASK	(BPF_F_RDONLY |		\
				 BPF_F_RDONLY_PROG |	\
				 BPF_F_WRONLY |		\
				 BPF_F_WRONLY_PROG)

#define BPF_MAP_CAN_READ  BIT(0)
#define BPF_MAP_CAN_WRITE BIT(1)

static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
{
	u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);

	/* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
	 * not possible.
	 */
	if (access_flags & BPF_F_RDONLY_PROG)
		return BPF_MAP_CAN_READ;
	else if (access_flags & BPF_F_WRONLY_PROG)
		return BPF_MAP_CAN_WRITE;
	else
		return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
}

static inline bool bpf_map_flags_access_ok(u32 access_flags)
{
	return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
	       (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
}
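
/* The resulting capability mapping, for reference (derived directly from
 * bpf_map_flags_to_cap() above):
 *
 *	BPF_F_RDONLY_PROG	-> BPF_MAP_CAN_READ
 *	BPF_F_WRONLY_PROG	-> BPF_MAP_CAN_WRITE
 *	neither flag		-> BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE
 *	both flags		-> rejected by bpf_map_flags_access_ok()
 */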

struct bpf_event_entry {
	struct perf_event *event;
	struct file *perf_file;
	struct file *map_file;
	struct rcu_head rcu;
};

bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);

const struct bpf_func_proto *bpf_get_trace_printk_proto(void);

typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
					unsigned long off, unsigned long len);
typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
					const struct bpf_insn *src,
					struct bpf_insn *dst,
					struct bpf_prog *prog,
					u32 *target_size);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);

/* an array of programs to be executed under the RCU read lock.
 *
 * Typical usage:
 * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN);
 *
 * the structure returned by bpf_prog_array_alloc() should be populated
 * with program pointers and the last pointer must be NULL.
 * The user has to keep refcnt on the program and make sure the program
 * is removed from the array before bpf_prog_put().
 * The 'struct bpf_prog_array *' should only be replaced with xchg()
 * since other cpus are walking the array of pointers in parallel.
 */
struct bpf_prog_array_item {
	struct bpf_prog *prog;
	struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array {
	struct rcu_head rcu;
	struct bpf_prog_array_item items[0];
};

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array __rcu *progs);
int bpf_prog_array_length(struct bpf_prog_array __rcu *progs);
int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
				__u32 __user *prog_ids, u32 cnt);

void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs,
				struct bpf_prog *old_prog);
int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			struct bpf_prog_array **new_array);

#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null)	\
	({								\
		struct bpf_prog_array_item *_item;			\
		struct bpf_prog *_prog;					\
		struct bpf_prog_array *_array;				\
		u32 _ret = 1;						\
		preempt_disable();					\
		rcu_read_lock();					\
		_array = rcu_dereference(array);			\
		if (unlikely(check_non_null && !_array))		\
			goto _out;					\
		_item = &_array->items[0];				\
		while ((_prog = READ_ONCE(_item->prog))) {		\
			bpf_cgroup_storage_set(_item->cgroup_storage);	\
			_ret &= func(_prog, ctx);			\
			_item++;					\
		}							\
_out:									\
		rcu_read_unlock();					\
		preempt_enable();					\
		_ret;							\
	 })

#define BPF_PROG_RUN_ARRAY(array, ctx, func)		\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, false)

#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func)	\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, true)
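
/* Note on the return value: _ret starts at 1 and is AND-ed with each
 * program's return code, so the RUN_ARRAY macros evaluate to 1 only if
 * every attached program returned 1. An illustrative caller (loosely
 * following the cgroup skb filter path) would do:
 *
 *	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
 *				 bpf_prog_run_save_cb);
 *	return ret == 1 ? 0 : -EPERM;
 */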

#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);

extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;

#define BPF_PROG_TYPE(_id, _name) \
	extern const struct bpf_prog_ops _name ## _prog_ops; \
	extern const struct bpf_verifier_ops _name ## _verifier_ops;
#define BPF_MAP_TYPE(_id, _ops) \
	extern const struct bpf_map_ops _ops;
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE

extern const struct bpf_prog_ops bpf_offload_prog_ops;
extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
extern const struct bpf_verifier_ops xdp_analyzer_ops;

struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv);
struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
int __bpf_prog_charge(struct user_struct *user, u32 pages);
void __bpf_prog_uncharge(struct user_struct *user, u32 pages);

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);

struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
int bpf_map_precharge_memlock(u32 pages);
int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
void *bpf_map_area_alloc(size_t size, int numa_node);
void bpf_map_area_free(void *base);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);

extern int sysctl_unprivileged_bpf_disabled;
extern int sysctl_bpf_stats_enabled;

int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);

int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
int bpf_obj_get_user(const char __user *pathname, int flags);

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 flags);
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 flags);

int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);

int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags);
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags);
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);

int bpf_get_file_flag(int flags);
int bpf_check_uarg_tail_zero(void __user *uaddr, size_t expected_size,
			     size_t actual_size);

/* memcpy that is used with 8-byte aligned pointers and a multiple-of-8 size,
 * forced to use 'long' read/writes to try to atomically copy long counters.
 * Best-effort only. No barriers here, since it _will_ race with concurrent
 * updates from BPF programs. Called from the bpf syscall and mostly used
 * with a size of 8 or 16 bytes, so ask the compiler to inline it.
 */
static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
{
	const long *lsrc = src;
	long *ldst = dst;

	size /= sizeof(long);
	while (size--)
		*ldst++ = *lsrc++;
}
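
/* Illustrative use, following the per-cpu copy paths (cf.
 * bpf_percpu_hash_copy() in kernel/bpf/hashtab.c); 'pptr', 'value' and
 * 'off' are hypothetical here:
 *
 *	size = round_up(map->value_size, 8);
 *	for_each_possible_cpu(cpu) {
 *		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
 *		off += size;
 *	}
 */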

/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr,
	      union bpf_attr __user *uattr);
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);

/* Map specifics */
struct xdp_buff;
struct sk_buff;

struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
void __dev_map_insert_ctx(struct bpf_map *map, u32 index);
void __dev_map_flush(struct bpf_map *map);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog);

struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
void __cpu_map_insert_ctx(struct bpf_map *map, u32 index);
void __cpu_map_flush(struct bpf_map *map);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
		    struct net_device *dev_rx);

/* Return the map's numa node, as specified by userspace */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
{
	return (attr->map_flags & BPF_F_NUMA_NODE) ?
		attr->numa_node : NUMA_NO_NODE;
}
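
/* Userspace side (illustrative): the numa_node field is only honoured when
 * the flag is also set in bpf_attr before BPF_MAP_CREATE; 'attr' and 'node'
 * are hypothetical here:
 *
 *	attr.map_flags |= BPF_F_NUMA_NODE;
 *	attr.numa_node = node;
 */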

struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
int array_map_alloc_check(union bpf_attr *attr);

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr);
#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
						     enum bpf_prog_type type,
						     bool attach_drv)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog,
							   int i)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_put(struct bpf_prog *prog)
{
}

static inline struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog *__must_check
bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	return 0;
}

static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
}

static inline int bpf_obj_get_user(const char __user *pathname, int flags)
{
	return -EOPNOTSUPP;
}

static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map,
						       u32 key)
{
	return NULL;
}

static inline void __dev_map_insert_ctx(struct bpf_map *map, u32 index)
{
}

static inline void __dev_map_flush(struct bpf_map *map)
{
}

struct xdp_buff;
struct bpf_dtab_netdev;

static inline
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return 0;
}

struct sk_buff;

static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
					   struct sk_buff *skb,
					   struct bpf_prog *xdp_prog)
{
	return 0;
}

static inline
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
	return NULL;
}

static inline void __cpu_map_insert_ctx(struct bpf_map *map, u32 index)
{
}

static inline void __cpu_map_flush(struct bpf_map *map)
{
}

static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
				  struct xdp_buff *xdp,
				  struct net_device *dev_rx)
{
	return 0;
}

static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
						      enum bpf_prog_type type)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
						   const union bpf_attr *kattr,
						   union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */

static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
						 enum bpf_prog_type type)
{
	return bpf_prog_get_type_dev(ufd, type, false);
}

bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
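
/* Illustrative lifetime pattern for the getters above ('ufd' is a
 * hypothetical user-supplied fd; cf. the socket filter attach path): a
 * reference is held while the program is attached and dropped with
 * bpf_prog_put() on detach:
 *
 *	prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
 *	if (IS_ERR(prog))
 *		return PTR_ERR(prog);
 *	...
 *	bpf_prog_put(prog);
 */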

int bpf_prog_offload_compile(struct bpf_prog *prog);
void bpf_prog_offload_destroy(struct bpf_prog *prog);
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog);

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags);
int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
int bpf_map_offload_get_next_key(struct bpf_map *map,
				 void *key, void *next_key);

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);

struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev);
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev);
bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);

#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);

static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
	return aux->offload_requested;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return unlikely(map->ops == &bpf_map_offload_ops);
}

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
void bpf_map_offload_map_free(struct bpf_map *map);
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
					union bpf_attr *attr)
{
	return -EOPNOTSUPP;
}

static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
{
	return false;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return false;
}

static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_map_offload_map_free(struct bpf_map *map)
{
}
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */

#if defined(CONFIG_BPF_STREAM_PARSER)
int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which);
int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
#else
static inline int sock_map_prog_update(struct bpf_map *map,
				       struct bpf_prog *prog, u32 which)
{
	return -EOPNOTSUPP;
}

static inline int sock_map_get_from_fd(const union bpf_attr *attr,
				       struct bpf_prog *prog)
{
	return -EINVAL;
}
#endif

#if defined(CONFIG_XDP_SOCKETS)
struct xdp_sock;
struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, u32 key);
int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
		       struct xdp_sock *xs);
void __xsk_map_flush(struct bpf_map *map);
#else
struct xdp_sock;
static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
						     u32 key)
{
	return NULL;
}

static inline int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
				     struct xdp_sock *xs)
{
	return -EOPNOTSUPP;
}

static inline void __xsk_map_flush(struct bpf_map *map)
{
}
#endif

#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags);
#else
static inline void bpf_sk_reuseport_detach(struct sock *sk)
{
}

#ifdef CONFIG_BPF_SYSCALL
static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
						     void *key, void *value)
{
	return -EOPNOTSUPP;
}

static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
						     void *key, void *value,
						     u64 map_flags)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */

/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_map_push_elem_proto;
extern const struct bpf_func_proto bpf_map_pop_elem_proto;
extern const struct bpf_func_proto bpf_map_peek_elem_proto;

extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_get_stack_proto;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
extern const struct bpf_func_proto bpf_spin_lock_proto;
extern const struct bpf_func_proto bpf_spin_unlock_proto;
extern const struct bpf_func_proto bpf_get_local_storage_proto;
extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;

/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

#if defined(CONFIG_NET)
bool bpf_sock_common_is_valid_access(int off, int size,
				     enum bpf_access_type type,
				     struct bpf_insn_access_aux *info);
bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
			      struct bpf_insn_access_aux *info);
u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
				const struct bpf_insn *si,
				struct bpf_insn *insn_buf,
				struct bpf_prog *prog,
				u32 *target_size);
#else
static inline bool bpf_sock_common_is_valid_access(int off, int size,
						   enum bpf_access_type type,
						   struct bpf_insn_access_aux *info)
{
	return false;
}
static inline bool bpf_sock_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    struct bpf_insn_access_aux *info)
{
	return false;
}
static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
					      const struct bpf_insn *si,
					      struct bpf_insn *insn_buf,
					      struct bpf_prog *prog,
					      u32 *target_size)
{
	return 0;
}
#endif

#ifdef CONFIG_INET
bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);
#else
static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
#endif /* CONFIG_INET */

#endif /* _LINUX_BPF_H */