/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1

#include <uapi/linux/bpf.h>

#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/wait.h>
#include <linux/u64_stats_sync.h>

struct bpf_verifier_env;
struct bpf_verifier_log;
struct perf_event;
struct bpf_prog;
struct bpf_map;
struct sock;
struct seq_file;
struct btf;
struct btf_type;
struct exception_table_entry;

extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;

/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
	/* funcs callable from userspace (via syscall) */
	int (*map_alloc_check)(union bpf_attr *attr);
	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
	void (*map_release)(struct bpf_map *map, struct file *map_file);
	void (*map_free)(struct bpf_map *map);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
	void (*map_release_uref)(struct bpf_map *map);
	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);

	/* funcs callable from userspace and from eBPF programs */
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_map *map, void *key);
	int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
	int (*map_pop_elem)(struct bpf_map *map, void *value);
	int (*map_peek_elem)(struct bpf_map *map, void *value);

	/* funcs called by prog_array and perf_event_array map */
	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
				int fd);
	void (*map_fd_put_ptr)(void *ptr);
	u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
	u32 (*map_fd_sys_lookup_elem)(void *ptr);
	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
				  struct seq_file *m);
	int (*map_check_btf)(const struct bpf_map *map,
			     const struct btf *btf,
			     const struct btf_type *key_type,
			     const struct btf_type *value_type);

	/* Direct value access helpers. */
	int (*map_direct_value_addr)(const struct bpf_map *map,
				     u64 *imm, u32 off);
	int (*map_direct_value_meta)(const struct bpf_map *map,
				     u64 imm, u32 *off);
};

struct bpf_map_memory {
	u32 pages;
	struct user_struct *user;
};

struct bpf_map {
	/* The first two cachelines with read-mostly members of which some
	 * are also accessed in fast-path (e.g. ops, max_entries).
	 */
	const struct bpf_map_ops *ops ____cacheline_aligned;
	struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	enum bpf_map_type map_type;
	u32 key_size;
	u32 value_size;
	u32 max_entries;
	u32 map_flags;
	int spin_lock_off; /* >=0 valid offset, <0 error */
	u32 id;
	int numa_node;
	u32 btf_key_type_id;
	u32 btf_value_type_id;
	struct btf *btf;
	struct bpf_map_memory memory;
	bool unpriv_array;
	bool frozen; /* write-once */
	/* 48 bytes hole */

	/* The 3rd and 4th cacheline with misc members to avoid false sharing
	 * particularly with refcounting.
	 */
	atomic_t refcnt ____cacheline_aligned;
	atomic_t usercnt;
	struct work_struct work;
	char name[BPF_OBJ_NAME_LEN];
};
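/* Illustrative sketch (editorial addition, not part of the original header):
 * a map implementation fills in a bpf_map_ops instance and registers it via
 * linux/bpf_types.h. The callback names below are hypothetical and the ops
 * table is trimmed down, but the shape mirrors how in-tree map types wire up:
 *
 *	static void *example_map_lookup_elem(struct bpf_map *map, void *key)
 *	{
 *		return NULL;
 *	}
 *
 *	const struct bpf_map_ops example_map_ops = {
 *		.map_alloc_check	= example_map_alloc_check,
 *		.map_alloc		= example_map_alloc,
 *		.map_free		= example_map_free,
 *		.map_get_next_key	= example_map_get_next_key,
 *		.map_lookup_elem	= example_map_lookup_elem,
 *		.map_update_elem	= example_map_update_elem,
 *		.map_delete_elem	= example_map_delete_elem,
 *	};
 */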
static inline bool map_value_has_spin_lock(const struct bpf_map *map)
{
	return map->spin_lock_off >= 0;
}

static inline void check_and_init_map_lock(struct bpf_map *map, void *dst)
{
	if (likely(!map_value_has_spin_lock(map)))
		return;
	*(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
		(struct bpf_spin_lock){};
}

/* copy everything but bpf_spin_lock */
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
	if (unlikely(map_value_has_spin_lock(map))) {
		u32 off = map->spin_lock_off;

		memcpy(dst, src, off);
		memcpy(dst + off + sizeof(struct bpf_spin_lock),
		       src + off + sizeof(struct bpf_spin_lock),
		       map->value_size - off - sizeof(struct bpf_spin_lock));
	} else {
		memcpy(dst, src, map->value_size);
	}
}
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src);
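/* Illustrative sketch (editorial addition): with a map value declared in a
 * BPF program as
 *
 *	struct val {
 *		int cnt;
 *		struct bpf_spin_lock lock;
 *		int data;
 *	};
 *
 * spin_lock_off is the byte offset of 'lock' inside the value. copy_map_value()
 * copies the bytes before and after 'lock' but never the lock word itself, so
 * a lookup/update cannot clobber a lock another CPU may be holding, and
 * check_and_init_map_lock() zero-initializes the lock word when a fresh value
 * is created.
 */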
struct bpf_offload_dev;
struct bpf_offloaded_map;

struct bpf_map_dev_ops {
	int (*map_get_next_key)(struct bpf_offloaded_map *map,
				void *key, void *next_key);
	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value);
	int (*map_update_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
};

struct bpf_offloaded_map {
	struct bpf_map map;
	struct net_device *netdev;
	const struct bpf_map_dev_ops *dev_ops;
	void *dev_priv;
	struct list_head offloads;
};

static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
{
	return container_of(map, struct bpf_offloaded_map, map);
}

static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
	return map->btf && map->ops->map_seq_show_elem;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type);

extern const struct bpf_map_ops bpf_map_offload_ops;

/* function argument constraints */
enum bpf_arg_type {
	ARG_DONTCARE = 0,	/* unused argument in helper function */

	/* the following constraints used to prototype
	 * bpf_map_lookup/update/delete_elem() functions
	 */
	ARG_CONST_MAP_PTR,	/* const argument used as pointer to bpf_map */
	ARG_PTR_TO_MAP_KEY,	/* pointer to stack used as map key */
	ARG_PTR_TO_MAP_VALUE,	/* pointer to stack used as map value */
	ARG_PTR_TO_UNINIT_MAP_VALUE,	/* pointer to valid memory used to store a map value */
	ARG_PTR_TO_MAP_VALUE_OR_NULL,	/* pointer to stack used as map value or NULL */

	/* the following constraints used to prototype bpf_memcmp() and other
	 * functions that access data on eBPF program stack
	 */
	ARG_PTR_TO_MEM,		/* pointer to valid memory (stack, packet, map value) */
	ARG_PTR_TO_MEM_OR_NULL,	/* pointer to valid memory or NULL */
	ARG_PTR_TO_UNINIT_MEM,	/* pointer to memory does not need to be initialized,
				 * helper function must fill all bytes or clear
				 * them in error case.
				 */

	ARG_CONST_SIZE,		/* number of bytes accessed from memory */
	ARG_CONST_SIZE_OR_ZERO,	/* number of bytes accessed from memory or 0 */

	ARG_PTR_TO_CTX,		/* pointer to context */
	ARG_ANYTHING,		/* any (initialized) argument is ok */
	ARG_PTR_TO_SPIN_LOCK,	/* pointer to bpf_spin_lock */
	ARG_PTR_TO_SOCK_COMMON,	/* pointer to sock_common */
	ARG_PTR_TO_INT,		/* pointer to int */
	ARG_PTR_TO_LONG,	/* pointer to long */
	ARG_PTR_TO_SOCKET,	/* pointer to bpf_sock (fullsock) */
	ARG_PTR_TO_BTF_ID,	/* pointer to in-kernel struct */
};

/* type of values returned from helper functions */
enum bpf_return_type {
	RET_INTEGER,			/* function returns integer */
	RET_VOID,			/* function doesn't return anything */
	RET_PTR_TO_MAP_VALUE,		/* returns a pointer to map elem value */
	RET_PTR_TO_MAP_VALUE_OR_NULL,	/* returns a pointer to map elem value or NULL */
	RET_PTR_TO_SOCKET_OR_NULL,	/* returns a pointer to a socket or NULL */
	RET_PTR_TO_TCP_SOCK_OR_NULL,	/* returns a pointer to a tcp_sock or NULL */
	RET_PTR_TO_SOCK_COMMON_OR_NULL,	/* returns a pointer to a sock_common or NULL */
};

/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
 * instructions after verifying
 */
struct bpf_func_proto {
	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
	bool gpl_only;
	bool pkt_access;
	enum bpf_return_type ret_type;
	union {
		struct {
			enum bpf_arg_type arg1_type;
			enum bpf_arg_type arg2_type;
			enum bpf_arg_type arg3_type;
			enum bpf_arg_type arg4_type;
			enum bpf_arg_type arg5_type;
		};
		enum bpf_arg_type arg_type[5];
	};
	u32 *btf_id; /* BTF ids of arguments */
};
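/* Illustrative sketch (editorial addition): a helper describes itself to the
 * verifier with one of these prototypes. The layout below mirrors how the
 * bpf_map_lookup_elem helper is typically described, but its actual definition
 * lives in kernel/bpf/helpers.c, not in this header:
 *
 *	const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 *		.func		= bpf_map_lookup_elem,
 *		.gpl_only	= false,
 *		.pkt_access	= true,
 *		.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_KEY,
 *	};
 *
 * A program type's get_func_proto() callback (see bpf_verifier_ops below)
 * returns a pointer like &bpf_map_lookup_elem_proto for the corresponding
 * BPF_FUNC_* id, or NULL if the helper is not allowed for that program type.
 */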
/* bpf_context is an intentionally undefined structure. Pointer to bpf_context
 * is the first argument to eBPF programs.
 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
 */
struct bpf_context;

enum bpf_access_type {
	BPF_READ = 1,
	BPF_WRITE = 2
};

/* types of values stored in eBPF registers */
/* Pointer types represent:
 * pointer
 * pointer + imm
 * pointer + (u16) var
 * pointer + (u16) var + imm
 * if (range > 0) then [ptr, ptr + range - off) is safe to access
 * if (id > 0) means that some 'var' was added
 * if (off > 0) means that 'imm' was added
 */
enum bpf_reg_type {
	NOT_INIT = 0,		 /* nothing was written into register */
	SCALAR_VALUE,		 /* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		 /* reg points to bpf_context */
	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
	PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
	PTR_TO_STACK,		 /* reg == frame_pointer + offset */
	PTR_TO_PACKET_META,	 /* skb->data - meta_len */
	PTR_TO_PACKET,		 /* reg points to skb->data */
	PTR_TO_PACKET_END,	 /* skb->data + headlen */
	PTR_TO_FLOW_KEYS,	 /* reg points to bpf_flow_keys */
	PTR_TO_SOCKET,		 /* reg points to struct bpf_sock */
	PTR_TO_SOCKET_OR_NULL,	 /* reg points to struct bpf_sock or NULL */
	PTR_TO_SOCK_COMMON,	 /* reg points to sock_common */
	PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */
	PTR_TO_TCP_SOCK,	 /* reg points to struct tcp_sock */
	PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
	PTR_TO_TP_BUFFER,	 /* reg points to a writable raw tp's buffer */
	PTR_TO_XDP_SOCK,	 /* reg points to struct xdp_sock */
	PTR_TO_BTF_ID,		 /* reg points to kernel struct */
};

/* The information passed from prog-specific *_is_valid_access
 * back to the verifier.
 */
struct bpf_insn_access_aux {
	enum bpf_reg_type reg_type;
	union {
		int ctx_field_size;
		u32 btf_id;
	};
	struct bpf_verifier_log *log; /* for verbose logs */
};

static inline void
bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
{
	aux->ctx_field_size = size;
}

struct bpf_prog_ops {
	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
			union bpf_attr __user *uattr);
};

struct bpf_verifier_ops {
	/* return eBPF function prototype for verification */
	const struct bpf_func_proto *
	(*get_func_proto)(enum bpf_func_id func_id,
			  const struct bpf_prog *prog);

	/* return true if 'size' wide access at offset 'off' within bpf_context
	 * with 'type' (read or write) is allowed
	 */
	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info);
	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
			    const struct bpf_prog *prog);
	int (*gen_ld_abs)(const struct bpf_insn *orig,
			  struct bpf_insn *insn_buf);
	u32 (*convert_ctx_access)(enum bpf_access_type type,
				  const struct bpf_insn *src,
				  struct bpf_insn *dst,
				  struct bpf_prog *prog, u32 *target_size);
};
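/* Illustrative sketch (editorial addition): an is_valid_access() callback
 * rejects out-of-range or misaligned context accesses and may refine the
 * register type reported back via 'info'. The program type and context struct
 * named here are hypothetical:
 *
 *	static bool example_is_valid_access(int off, int size,
 *					    enum bpf_access_type type,
 *					    const struct bpf_prog *prog,
 *					    struct bpf_insn_access_aux *info)
 *	{
 *		if (off < 0 || off >= sizeof(struct example_ctx) ||
 *		    off % size || size != sizeof(__u32))
 *			return false;
 *		if (type == BPF_WRITE)
 *			return false;
 *		return true;
 *	}
 */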
struct bpf_prog_offload_ops {
	/* verifier basic callbacks */
	int (*insn_hook)(struct bpf_verifier_env *env,
			 int insn_idx, int prev_insn_idx);
	int (*finalize)(struct bpf_verifier_env *env);
	/* verifier optimization callbacks (called after .finalize) */
	int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
			    struct bpf_insn *insn);
	int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
	/* program management callbacks */
	int (*prepare)(struct bpf_prog *prog);
	int (*translate)(struct bpf_prog *prog);
	void (*destroy)(struct bpf_prog *prog);
};

struct bpf_prog_offload {
	struct bpf_prog *prog;
	struct net_device *netdev;
	struct bpf_offload_dev *offdev;
	void *dev_priv;
	struct list_head offloads;
	bool dev_state;
	bool opt_failed;
	void *jited_image;
	u32 jited_len;
};

enum bpf_cgroup_storage_type {
	BPF_CGROUP_STORAGE_SHARED,
	BPF_CGROUP_STORAGE_PERCPU,
	__BPF_CGROUP_STORAGE_MAX
};

#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX

/* The longest tracepoint has 12 args.
 * See include/trace/bpf_probe.h
 */
#define MAX_BPF_FUNC_ARGS 12

struct bpf_prog_stats {
	u64 cnt;
	u64 nsecs;
	struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));

struct bpf_prog_aux {
	atomic_t refcnt;
	u32 used_map_cnt;
	u32 max_ctx_offset;
	u32 max_pkt_offset;
	u32 max_tp_access;
	u32 stack_depth;
	u32 id;
	u32 func_cnt; /* used by non-func prog as the number of func progs */
	u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
	u32 attach_btf_id; /* in-kernel BTF type id to attach to */
	bool verifier_zext; /* Zero extensions have been inserted by verifier. */
	bool offload_requested;
	bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
	/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
	const struct btf_type *attach_func_proto;
	/* function name for valid attach_btf_id */
	const char *attach_func_name;
	struct bpf_prog **func;
	void *jit_data; /* JIT specific data. arch dependent */
	struct latch_tree_node ksym_tnode;
	struct list_head ksym_lnode;
	const struct bpf_prog_ops *ops;
	struct bpf_map **used_maps;
	struct bpf_prog *prog;
	struct user_struct *user;
	u64 load_time; /* ns since boottime */
	struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
	void *security;
#endif
	struct bpf_prog_offload *offload;
	struct btf *btf;
	struct bpf_func_info *func_info;
	/* bpf_line_info loaded from userspace. linfo->insn_off
	 * has the xlated insn offset.
	 * Both the main and sub prog share the same linfo.
	 * The subprog can access its first linfo by
	 * using the linfo_idx.
	 */
	struct bpf_line_info *linfo;
	/* jited_linfo is the jited addr of the linfo. It has a
	 * one to one mapping to linfo:
	 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
	 * Both the main and sub prog share the same jited_linfo.
	 * The subprog can access its first jited_linfo by
	 * using the linfo_idx.
	 */
	void **jited_linfo;
	u32 func_info_cnt;
	u32 nr_linfo;
	/* subprog can use linfo_idx to access its first linfo and
	 * jited_linfo.
	 * main prog always has linfo_idx == 0
	 */
	u32 linfo_idx;
	u32 num_exentries;
	struct exception_table_entry *extable;
	struct bpf_prog_stats __percpu *stats;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};

struct bpf_array {
	struct bpf_map map;
	u32 elem_size;
	u32 index_mask;
	/* 'ownership' of prog_array is claimed by the first program that
	 * is going to use this map or by the first program whose FD is stored
	 * in the map, to make sure that all callers and callees have the same
	 * prog_type and JITed flag
	 */
	enum bpf_prog_type owner_prog_type;
	bool owner_jited;
	union {
		char value[0] __aligned(8);
		void *ptrs[0] __aligned(8);
		void __percpu *pptrs[0] __aligned(8);
	};
};

#define BPF_COMPLEXITY_LIMIT_INSNS	1000000 /* yes. 1M insns */
#define MAX_TAIL_CALL_CNT 32

#define BPF_F_ACCESS_MASK	(BPF_F_RDONLY |		\
				 BPF_F_RDONLY_PROG |	\
				 BPF_F_WRONLY |		\
				 BPF_F_WRONLY_PROG)

#define BPF_MAP_CAN_READ  BIT(0)
#define BPF_MAP_CAN_WRITE BIT(1)

static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
{
	u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);

	/* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
	 * not possible.
	 */
	if (access_flags & BPF_F_RDONLY_PROG)
		return BPF_MAP_CAN_READ;
	else if (access_flags & BPF_F_WRONLY_PROG)
		return BPF_MAP_CAN_WRITE;
	else
		return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
}

static inline bool bpf_map_flags_access_ok(u32 access_flags)
{
	return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
	       (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
}
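/* Illustrative sketch (editorial addition): map_alloc_check() implementations
 * typically reject the flag combination where a map would be neither readable
 * nor writable from the program side, e.g.:
 *
 *	if (!bpf_map_flags_access_ok(attr->map_flags))
 *		return -EINVAL;
 *
 * Later, program-side verification consults bpf_map_flags_to_cap() to learn
 * whether BPF_MAP_CAN_READ and/or BPF_MAP_CAN_WRITE apply to the map.
 */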
struct bpf_event_entry {
	struct perf_event *event;
	struct file *perf_file;
	struct file *map_file;
	struct rcu_head rcu;
};

bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);
const char *kernel_type_name(u32 btf_type_id);

const struct bpf_func_proto *bpf_get_trace_printk_proto(void);

typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
					unsigned long off, unsigned long len);
typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
					const struct bpf_insn *src,
					struct bpf_insn *dst,
					struct bpf_prog *prog,
					u32 *target_size);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);

/* An array of programs to be executed under rcu_read_lock().
 *
 * Typical usage:
 * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN);
 *
 * The structure returned by bpf_prog_array_alloc() should be populated
 * with program pointers and the last pointer must be NULL.
 * The user has to keep refcnt on the program and make sure the program
 * is removed from the array before bpf_prog_put().
 * The 'struct bpf_prog_array *' should only be replaced with xchg()
 * since other cpus are walking the array of pointers in parallel.
 */
struct bpf_prog_array_item {
	struct bpf_prog *prog;
	struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array {
	struct rcu_head rcu;
	struct bpf_prog_array_item items[0];
};

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array *progs);
int bpf_prog_array_length(struct bpf_prog_array *progs);
bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
				__u32 __user *prog_ids, u32 cnt);

void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
				struct bpf_prog *old_prog);
int bpf_prog_array_copy_info(struct bpf_prog_array *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			struct bpf_prog_array **new_array);

#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null)	\
	({						\
		struct bpf_prog_array_item *_item;	\
		struct bpf_prog *_prog;			\
		struct bpf_prog_array *_array;		\
		u32 _ret = 1;				\
		preempt_disable();			\
		rcu_read_lock();			\
		_array = rcu_dereference(array);	\
		if (unlikely(check_non_null && !_array))\
			goto _out;			\
		_item = &_array->items[0];		\
		while ((_prog = READ_ONCE(_item->prog))) {		\
			bpf_cgroup_storage_set(_item->cgroup_storage);	\
			_ret &= func(_prog, ctx);	\
			_item++;			\
		}					\
_out:							\
		rcu_read_unlock();			\
		preempt_enable();			\
		_ret;					\
	 })
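/* Illustrative sketch (editorial addition): an attach path builds a new array,
 * publishes it with xchg() as required by the comment above, then frees the
 * old one. 'ctx' and 'new_prog' are hypothetical; locking and RCU annotations
 * are omitted:
 *
 *	struct bpf_prog_array *old_array, *new_array;
 *	int err;
 *
 *	old_array = ctx->progs;
 *	err = bpf_prog_array_copy(old_array, NULL, new_prog, &new_array);
 *	if (err < 0)
 *		return err;
 *	old_array = xchg(&ctx->progs, new_array);
 *	bpf_prog_array_free(old_array);
 *
 * The run path then executes every program in the published array:
 *
 *	ret = BPF_PROG_RUN_ARRAY(ctx->progs, skb, BPF_PROG_RUN);
 */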
/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs
 * so BPF programs can request cwr for TCP packets.
 *
 * Current cgroup skb programs can only return 0 or 1 (0 to drop the
 * packet, 1 to keep it). This macro changes the behavior so the low order bit
 * indicates whether the packet should be dropped (0) or not (1)
 * and the next bit is a congestion notification bit. This could be
 * used by TCP to call tcp_enter_cwr().
 *
 * Hence, new allowed return values of CGROUP EGRESS BPF programs are:
 *   0: drop packet
 *   1: keep packet
 *   2: drop packet and cn
 *   3: keep packet and cn
 *
 * This macro then converts it to one of the NET_XMIT values or an error
 * code that is then interpreted as drop packet (and no cn):
 *   0: NET_XMIT_SUCCESS  skb should be transmitted
 *   1: NET_XMIT_DROP     skb should be dropped and cn
 *   2: NET_XMIT_CN       skb should be transmitted and cn
 *   3: -EPERM            skb should be dropped
 */
#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func)		\
	({						\
		struct bpf_prog_array_item *_item;	\
		struct bpf_prog *_prog;			\
		struct bpf_prog_array *_array;		\
		u32 ret;				\
		u32 _ret = 1;				\
		u32 _cn = 0;				\
		preempt_disable();			\
		rcu_read_lock();			\
		_array = rcu_dereference(array);	\
		_item = &_array->items[0];		\
		while ((_prog = READ_ONCE(_item->prog))) {		\
			bpf_cgroup_storage_set(_item->cgroup_storage);	\
			ret = func(_prog, ctx);		\
			_ret &= (ret & 1);		\
			_cn |= (ret & 2);		\
			_item++;			\
		}					\
		rcu_read_unlock();			\
		preempt_enable();			\
		if (_ret)				\
			_ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS);	\
		else					\
			_ret = (_cn ? NET_XMIT_DROP : -EPERM);		\
		_ret;					\
	})

#define BPF_PROG_RUN_ARRAY(array, ctx, func)		\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, false)

#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func)	\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, true)

#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);

extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;

#define BPF_PROG_TYPE(_id, _name) \
	extern const struct bpf_prog_ops _name ## _prog_ops; \
	extern const struct bpf_verifier_ops _name ## _verifier_ops;
#define BPF_MAP_TYPE(_id, _ops) \
	extern const struct bpf_map_ops _ops;
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
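/* Illustrative sketch (editorial addition): each BPF_PROG_TYPE()/BPF_MAP_TYPE()
 * entry in linux/bpf_types.h expands into the extern declarations above. For
 * instance, an entry of the form
 *
 *	BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter)
 *
 * declares sk_filter_prog_ops and sk_filter_verifier_ops here, while the same
 * include is re-expanded elsewhere (e.g. in kernel/bpf/syscall.c and
 * kernel/bpf/verifier.c) with different macro definitions to build the
 * dispatch tables keyed by program type.
 */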
extern const struct bpf_prog_ops bpf_offload_prog_ops;
extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
extern const struct bpf_verifier_ops xdp_analyzer_ops;

struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv);
struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
int __bpf_prog_charge(struct user_struct *user, u32 pages);
void __bpf_prog_uncharge(struct user_struct *user, u32 pages);

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);

struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map,
						   bool uref);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size);
void bpf_map_charge_finish(struct bpf_map_memory *mem);
void bpf_map_charge_move(struct bpf_map_memory *dst,
			 struct bpf_map_memory *src);
void *bpf_map_area_alloc(size_t size, int numa_node);
void bpf_map_area_free(void *base);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);

extern int sysctl_unprivileged_bpf_disabled;

int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);

int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
int bpf_obj_get_user(const char __user *pathname, int flags);

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 flags);
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 flags);

int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);

int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags);
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags);
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);

int bpf_get_file_flag(int flags);
int bpf_check_uarg_tail_zero(void __user *uaddr, size_t expected_size,
			     size_t actual_size);

/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
 * forced to use 'long' read/writes to try to atomically copy long counters.
 * Best-effort only. No barriers here, since it _will_ race with concurrent
 * updates from BPF programs. Called from bpf syscall and mostly used with
 * size 8 or 16 bytes, so ask compiler to inline it.
 */
static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
{
	const long *lsrc = src;
	long *ldst = dst;

	size /= sizeof(long);
	while (size--)
		*ldst++ = *lsrc++;
}

/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr,
	      union bpf_attr __user *uattr);
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);

/* Map specifics */
struct xdp_buff;
struct sk_buff;

struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key);
void __dev_map_flush(struct bpf_map *map);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog);

struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
void __cpu_map_flush(struct bpf_map *map);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
		    struct net_device *dev_rx);

/* Return map's numa specified by userspace */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
{
	return (attr->map_flags & BPF_F_NUMA_NODE) ?
		attr->numa_node : NUMA_NO_NODE;
}
struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
int array_map_alloc_check(union bpf_attr *attr);

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr);
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
		    const struct bpf_prog *prog,
		    struct bpf_insn_access_aux *info);
int btf_struct_access(struct bpf_verifier_log *log,
		      const struct btf_type *t, int off, int size,
		      enum bpf_access_type atype,
		      u32 *next_btf_id);
u32 btf_resolve_helper_id(struct bpf_verifier_log *log, void *, int);

#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
						     enum bpf_prog_type type,
						     bool attach_drv)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog,
							   int i)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_put(struct bpf_prog *prog)
{
}

static inline struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog *__must_check
bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	return 0;
}

static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
}

static inline int bpf_obj_get_user(const char __user *pathname, int flags)
{
	return -EOPNOTSUPP;
}

static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map,
						       u32 key)
{
	return NULL;
}

static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map,
							    u32 key)
{
	return NULL;
}

static inline void __dev_map_flush(struct bpf_map *map)
{
}

struct xdp_buff;
struct bpf_dtab_netdev;

static inline
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return 0;
}

struct sk_buff;

static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
					   struct sk_buff *skb,
					   struct bpf_prog *xdp_prog)
{
	return 0;
}

static inline
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
	return NULL;
}

static inline void __cpu_map_flush(struct bpf_map *map)
{
}

static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
				  struct xdp_buff *xdp,
				  struct net_device *dev_rx)
{
	return 0;
}

static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
						      enum bpf_prog_type type)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}
static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
						   const union bpf_attr *kattr,
						   union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */

static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
						 enum bpf_prog_type type)
{
	return bpf_prog_get_type_dev(ufd, type, false);
}

bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);

int bpf_prog_offload_compile(struct bpf_prog *prog);
void bpf_prog_offload_destroy(struct bpf_prog *prog);
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog);

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags);
int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
int bpf_map_offload_get_next_key(struct bpf_map *map,
				 void *key, void *next_key);

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);

struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev);
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev);
bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);

#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);

static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
	return aux->offload_requested;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return unlikely(map->ops == &bpf_map_offload_ops);
}

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
void bpf_map_offload_map_free(struct bpf_map *map);
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
					union bpf_attr *attr)
{
	return -EOPNOTSUPP;
}

static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
{
	return false;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return false;
}

static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_map_offload_map_free(struct bpf_map *map)
{
}
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */

#if defined(CONFIG_BPF_STREAM_PARSER)
int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which);
int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
#else
static inline int sock_map_prog_update(struct bpf_map *map,
				       struct bpf_prog *prog, u32 which)
{
	return -EOPNOTSUPP;
}

static inline int sock_map_get_from_fd(const union bpf_attr *attr,
				       struct bpf_prog *prog)
{
	return -EINVAL;
}
#endif

#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags);
#else
static inline void bpf_sk_reuseport_detach(struct sock *sk)
{
}

#ifdef CONFIG_BPF_SYSCALL
static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
						     void *key, void *value)
{
	return -EOPNOTSUPP;
}

static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
						     void *key, void *value,
						     u64 map_flags)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */

/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_map_push_elem_proto;
extern const struct bpf_func_proto bpf_map_pop_elem_proto;
extern const struct bpf_func_proto bpf_map_peek_elem_proto;

extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_get_stack_proto;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
extern const struct bpf_func_proto bpf_spin_lock_proto;
extern const struct bpf_func_proto bpf_spin_unlock_proto;
extern const struct bpf_func_proto bpf_get_local_storage_proto;
extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;
extern const struct bpf_func_proto bpf_tcp_sock_proto;

/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

#if defined(CONFIG_NET)
bool bpf_sock_common_is_valid_access(int off, int size,
				     enum bpf_access_type type,
				     struct bpf_insn_access_aux *info);
bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
			      struct bpf_insn_access_aux *info);
u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
				const struct bpf_insn *si,
				struct bpf_insn *insn_buf,
				struct bpf_prog *prog,
				u32 *target_size);
#else
static inline bool bpf_sock_common_is_valid_access(int off, int size,
						   enum bpf_access_type type,
						   struct bpf_insn_access_aux *info)
{
	return false;
}
static inline bool bpf_sock_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    struct bpf_insn_access_aux *info)
{
	return false;
}
static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
					      const struct bpf_insn *si,
					      struct bpf_insn *insn_buf,
					      struct bpf_prog *prog,
					      u32 *target_size)
{
	return 0;
}
#endif

#ifdef CONFIG_INET
bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);

bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);
#else
static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
#endif /* CONFIG_INET */

#endif /* _LINUX_BPF_H */