/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Linux Socket Filter Data Structures
 */
#ifndef __LINUX_FILTER_H__
#define __LINUX_FILTER_H__

#include <stdarg.h>

#include <linux/atomic.h>
#include <linux/refcount.h>
#include <linux/compat.h>
#include <linux/skbuff.h>
#include <linux/linkage.h>
#include <linux/printk.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/cryptohash.h>
#include <linux/set_memory.h>

#include <net/sch_generic.h>

#include <uapi/linux/filter.h>
#include <uapi/linux/bpf.h>

struct sk_buff;
struct sock;
struct seccomp_data;
struct bpf_prog_aux;

/* ArgX, context and stack frame pointer register positions. Note,
 * Arg1, Arg2, Arg3, etc are used as argument mappings of function
 * calls in BPF_CALL instruction.
 */
#define BPF_REG_ARG1	BPF_REG_1
#define BPF_REG_ARG2	BPF_REG_2
#define BPF_REG_ARG3	BPF_REG_3
#define BPF_REG_ARG4	BPF_REG_4
#define BPF_REG_ARG5	BPF_REG_5
#define BPF_REG_CTX	BPF_REG_6
#define BPF_REG_FP	BPF_REG_10

/* Additional register mappings for converted user programs. */
#define BPF_REG_A	BPF_REG_0
#define BPF_REG_X	BPF_REG_7
#define BPF_REG_TMP	BPF_REG_8

/* Kernel hidden auxiliary/helper register for hardening step.
 * Only used by eBPF JITs. It's nothing more than a temporary
 * register that JITs use internally, only that here it's part
 * of eBPF instructions that have been rewritten for blinding
 * constants. See JIT pre-step in bpf_jit_blind_constants().
 */
#define BPF_REG_AX		MAX_BPF_REG
#define MAX_BPF_JIT_REG		(MAX_BPF_REG + 1)

/* unused opcode to mark special call to bpf_tail_call() helper */
#define BPF_TAIL_CALL	0xf0

/* As per nm, we expose JITed images as text (code) section for
 * kallsyms. That way, tools like perf can find it to match
 * addresses.
 */
#define BPF_SYM_ELF_TYPE	't'

/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK	512

/* Helper macros for filter block array initializers. */

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code = BPF_ALU64 | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off = 0,					\
		.imm = 0 })

#define BPF_ALU32_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code = BPF_ALU | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off = 0,					\
		.imm = 0 })

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code = BPF_ALU64 | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off = 0,					\
		.imm = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code = BPF_ALU | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off = 0,					\
		.imm = IMM })

/* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */

#define BPF_ENDIAN(TYPE, DST, LEN)				\
	((struct bpf_insn) {					\
		.code = BPF_ALU | BPF_END | BPF_SRC(TYPE),	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off = 0,					\
		.imm = LEN })

/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code = BPF_ALU64 | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off = 0,					\
		.imm = 0 })

#define BPF_MOV32_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code = BPF_ALU | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off = 0,					\
		.imm = 0 })

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code = BPF_ALU64 | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off = 0,					\
		.imm = IMM })

#define BPF_MOV32_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code = BPF_ALU | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off = 0,					\
		.imm = IMM })

/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM)					\
	BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code = BPF_LD | BPF_DW | BPF_IMM,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off = 0,					\
		.imm = (__u32) (IMM) }),			\
	((struct bpf_insn) {					\
		.code = 0, /* zero is reserved opcode */	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off = 0,					\
		.imm = ((__u64) (IMM)) >> 32 })

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD)				\
	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)

/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */

#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM)			\
	((struct bpf_insn) {					\
		.code = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE),	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off = 0,					\
		.imm = IMM })

#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM)			\
	((struct bpf_insn) {					\
		.code = BPF_ALU | BPF_MOV | BPF_SRC(TYPE),	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off = 0,					\
		.imm = IMM })

/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

#define BPF_LD_ABS(SIZE, IMM)					\
	((struct bpf_insn) {					\
		.code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off = 0,					\
		.imm = IMM })

/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */

#define BPF_LD_IND(SIZE, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code = BPF_LD | BPF_SIZE(SIZE) | BPF_IND,	\
		.dst_reg = 0,					\
		.src_reg = SRC,					\
		.off = 0,					\
		.imm = IMM })

/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off = OFF,					\
		.imm = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off = OFF,					\
		.imm = 0 })

/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */

#define BPF_STX_XADD(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off = OFF,					\
		.imm = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM)				\
	((struct bpf_insn) {					\
		.code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off = OFF,					\
		.imm = IMM })

/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF)				\
	((struct bpf_insn) {					\
		.code = BPF_JMP | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off = OFF,					\
		.imm = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF)				\
	((struct bpf_insn) {					\
		.code = BPF_JMP | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off = OFF,					\
		.imm = IMM })

/* Unconditional jumps, goto pc + off16 */

#define BPF_JMP_A(OFF)						\
	((struct bpf_insn) {					\
		.code = BPF_JMP | BPF_JA,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off = OFF,					\
		.imm = 0 })

/* Function call */

#define BPF_EMIT_CALL(FUNC)					\
	((struct bpf_insn) {					\
		.code = BPF_JMP | BPF_CALL,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off = 0,					\
		.imm = ((FUNC) - __bpf_call_base) })

/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)			\
	((struct bpf_insn) {					\
		.code = CODE,					\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off = OFF,					\
		.imm = IMM })

/* Program exit */

#define BPF_EXIT_INSN()						\
	((struct bpf_insn) {					\
		.code = BPF_JMP | BPF_EXIT,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off = 0,					\
		.imm = 0 })

/* Internal classic blocks for direct assignment */

#define __BPF_STMT(CODE, K)					\
	((struct sock_filter) BPF_STMT(CODE, K))

#define __BPF_JUMP(CODE, K, JT, JF)				\
	((struct sock_filter) BPF_JUMP(CODE, K, JT, JF))

#define bytes_to_bpf_size(bytes)				\
({								\
	int bpf_size = -EINVAL;					\
								\
	if (bytes == sizeof(u8))				\
		bpf_size = BPF_B;				\
	else if (bytes == sizeof(u16))				\
		bpf_size = BPF_H;				\
	else if (bytes == sizeof(u32))				\
		bpf_size = BPF_W;				\
	else if (bytes == sizeof(u64))				\
		bpf_size = BPF_DW;				\
								\
	bpf_size;						\
})

#define bpf_size_to_bytes(bpf_size)				\
({								\
	int bytes = -EINVAL;					\
								\
	if (bpf_size == BPF_B)					\
		bytes = sizeof(u8);				\
	else if (bpf_size == BPF_H)				\
		bytes = sizeof(u16);				\
	else if (bpf_size == BPF_W)				\
		bytes = sizeof(u32);				\
	else if (bpf_size == BPF_DW)				\
		bytes = sizeof(u64);				\
								\
	bytes;							\
})

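/* Example (editor's illustration, not part of the original header): a tiny
 * eBPF sequence built from the initializer macros above, purely to show how
 * they compose. The array name is hypothetical and the block is compiled
 * out; it clamps the value in r1 to MAX_BPF_STACK and returns it.
 */
#if 0
static const struct bpf_insn bpf_example_clamp[] = {
	/* r0 = r1 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_ARG1),
	/* if r0 > MAX_BPF_STACK, skip the next insn and clamp */
	BPF_JMP_IMM(BPF_JGT, BPF_REG_0, MAX_BPF_STACK, 1),
	/* within bounds: return r0 as-is */
	BPF_EXIT_INSN(),
	/* r0 = MAX_BPF_STACK; return r0 */
	BPF_MOV64_IMM(BPF_REG_0, MAX_BPF_STACK),
	BPF_EXIT_INSN(),
};
#endif
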
#define BPF_SIZEOF(type)					\
({								\
	const int __size = bytes_to_bpf_size(sizeof(type));	\
	BUILD_BUG_ON(__size < 0);				\
	__size;							\
})

#define BPF_FIELD_SIZEOF(type, field)				\
({								\
	const int __size = bytes_to_bpf_size(FIELD_SIZEOF(type, field)); \
	BUILD_BUG_ON(__size < 0);				\
	__size;							\
})

#define BPF_LDST_BYTES(insn)					\
({								\
	const int __size = bpf_size_to_bytes(BPF_SIZE(insn->code)); \
	WARN_ON(__size < 0);					\
	__size;							\
})

#define __BPF_MAP_0(m, v, ...) v
#define __BPF_MAP_1(m, v, t, a, ...) m(t, a)
#define __BPF_MAP_2(m, v, t, a, ...) m(t, a), __BPF_MAP_1(m, v, __VA_ARGS__)
#define __BPF_MAP_3(m, v, t, a, ...) m(t, a), __BPF_MAP_2(m, v, __VA_ARGS__)
#define __BPF_MAP_4(m, v, t, a, ...) m(t, a), __BPF_MAP_3(m, v, __VA_ARGS__)
#define __BPF_MAP_5(m, v, t, a, ...) m(t, a), __BPF_MAP_4(m, v, __VA_ARGS__)

#define __BPF_REG_0(...) __BPF_PAD(5)
#define __BPF_REG_1(...) __BPF_MAP(1, __VA_ARGS__), __BPF_PAD(4)
#define __BPF_REG_2(...) __BPF_MAP(2, __VA_ARGS__), __BPF_PAD(3)
#define __BPF_REG_3(...) __BPF_MAP(3, __VA_ARGS__), __BPF_PAD(2)
#define __BPF_REG_4(...) __BPF_MAP(4, __VA_ARGS__), __BPF_PAD(1)
#define __BPF_REG_5(...) __BPF_MAP(5, __VA_ARGS__)

#define __BPF_MAP(n, ...) __BPF_MAP_##n(__VA_ARGS__)
#define __BPF_REG(n, ...) __BPF_REG_##n(__VA_ARGS__)

#define __BPF_CAST(t, a)					\
	(__force t)						\
	(__force						\
	 typeof(__builtin_choose_expr(sizeof(t) == sizeof(unsigned long), \
				      (unsigned long)0, (t)0))) a
#define __BPF_V void
#define __BPF_N

#define __BPF_DECL_ARGS(t, a) t a
#define __BPF_DECL_REGS(t, a) u64 a

#define __BPF_PAD(n)						\
	__BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2, \
		  u64, __ur_3, u64, __ur_4, u64, __ur_5)

#define BPF_CALL_x(x, name, ...)				\
	static __always_inline					\
	u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
	u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \
	u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__))	\
	{							\
		return ____##name(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
	}							\
	static __always_inline					\
	u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))

#define BPF_CALL_0(name, ...)	BPF_CALL_x(0, name, __VA_ARGS__)
#define BPF_CALL_1(name, ...)	BPF_CALL_x(1, name, __VA_ARGS__)
#define BPF_CALL_2(name, ...)	BPF_CALL_x(2, name, __VA_ARGS__)
#define BPF_CALL_3(name, ...)	BPF_CALL_x(3, name, __VA_ARGS__)
#define BPF_CALL_4(name, ...)	BPF_CALL_x(4, name, __VA_ARGS__)
#define BPF_CALL_5(name, ...)	BPF_CALL_x(5, name, __VA_ARGS__)

#define bpf_ctx_range(TYPE, MEMBER)					\
	offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
#define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2)			\
	offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1

#define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE)			\
	({								\
		BUILD_BUG_ON(FIELD_SIZEOF(TYPE, MEMBER) != (SIZE));	\
		*(PTR_SIZE) = (SIZE);					\
		offsetof(TYPE, MEMBER);					\
	})

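/* Example (editor's illustration, not part of the original header): how a
 * helper body is typically defined with the BPF_CALL_x() wrappers above.
 * The helper name and body are hypothetical and the block is compiled out;
 * in-kernel helpers are additionally described by a bpf_func_proto.
 */
#if 0
BPF_CALL_2(bpf_example_add, u32, a, u32, b)
{
	/* BPF_CALL_2 emits a u64 (*)(u64, u64, u64, u64, u64) entry point
	 * that casts the register values back to the declared types via
	 * __BPF_CAST before invoking this inlined body.
	 */
	return a + b;
}
#endif
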
#ifdef CONFIG_COMPAT
/* A struct sock_filter is architecture independent. */
struct compat_sock_fprog {
	u16		len;
	compat_uptr_t	filter;		/* struct sock_filter * */
};
#endif

struct sock_fprog_kern {
	u16			len;
	struct sock_filter	*filter;
};

struct bpf_binary_header {
	unsigned int pages;
	u8 image[];
};

struct bpf_prog {
	u16			pages;		/* Number of allocated pages */
	kmemcheck_bitfield_begin(meta);
	u16			jited:1,	/* Is our filter JIT'ed? */
				locked:1,	/* Program image locked? */
				gpl_compatible:1, /* Is filter GPL compatible? */
				cb_access:1,	/* Is control block accessed? */
				dst_needed:1;	/* Do we need dst entry? */
	kmemcheck_bitfield_end(meta);
	enum bpf_prog_type	type;		/* Type of BPF program */
	u32			len;		/* Number of filter blocks */
	u32			jited_len;	/* Size of jited insns in bytes */
	u8			tag[BPF_TAG_SIZE];
	struct bpf_prog_aux	*aux;		/* Auxiliary fields */
	struct sock_fprog_kern	*orig_prog;	/* Original BPF program */
	unsigned int		(*bpf_func)(const void *ctx,
					    const struct bpf_insn *insn);
	/* Instructions for interpreter */
	union {
		struct sock_filter	insns[0];
		struct bpf_insn		insnsi[0];
	};
};

struct sk_filter {
	refcount_t	refcnt;
	struct rcu_head	rcu;
	struct bpf_prog	*prog;
};

#define BPF_PROG_RUN(filter, ctx)  (*(filter)->bpf_func)(ctx, (filter)->insnsi)

#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN

struct bpf_skb_data_end {
	struct qdisc_skb_cb qdisc_cb;
	void *data_meta;
	void *data_end;
};

struct xdp_buff {
	void *data;
	void *data_end;
	void *data_meta;
	void *data_hard_start;
};

/* Compute the linear packet data range [data, data_end) which
 * will be accessed by various program types (cls_bpf, act_bpf,
 * lwt, ...). Subsystems allowing direct data access must (!)
 * ensure that cb[] area can be written to when BPF program is
 * invoked (otherwise cb[] save/restore is necessary).
 */
static inline void bpf_compute_data_pointers(struct sk_buff *skb)
{
	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;

	BUILD_BUG_ON(sizeof(*cb) > FIELD_SIZEOF(struct sk_buff, cb));
	cb->data_meta = skb->data - skb_metadata_len(skb);
	cb->data_end = skb->data + skb_headlen(skb);
}

static inline u8 *bpf_skb_cb(struct sk_buff *skb)
{
	/* eBPF programs may read/write skb->cb[] area to transfer meta
	 * data between tail calls. Since this also needs to work with
	 * tc, that scratch memory is mapped to qdisc_skb_cb's data area.
	 *
	 * In some socket filter cases, the cb unfortunately needs to be
	 * saved/restored so that protocol specific skb->cb[] data won't
	 * be lost. In any case, due to unprivileged eBPF programs
	 * attached to sockets, we need to clear the bpf_skb_cb() area
	 * to not leak previous contents to user space.
	 */
	BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != BPF_SKB_CB_LEN);
	BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) !=
		     FIELD_SIZEOF(struct qdisc_skb_cb, data));

	return qdisc_skb_cb(skb)->data;
}

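/* Example (editor's illustration, not part of the original header): the usual
 * pattern for running an skb-based program, as in cls_bpf-style users. The
 * function name is hypothetical and the block is compiled out; callers that
 * must preserve the cb[] scratch area use the *_save_cb()/*_clear_cb()
 * variants defined below instead of raw BPF_PROG_RUN().
 */
#if 0
static inline u32 bpf_example_run_skb(const struct bpf_prog *prog,
				      struct sk_buff *skb)
{
	u32 ret;

	/* The program must stay valid while it runs. */
	rcu_read_lock();
	/* Set up [data, data_end) for direct packet access. */
	bpf_compute_data_pointers(skb);
	ret = BPF_PROG_RUN(prog, skb);
	rcu_read_unlock();

	return ret;
}
#endif
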
static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
				       struct sk_buff *skb)
{
	u8 *cb_data = bpf_skb_cb(skb);
	u8 cb_saved[BPF_SKB_CB_LEN];
	u32 res;

	if (unlikely(prog->cb_access)) {
		memcpy(cb_saved, cb_data, sizeof(cb_saved));
		memset(cb_data, 0, sizeof(cb_saved));
	}

	res = BPF_PROG_RUN(prog, skb);

	if (unlikely(prog->cb_access))
		memcpy(cb_data, cb_saved, sizeof(cb_saved));

	return res;
}

static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
					struct sk_buff *skb)
{
	u8 *cb_data = bpf_skb_cb(skb);

	if (unlikely(prog->cb_access))
		memset(cb_data, 0, BPF_SKB_CB_LEN);

	return BPF_PROG_RUN(prog, skb);
}

static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
					    struct xdp_buff *xdp)
{
	/* Caller needs to hold rcu_read_lock() (!), otherwise program
	 * can be released while still running, or map elements could be
	 * freed early while still having concurrent users. XDP fastpath
	 * already takes rcu_read_lock() when fetching the program, so
	 * it's not necessary here anymore.
	 */
	return BPF_PROG_RUN(prog, xdp);
}

static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
{
	return prog->len * sizeof(struct bpf_insn);
}

static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog)
{
	return round_up(bpf_prog_insn_size(prog) +
			sizeof(__be64) + 1, SHA_MESSAGE_BYTES);
}

static inline unsigned int bpf_prog_size(unsigned int proglen)
{
	return max(sizeof(struct bpf_prog),
		   offsetof(struct bpf_prog, insns[proglen]));
}

static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
{
	/* When classic BPF programs have been loaded and the arch
	 * does not have a classic BPF JIT (anymore), they have been
	 * converted via bpf_migrate_filter() to eBPF and thus always
	 * have an unspec program type.
	 */
	return prog->type == BPF_PROG_TYPE_UNSPEC;
}

static inline bool
bpf_ctx_narrow_access_ok(u32 off, u32 size, const u32 size_default)
{
	bool off_ok;
#ifdef __LITTLE_ENDIAN
	off_ok = (off & (size_default - 1)) == 0;
#else
	off_ok = (off & (size_default - 1)) + size == size_default;
#endif
	return off_ok && size <= size_default && (size & (size - 1)) == 0;
}

#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))

#ifdef CONFIG_ARCH_HAS_SET_MEMORY
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
	fp->locked = 1;
	WARN_ON_ONCE(set_memory_ro((unsigned long)fp, fp->pages));
}

static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
	if (fp->locked) {
		WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages));
		/* In case set_memory_rw() fails, we want to be the first
		 * to crash here instead of some random place later on.
		 */
		fp->locked = 0;
	}
}

static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
{
	WARN_ON_ONCE(set_memory_ro((unsigned long)hdr, hdr->pages));
}

static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
{
	WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
}
#else
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
}

static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
}

static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
{
}

static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
{
}
#endif /* CONFIG_ARCH_HAS_SET_MEMORY */

static inline struct bpf_binary_header *
bpf_jit_binary_hdr(const struct bpf_prog *fp)
{
	unsigned long real_start = (unsigned long)fp->bpf_func;
	unsigned long addr = real_start & PAGE_MASK;

	return (void *)addr;
}

int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	return sk_filter_trim_cap(sk, skb, 1);
}

struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
void bpf_prog_free(struct bpf_prog *fp);

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags);
void __bpf_prog_free(struct bpf_prog *fp);

static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
{
	bpf_prog_unlock_ro(fp);
	__bpf_prog_free(fp);
}

typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter,
				       unsigned int flen);

int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
			      bpf_aux_classic_check_t trans, bool save_orig);
void bpf_prog_destroy(struct bpf_prog *fp);

int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_attach_bpf(u32 ufd, struct sock *sk);
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
int sk_detach_filter(struct sock *sk);
int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
		  unsigned int len);

bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);

u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
void bpf_jit_compile(struct bpf_prog *prog);
bool bpf_helper_changes_pkt_data(void *func);

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len);

/* The pair of xdp_do_redirect and xdp_do_flush_map MUST be called in the
 * same cpu context. Further for best results no more than a single map
 * for the do_redirect/do_flush pair should be used. This limitation is
 * because we only track one map and force a flush when the map changes.
 * This does not appear to be a real limitation for existing software.
 */
int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
			    struct bpf_prog *prog);
int xdp_do_redirect(struct net_device *dev,
		    struct xdp_buff *xdp,
		    struct bpf_prog *prog);
void xdp_do_flush_map(void);

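/* Example (editor's illustration, not part of the original header): shape of
 * a per-packet XDP run honouring the contract above. The function name and
 * buffer parameters are hypothetical and the block is compiled out; the
 * caller fetches the program under rcu_read_lock(), invokes this for each
 * frame, and calls xdp_do_flush_map() once afterwards on the same CPU.
 */
#if 0
static u32 bpf_example_run_xdp_one(struct bpf_prog *prog,
				   struct net_device *dev,
				   void *hard_start, unsigned int headroom,
				   unsigned int len)
{
	struct xdp_buff xdp;
	u32 act;

	xdp.data_hard_start = hard_start;
	xdp.data = hard_start + headroom;
	xdp.data_end = xdp.data + len;
	/* this example driver does not support metadata (helper below) */
	xdp_set_data_meta_invalid(&xdp);

	act = bpf_prog_run_xdp(prog, &xdp);	/* caller holds rcu_read_lock() */
	if (act == XDP_REDIRECT)
		xdp_do_redirect(dev, &xdp, prog);

	return act;
}
#endif
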
/* Drivers not supporting XDP metadata can use this helper, which
 * rejects any room expansion for metadata as a result.
 */
static __always_inline void
xdp_set_data_meta_invalid(struct xdp_buff *xdp)
{
	xdp->data_meta = xdp->data + 1;
}

static __always_inline bool
xdp_data_meta_unsupported(const struct xdp_buff *xdp)
{
	return unlikely(xdp->data_meta > xdp->data);
}

void bpf_warn_invalid_xdp_action(u32 act);

struct sock *do_sk_redirect_map(struct sk_buff *skb);

#ifdef CONFIG_BPF_JIT
extern int bpf_jit_enable;
extern int bpf_jit_harden;
extern int bpf_jit_kallsyms;

typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns);
void bpf_jit_binary_free(struct bpf_binary_header *hdr);

void bpf_jit_free(struct bpf_prog *fp);

struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);

static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
				u32 pass, void *image)
{
	pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen,
	       proglen, pass, image, current->comm, task_pid_nr(current));

	if (image)
		print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
			       16, 1, image, proglen, false);
}

static inline bool bpf_jit_is_ebpf(void)
{
# ifdef CONFIG_HAVE_EBPF_JIT
	return true;
# else
	return false;
# endif
}

static inline bool ebpf_jit_enabled(void)
{
	return bpf_jit_enable && bpf_jit_is_ebpf();
}

static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
{
	return fp->jited && bpf_jit_is_ebpf();
}

static inline bool bpf_jit_blinding_enabled(void)
{
	/* These are the prerequisites, should someone ever have the
	 * idea to call blinding outside of them, we make sure to
	 * bail out.
	 */
	if (!bpf_jit_is_ebpf())
		return false;
	if (!bpf_jit_enable)
		return false;
	if (!bpf_jit_harden)
		return false;
	if (bpf_jit_harden == 1 && capable(CAP_SYS_ADMIN))
		return false;

	return true;
}

static inline bool bpf_jit_kallsyms_enabled(void)
{
	/* There are a couple of corner cases where kallsyms should
	 * not be enabled, e.g. on hardening.
	 */
	if (bpf_jit_harden)
		return false;
	if (!bpf_jit_kallsyms)
		return false;
	if (bpf_jit_kallsyms == 1)
		return true;

	return false;
}

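/* Example (editor's illustration, not part of the original header): the rough
 * sequence an arch JIT follows around bpf_jit_binary_alloc() above. The
 * function name, example_fill_ill_insns() and example_emit_prog() are
 * hypothetical placeholders for arch-specific code; the block is compiled out.
 */
#if 0
static struct bpf_prog *bpf_example_jit_compile(struct bpf_prog *prog,
						unsigned int proglen)
{
	struct bpf_binary_header *header;
	u8 *image;

	header = bpf_jit_binary_alloc(proglen, &image, 4, example_fill_ill_insns);
	if (!header)
		return prog;	/* fall back to the interpreter */

	/* emit native instructions into the image (arch specific) */
	example_emit_prog(image, prog);
	/* seal the image before exposing it */
	bpf_jit_binary_lock_ro(header);

	prog->bpf_func = (void *)image;
	prog->jited = 1;
	prog->jited_len = proglen;

	return prog;
}
#endif
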
const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
				 unsigned long *off, char *sym);
bool is_bpf_text_address(unsigned long addr);
int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		    char *sym);

static inline const char *
bpf_address_lookup(unsigned long addr, unsigned long *size,
		   unsigned long *off, char **modname, char *sym)
{
	const char *ret = __bpf_address_lookup(addr, size, off, sym);

	if (ret && modname)
		*modname = NULL;
	return ret;
}

void bpf_prog_kallsyms_add(struct bpf_prog *fp);
void bpf_prog_kallsyms_del(struct bpf_prog *fp);

#else /* CONFIG_BPF_JIT */

static inline bool ebpf_jit_enabled(void)
{
	return false;
}

static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
{
	return false;
}

static inline void bpf_jit_free(struct bpf_prog *fp)
{
	bpf_prog_unlock_free(fp);
}

static inline bool bpf_jit_kallsyms_enabled(void)
{
	return false;
}

static inline const char *
__bpf_address_lookup(unsigned long addr, unsigned long *size,
		     unsigned long *off, char *sym)
{
	return NULL;
}

static inline bool is_bpf_text_address(unsigned long addr)
{
	return false;
}

static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value,
				  char *type, char *sym)
{
	return -ERANGE;
}

static inline const char *
bpf_address_lookup(unsigned long addr, unsigned long *size,
		   unsigned long *off, char **modname, char *sym)
{
	return NULL;
}

static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
}

static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
}
#endif /* CONFIG_BPF_JIT */

#define BPF_ANC		BIT(15)

static inline bool bpf_needs_clear_a(const struct sock_filter *first)
{
	switch (first->code) {
	case BPF_RET | BPF_K:
	case BPF_LD | BPF_W | BPF_LEN:
		return false;

	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
		if (first->k == SKF_AD_OFF + SKF_AD_ALU_XOR_X)
			return true;
		return false;

	default:
		return true;
	}
}

static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
{
	BUG_ON(ftest->code & BPF_ANC);

	switch (ftest->code) {
	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
#define BPF_ANCILLARY(CODE)	case SKF_AD_OFF + SKF_AD_##CODE:	\
				return BPF_ANC | SKF_AD_##CODE
		switch (ftest->k) {
		BPF_ANCILLARY(PROTOCOL);
		BPF_ANCILLARY(PKTTYPE);
		BPF_ANCILLARY(IFINDEX);
		BPF_ANCILLARY(NLATTR);
		BPF_ANCILLARY(NLATTR_NEST);
		BPF_ANCILLARY(MARK);
		BPF_ANCILLARY(QUEUE);
		BPF_ANCILLARY(HATYPE);
		BPF_ANCILLARY(RXHASH);
		BPF_ANCILLARY(CPU);
		BPF_ANCILLARY(ALU_XOR_X);
		BPF_ANCILLARY(VLAN_TAG);
		BPF_ANCILLARY(VLAN_TAG_PRESENT);
		BPF_ANCILLARY(PAY_OFFSET);
		BPF_ANCILLARY(RANDOM);
		BPF_ANCILLARY(VLAN_TPID);
		}
		/* Fallthrough. */
	default:
		return ftest->code;
	}
}

void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
					   int k, unsigned int size);

static inline void *bpf_load_pointer(const struct sk_buff *skb, int k,
				     unsigned int size, void *buffer)
{
	if (k >= 0)
		return skb_header_pointer(skb, k, size, buffer);

	return bpf_internal_load_pointer_neg_helper(skb, k, size);
}

static inline int bpf_tell_extensions(void)
{
	return SKF_AD_MAX;
}

struct bpf_sock_ops_kern {
	struct	sock *sk;
	u32	op;
	union {
		u32 reply;
		u32 replylong[4];
	};
};

#endif /* __LINUX_FILTER_H__ */