1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
3 */
4
5 #include <stdarg.h>
6 #include <stdio.h>
7 #include <string.h>
8 #include <errno.h>
9 #include <stdint.h>
10 #include <inttypes.h>
11
12 #include <rte_common.h>
13 #include <rte_log.h>
14 #include <rte_debug.h>
15 #include <rte_memory.h>
16 #include <rte_eal.h>
17 #include <rte_byteorder.h>
18
19 #include "bpf_impl.h"
20
/* unconditional jump: offset is relative to the *next* instruction
 * (the enclosing loop's ins++ supplies the implicit +1)
 */
#define BPF_JMP_UNC(ins)	((ins) += (ins)->off)

/* conditional jump, register vs register, at the given width/signedness */
#define BPF_JMP_CND_REG(reg, ins, op, type)	\
	((ins) += \
		((type)(reg)[(ins)->dst_reg] op (type)(reg)[(ins)->src_reg]) ? \
		(ins)->off : 0)

/* conditional jump, register vs 32-bit immediate */
#define BPF_JMP_CND_IMM(reg, ins, op, type)	\
	((ins) += \
		((type)(reg)[(ins)->dst_reg] op (type)(ins)->imm) ? \
		(ins)->off : 0)

/* arithmetic negation of the destination register */
#define BPF_NEG_ALU(reg, ins, type)	\
	((reg)[(ins)->dst_reg] = (type)(-(reg)[(ins)->dst_reg]))

/* register-to-register move; a 32-bit type zero-extends the result */
#define EBPF_MOV_ALU_REG(reg, ins, type)	\
	((reg)[(ins)->dst_reg] = (type)(reg)[(ins)->src_reg])

/* binary ALU operation with a register operand */
#define BPF_OP_ALU_REG(reg, ins, op, type)	\
	((reg)[(ins)->dst_reg] = \
		(type)(reg)[(ins)->dst_reg] op (type)(reg)[(ins)->src_reg])

/* immediate-to-register move */
#define EBPF_MOV_ALU_IMM(reg, ins, type)	\
	((reg)[(ins)->dst_reg] = (type)(ins)->imm)

/* binary ALU operation with an immediate operand */
#define BPF_OP_ALU_IMM(reg, ins, op, type)	\
	((reg)[(ins)->dst_reg] = \
		(type)(reg)[(ins)->dst_reg] op (type)(ins)->imm)

/*
 * Guard for DIV/MOD with a register divisor: a zero divisor logs the
 * faulting pc and aborts execution (the enclosing function returns 0).
 */
#define BPF_DIV_ZERO_CHECK(bpf, reg, ins, type) do { \
	if ((type)(reg)[(ins)->src_reg] == 0) { \
		RTE_BPF_LOG(ERR, \
			"%s(%p): division by 0 at pc: %#zx;\n", \
			__func__, bpf, \
			(uintptr_t)(ins) - (uintptr_t)(bpf)->prm.ins); \
		return 0; \
	} \
} while (0)

/* memory load: dst = *(type *)(src + off) */
#define BPF_LD_REG(reg, ins, type)	\
	((reg)[(ins)->dst_reg] = \
		*(type *)(uintptr_t)((reg)[(ins)->src_reg] + (ins)->off))

/* memory store of an immediate: *(type *)(dst + off) = imm */
#define BPF_ST_IMM(reg, ins, type)	\
	(*(type *)(uintptr_t)((reg)[(ins)->dst_reg] + (ins)->off) = \
		(type)(ins)->imm)

/* memory store of a register: *(type *)(dst + off) = src */
#define BPF_ST_REG(reg, ins, type)	\
	(*(type *)(uintptr_t)((reg)[(ins)->dst_reg] + (ins)->off) = \
		(type)(reg)[(ins)->src_reg])

/*
 * atomic fetch-add to memory; tp is the operand width (32/64).
 * Note: reg/ins arguments are now fully parenthesized, consistent with
 * the other macros (the original used bare reg[ins->src_reg], which
 * would mis-expand for non-trivial argument expressions).
 */
#define BPF_ST_XADD_REG(reg, ins, tp)	\
	(rte_atomic##tp##_add((rte_atomic##tp##_t *) \
		(uintptr_t)((reg)[(ins)->dst_reg] + (ins)->off), \
		(reg)[(ins)->src_reg]))

/* BPF_LD | BPF_ABS/BPF_IND */

/* identity byte-order "conversion" for single-byte packet loads */
#define NOP(x)	(x)

/*
 * Absolute packet load: read sizeof(type) bytes at offset ins->imm from
 * the packet, byte-swap via op, and place the result in register 0.
 * Aborts execution (returns 0) on an out-of-bounds access.
 */
#define BPF_LD_ABS(bpf, reg, ins, type, op) do { \
	const type *p = bpf_ld_mbuf(bpf, reg, ins, (ins)->imm, sizeof(type)); \
	if (p == NULL)  \
		return 0; \
	(reg)[EBPF_REG_0] = op(p[0]); \
} while (0)

/*
 * Indirect packet load: like BPF_LD_ABS, but the offset is
 * source register + immediate.
 */
#define BPF_LD_IND(bpf, reg, ins, type, op) do { \
	uint32_t ofs = (reg)[(ins)->src_reg] + (ins)->imm; \
	const type *p = bpf_ld_mbuf(bpf, reg, ins, ofs, sizeof(type)); \
	if (p == NULL)  \
		return 0; \
	(reg)[EBPF_REG_0] = op(p[0]); \
} while (0)
95
96
97 static inline void
bpf_alu_be(uint64_t reg[EBPF_REG_NUM],const struct ebpf_insn * ins)98 bpf_alu_be(uint64_t reg[EBPF_REG_NUM], const struct ebpf_insn *ins)
99 {
100 uint64_t *v;
101
102 v = reg + ins->dst_reg;
103 switch (ins->imm) {
104 case 16:
105 *v = rte_cpu_to_be_16(*v);
106 break;
107 case 32:
108 *v = rte_cpu_to_be_32(*v);
109 break;
110 case 64:
111 *v = rte_cpu_to_be_64(*v);
112 break;
113 }
114 }
115
116 static inline void
bpf_alu_le(uint64_t reg[EBPF_REG_NUM],const struct ebpf_insn * ins)117 bpf_alu_le(uint64_t reg[EBPF_REG_NUM], const struct ebpf_insn *ins)
118 {
119 uint64_t *v;
120
121 v = reg + ins->dst_reg;
122 switch (ins->imm) {
123 case 16:
124 *v = rte_cpu_to_le_16(*v);
125 break;
126 case 32:
127 *v = rte_cpu_to_le_32(*v);
128 break;
129 case 64:
130 *v = rte_cpu_to_le_64(*v);
131 break;
132 }
133 }
134
135 static inline const void *
bpf_ld_mbuf(const struct rte_bpf * bpf,uint64_t reg[EBPF_REG_NUM],const struct ebpf_insn * ins,uint32_t off,uint32_t len)136 bpf_ld_mbuf(const struct rte_bpf *bpf, uint64_t reg[EBPF_REG_NUM],
137 const struct ebpf_insn *ins, uint32_t off, uint32_t len)
138 {
139 const struct rte_mbuf *mb;
140 const void *p;
141
142 mb = (const struct rte_mbuf *)(uintptr_t)reg[EBPF_REG_6];
143 p = rte_pktmbuf_read(mb, off, len, reg + EBPF_REG_0);
144 if (p == NULL)
145 RTE_BPF_LOG(DEBUG, "%s(bpf=%p, mbuf=%p, ofs=%u, len=%u): "
146 "load beyond packet boundary at pc: %#zx;\n",
147 __func__, bpf, mb, off, len,
148 (uintptr_t)(ins) - (uintptr_t)(bpf)->prm.ins);
149 return p;
150 }
151
/*
 * Interpreter main loop: execute the eBPF program starting at
 * bpf->prm.ins until an EBPF_EXIT instruction is reached.
 * Returns the contents of register 0 on normal exit, or 0 on a runtime
 * fault (division by zero, load beyond packet boundary, invalid
 * opcode).  The caller seeds reg[] - see rte_bpf_exec_burst().
 * The program is presumed to have been validated at load time;
 * this loop performs only the checks visible below.
 */
static inline uint64_t
bpf_exec(const struct rte_bpf *bpf, uint64_t reg[EBPF_REG_NUM])
{
	const struct ebpf_insn *ins;

	for (ins = bpf->prm.ins; ; ins++) {
		switch (ins->code) {
		/* 32 bit ALU IMM operations */
		case (BPF_ALU | BPF_ADD | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, +, uint32_t);
			break;
		case (BPF_ALU | BPF_SUB | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, -, uint32_t);
			break;
		case (BPF_ALU | BPF_AND | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, &, uint32_t);
			break;
		case (BPF_ALU | BPF_OR | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, |, uint32_t);
			break;
		case (BPF_ALU | BPF_LSH | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, <<, uint32_t);
			break;
		case (BPF_ALU | BPF_RSH | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, >>, uint32_t);
			break;
		case (BPF_ALU | BPF_XOR | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, ^, uint32_t);
			break;
		case (BPF_ALU | BPF_MUL | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, *, uint32_t);
			break;
		case (BPF_ALU | BPF_DIV | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, /, uint32_t);
			break;
		case (BPF_ALU | BPF_MOD | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, %, uint32_t);
			break;
		case (BPF_ALU | EBPF_MOV | BPF_K):
			EBPF_MOV_ALU_IMM(reg, ins, uint32_t);
			break;
		/* 32 bit ALU REG operations */
		case (BPF_ALU | BPF_ADD | BPF_X):
			BPF_OP_ALU_REG(reg, ins, +, uint32_t);
			break;
		case (BPF_ALU | BPF_SUB | BPF_X):
			BPF_OP_ALU_REG(reg, ins, -, uint32_t);
			break;
		case (BPF_ALU | BPF_AND | BPF_X):
			BPF_OP_ALU_REG(reg, ins, &, uint32_t);
			break;
		case (BPF_ALU | BPF_OR | BPF_X):
			BPF_OP_ALU_REG(reg, ins, |, uint32_t);
			break;
		case (BPF_ALU | BPF_LSH | BPF_X):
			BPF_OP_ALU_REG(reg, ins, <<, uint32_t);
			break;
		case (BPF_ALU | BPF_RSH | BPF_X):
			BPF_OP_ALU_REG(reg, ins, >>, uint32_t);
			break;
		case (BPF_ALU | BPF_XOR | BPF_X):
			BPF_OP_ALU_REG(reg, ins, ^, uint32_t);
			break;
		case (BPF_ALU | BPF_MUL | BPF_X):
			BPF_OP_ALU_REG(reg, ins, *, uint32_t);
			break;
		case (BPF_ALU | BPF_DIV | BPF_X):
			/* register divisor: zero aborts with rc 0 */
			BPF_DIV_ZERO_CHECK(bpf, reg, ins, uint32_t);
			BPF_OP_ALU_REG(reg, ins, /, uint32_t);
			break;
		case (BPF_ALU | BPF_MOD | BPF_X):
			BPF_DIV_ZERO_CHECK(bpf, reg, ins, uint32_t);
			BPF_OP_ALU_REG(reg, ins, %, uint32_t);
			break;
		case (BPF_ALU | EBPF_MOV | BPF_X):
			EBPF_MOV_ALU_REG(reg, ins, uint32_t);
			break;
		case (BPF_ALU | BPF_NEG):
			BPF_NEG_ALU(reg, ins, uint32_t);
			break;
		case (BPF_ALU | EBPF_END | EBPF_TO_BE):
			bpf_alu_be(reg, ins);
			break;
		case (BPF_ALU | EBPF_END | EBPF_TO_LE):
			bpf_alu_le(reg, ins);
			break;
		/* 64 bit ALU IMM operations */
		case (EBPF_ALU64 | BPF_ADD | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, +, uint64_t);
			break;
		case (EBPF_ALU64 | BPF_SUB | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, -, uint64_t);
			break;
		case (EBPF_ALU64 | BPF_AND | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, &, uint64_t);
			break;
		case (EBPF_ALU64 | BPF_OR | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, |, uint64_t);
			break;
		case (EBPF_ALU64 | BPF_LSH | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, <<, uint64_t);
			break;
		case (EBPF_ALU64 | BPF_RSH | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, >>, uint64_t);
			break;
		case (EBPF_ALU64 | EBPF_ARSH | BPF_K):
			/* arithmetic shift: int64_t makes >> sign-extend */
			BPF_OP_ALU_IMM(reg, ins, >>, int64_t);
			break;
		case (EBPF_ALU64 | BPF_XOR | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, ^, uint64_t);
			break;
		case (EBPF_ALU64 | BPF_MUL | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, *, uint64_t);
			break;
		case (EBPF_ALU64 | BPF_DIV | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, /, uint64_t);
			break;
		case (EBPF_ALU64 | BPF_MOD | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, %, uint64_t);
			break;
		case (EBPF_ALU64 | EBPF_MOV | BPF_K):
			EBPF_MOV_ALU_IMM(reg, ins, uint64_t);
			break;
		/* 64 bit ALU REG operations */
		case (EBPF_ALU64 | BPF_ADD | BPF_X):
			BPF_OP_ALU_REG(reg, ins, +, uint64_t);
			break;
		case (EBPF_ALU64 | BPF_SUB | BPF_X):
			BPF_OP_ALU_REG(reg, ins, -, uint64_t);
			break;
		case (EBPF_ALU64 | BPF_AND | BPF_X):
			BPF_OP_ALU_REG(reg, ins, &, uint64_t);
			break;
		case (EBPF_ALU64 | BPF_OR | BPF_X):
			BPF_OP_ALU_REG(reg, ins, |, uint64_t);
			break;
		case (EBPF_ALU64 | BPF_LSH | BPF_X):
			BPF_OP_ALU_REG(reg, ins, <<, uint64_t);
			break;
		case (EBPF_ALU64 | BPF_RSH | BPF_X):
			BPF_OP_ALU_REG(reg, ins, >>, uint64_t);
			break;
		case (EBPF_ALU64 | EBPF_ARSH | BPF_X):
			BPF_OP_ALU_REG(reg, ins, >>, int64_t);
			break;
		case (EBPF_ALU64 | BPF_XOR | BPF_X):
			BPF_OP_ALU_REG(reg, ins, ^, uint64_t);
			break;
		case (EBPF_ALU64 | BPF_MUL | BPF_X):
			BPF_OP_ALU_REG(reg, ins, *, uint64_t);
			break;
		case (EBPF_ALU64 | BPF_DIV | BPF_X):
			BPF_DIV_ZERO_CHECK(bpf, reg, ins, uint64_t);
			BPF_OP_ALU_REG(reg, ins, /, uint64_t);
			break;
		case (EBPF_ALU64 | BPF_MOD | BPF_X):
			BPF_DIV_ZERO_CHECK(bpf, reg, ins, uint64_t);
			BPF_OP_ALU_REG(reg, ins, %, uint64_t);
			break;
		case (EBPF_ALU64 | EBPF_MOV | BPF_X):
			EBPF_MOV_ALU_REG(reg, ins, uint64_t);
			break;
		case (EBPF_ALU64 | BPF_NEG):
			BPF_NEG_ALU(reg, ins, uint64_t);
			break;
		/* load instructions */
		case (BPF_LDX | BPF_MEM | BPF_B):
			BPF_LD_REG(reg, ins, uint8_t);
			break;
		case (BPF_LDX | BPF_MEM | BPF_H):
			BPF_LD_REG(reg, ins, uint16_t);
			break;
		case (BPF_LDX | BPF_MEM | BPF_W):
			BPF_LD_REG(reg, ins, uint32_t);
			break;
		case (BPF_LDX | BPF_MEM | EBPF_DW):
			BPF_LD_REG(reg, ins, uint64_t);
			break;
		/* load 64 bit immediate value */
		case (BPF_LD | BPF_IMM | EBPF_DW):
			/* 64-bit immediate spans two instruction slots:
			 * low word in ins[0].imm, high word in ins[1].imm
			 */
			reg[ins->dst_reg] = (uint32_t)ins[0].imm |
				(uint64_t)(uint32_t)ins[1].imm << 32;
			/* consume the second slot of the wide instruction */
			ins++;
			break;
		/* load absolute instructions */
		case (BPF_LD | BPF_ABS | BPF_B):
			BPF_LD_ABS(bpf, reg, ins, uint8_t, NOP);
			break;
		case (BPF_LD | BPF_ABS | BPF_H):
			BPF_LD_ABS(bpf, reg, ins, uint16_t, rte_be_to_cpu_16);
			break;
		case (BPF_LD | BPF_ABS | BPF_W):
			BPF_LD_ABS(bpf, reg, ins, uint32_t, rte_be_to_cpu_32);
			break;
		/* load indirect instructions */
		case (BPF_LD | BPF_IND | BPF_B):
			BPF_LD_IND(bpf, reg, ins, uint8_t, NOP);
			break;
		case (BPF_LD | BPF_IND | BPF_H):
			BPF_LD_IND(bpf, reg, ins, uint16_t, rte_be_to_cpu_16);
			break;
		case (BPF_LD | BPF_IND | BPF_W):
			BPF_LD_IND(bpf, reg, ins, uint32_t, rte_be_to_cpu_32);
			break;
		/* store instructions */
		case (BPF_STX | BPF_MEM | BPF_B):
			BPF_ST_REG(reg, ins, uint8_t);
			break;
		case (BPF_STX | BPF_MEM | BPF_H):
			BPF_ST_REG(reg, ins, uint16_t);
			break;
		case (BPF_STX | BPF_MEM | BPF_W):
			BPF_ST_REG(reg, ins, uint32_t);
			break;
		case (BPF_STX | BPF_MEM | EBPF_DW):
			BPF_ST_REG(reg, ins, uint64_t);
			break;
		case (BPF_ST | BPF_MEM | BPF_B):
			BPF_ST_IMM(reg, ins, uint8_t);
			break;
		case (BPF_ST | BPF_MEM | BPF_H):
			BPF_ST_IMM(reg, ins, uint16_t);
			break;
		case (BPF_ST | BPF_MEM | BPF_W):
			BPF_ST_IMM(reg, ins, uint32_t);
			break;
		case (BPF_ST | BPF_MEM | EBPF_DW):
			BPF_ST_IMM(reg, ins, uint64_t);
			break;
		/* atomic add instructions */
		case (BPF_STX | EBPF_XADD | BPF_W):
			BPF_ST_XADD_REG(reg, ins, 32);
			break;
		case (BPF_STX | EBPF_XADD | EBPF_DW):
			BPF_ST_XADD_REG(reg, ins, 64);
			break;
		/* jump instructions */
		case (BPF_JMP | BPF_JA):
			BPF_JMP_UNC(ins);
			break;
		/* jump IMM instructions */
		case (BPF_JMP | BPF_JEQ | BPF_K):
			BPF_JMP_CND_IMM(reg, ins, ==, uint64_t);
			break;
		case (BPF_JMP | EBPF_JNE | BPF_K):
			BPF_JMP_CND_IMM(reg, ins, !=, uint64_t);
			break;
		case (BPF_JMP | BPF_JGT | BPF_K):
			BPF_JMP_CND_IMM(reg, ins, >, uint64_t);
			break;
		case (BPF_JMP | EBPF_JLT | BPF_K):
			BPF_JMP_CND_IMM(reg, ins, <, uint64_t);
			break;
		case (BPF_JMP | BPF_JGE | BPF_K):
			BPF_JMP_CND_IMM(reg, ins, >=, uint64_t);
			break;
		case (BPF_JMP | EBPF_JLE | BPF_K):
			BPF_JMP_CND_IMM(reg, ins, <=, uint64_t);
			break;
		case (BPF_JMP | EBPF_JSGT | BPF_K):
			/* JS* variants: signed comparison via int64_t cast */
			BPF_JMP_CND_IMM(reg, ins, >, int64_t);
			break;
		case (BPF_JMP | EBPF_JSLT | BPF_K):
			BPF_JMP_CND_IMM(reg, ins, <, int64_t);
			break;
		case (BPF_JMP | EBPF_JSGE | BPF_K):
			BPF_JMP_CND_IMM(reg, ins, >=, int64_t);
			break;
		case (BPF_JMP | EBPF_JSLE | BPF_K):
			BPF_JMP_CND_IMM(reg, ins, <=, int64_t);
			break;
		case (BPF_JMP | BPF_JSET | BPF_K):
			BPF_JMP_CND_IMM(reg, ins, &, uint64_t);
			break;
		/* jump REG instructions */
		case (BPF_JMP | BPF_JEQ | BPF_X):
			BPF_JMP_CND_REG(reg, ins, ==, uint64_t);
			break;
		case (BPF_JMP | EBPF_JNE | BPF_X):
			BPF_JMP_CND_REG(reg, ins, !=, uint64_t);
			break;
		case (BPF_JMP | BPF_JGT | BPF_X):
			BPF_JMP_CND_REG(reg, ins, >, uint64_t);
			break;
		case (BPF_JMP | EBPF_JLT | BPF_X):
			BPF_JMP_CND_REG(reg, ins, <, uint64_t);
			break;
		case (BPF_JMP | BPF_JGE | BPF_X):
			BPF_JMP_CND_REG(reg, ins, >=, uint64_t);
			break;
		case (BPF_JMP | EBPF_JLE | BPF_X):
			BPF_JMP_CND_REG(reg, ins, <=, uint64_t);
			break;
		case (BPF_JMP | EBPF_JSGT | BPF_X):
			BPF_JMP_CND_REG(reg, ins, >, int64_t);
			break;
		case (BPF_JMP | EBPF_JSLT | BPF_X):
			BPF_JMP_CND_REG(reg, ins, <, int64_t);
			break;
		case (BPF_JMP | EBPF_JSGE | BPF_X):
			BPF_JMP_CND_REG(reg, ins, >=, int64_t);
			break;
		case (BPF_JMP | EBPF_JSLE | BPF_X):
			BPF_JMP_CND_REG(reg, ins, <=, int64_t);
			break;
		case (BPF_JMP | BPF_JSET | BPF_X):
			BPF_JMP_CND_REG(reg, ins, &, uint64_t);
			break;
		/* call instructions */
		case (BPF_JMP | EBPF_CALL):
			/* ins->imm indexes the external symbol table;
			 * R1-R5 are arguments, result lands in R0
			 */
			reg[EBPF_REG_0] = bpf->prm.xsym[ins->imm].func.val(
				reg[EBPF_REG_1], reg[EBPF_REG_2],
				reg[EBPF_REG_3], reg[EBPF_REG_4],
				reg[EBPF_REG_5]);
			break;
		/* return instruction */
		case (BPF_JMP | EBPF_EXIT):
			return reg[EBPF_REG_0];
		default:
			RTE_BPF_LOG(ERR,
				"%s(%p): invalid opcode %#x at pc: %#zx;\n",
				__func__, bpf, ins->code,
				(uintptr_t)ins - (uintptr_t)bpf->prm.ins);
			return 0;
		}
	}

	/* should never be reached */
	RTE_VERIFY(0);
	return 0;
}
483
484 uint32_t
rte_bpf_exec_burst(const struct rte_bpf * bpf,void * ctx[],uint64_t rc[],uint32_t num)485 rte_bpf_exec_burst(const struct rte_bpf *bpf, void *ctx[], uint64_t rc[],
486 uint32_t num)
487 {
488 uint32_t i;
489 uint64_t reg[EBPF_REG_NUM];
490 uint64_t stack[MAX_BPF_STACK_SIZE / sizeof(uint64_t)];
491
492 for (i = 0; i != num; i++) {
493
494 reg[EBPF_REG_1] = (uintptr_t)ctx[i];
495 reg[EBPF_REG_10] = (uintptr_t)(stack + RTE_DIM(stack));
496
497 rc[i] = bpf_exec(bpf, reg);
498 }
499
500 return i;
501 }
502
/*
 * Convenience wrapper: execute the eBPF program once over a single
 * context and return the program's result directly.
 */
uint64_t
rte_bpf_exec(const struct rte_bpf *bpf, void *ctx)
{
	uint64_t ret;

	rte_bpf_exec_burst(bpf, &ctx, &ret, 1);
	return ret;
}
511