/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <stdarg.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_eal.h>
#include <rte_byteorder.h>

#include "bpf_impl.h"

#define BPF_ARG_PTR_STACK	RTE_BPF_ARG_RESERVED

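/*
 * Verifier state of one register (or stack slot): the expected argument
 * type/descriptor, a mask of bits that may possibly be set, and
 * conservative signed and unsigned [min, max] bounds for its runtime value.
 */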
struct bpf_reg_val {
	struct rte_bpf_arg v;
	uint64_t mask;
	struct {
		int64_t min;
		int64_t max;
	} s;
	struct {
		uint64_t min;
		uint64_t max;
	} u;
};

struct bpf_eval_state {
	struct bpf_reg_val rv[EBPF_REG_NUM];
	struct bpf_reg_val sv[MAX_BPF_STACK_SIZE / sizeof(uint64_t)];
};

/* possible instruction node colour */
enum {
	WHITE,
	GREY,
	BLACK,
	MAX_NODE_COLOUR
};

/* possible edge types */
enum {
	UNKNOWN_EDGE,
	TREE_EDGE,
	BACK_EDGE,
	CROSS_EDGE,
	MAX_EDGE_TYPE
};

#define	MAX_EDGES	2

struct inst_node {
	uint8_t colour;
	uint8_t nb_edge:4;
	uint8_t cur_edge:4;
	uint8_t edge_type[MAX_EDGES];
	uint32_t edge_dest[MAX_EDGES];
	uint32_t prev_node;
	struct bpf_eval_state *evst;
};

struct bpf_verifier {
	const struct rte_bpf_prm *prm;
	struct inst_node *in;
	uint64_t stack_sz;
	uint32_t nb_nodes;
	uint32_t nb_jcc_nodes;
	uint32_t nb_ldmb_nodes;
	uint32_t node_colour[MAX_NODE_COLOUR];
	uint32_t edge_type[MAX_EDGE_TYPE];
	struct bpf_eval_state *evst;
	struct inst_node *evin;
	struct {
		uint32_t num;
		uint32_t cur;
		struct bpf_eval_state *ent;
	} evst_pool;
};

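/*
 * Per-opcode validation constraints: bitmasks of registers allowed in the
 * dst/src fields, permitted 'off' and 'imm' ranges, plus optional extra
 * syntax-check and evaluate callbacks.
 */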
struct bpf_ins_check {
	struct {
		uint16_t dreg;
		uint16_t sreg;
	} mask;
	struct {
		uint16_t min;
		uint16_t max;
	} off;
	struct {
		uint32_t min;
		uint32_t max;
	} imm;
	const char * (*check)(const struct ebpf_insn *);
	const char * (*eval)(struct bpf_verifier *, const struct ebpf_insn *);
};

#define	ALL_REGS	RTE_LEN2MASK(EBPF_REG_NUM, uint16_t)
#define	WRT_REGS	RTE_LEN2MASK(EBPF_REG_10, uint16_t)
#define	ZERO_REG	RTE_LEN2MASK(EBPF_REG_1, uint16_t)

/* For LD_IND R6 is an implicit CTX register. */
#define	IND_SRC_REGS	(WRT_REGS ^ 1 << EBPF_REG_6)

/*
 * check and evaluate functions for particular instruction types.
 */

static const char *
check_alu_bele(const struct ebpf_insn *ins)
{
	if (ins->imm != 16 && ins->imm != 32 && ins->imm != 64)
		return "invalid imm field";
	return NULL;
}

static const char *
eval_exit(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
{
	RTE_SET_USED(ins);
	if (bvf->evst->rv[EBPF_REG_0].v.type == RTE_BPF_ARG_UNDEF)
		return "undefined return value";
	return NULL;
}

/* set up max possible bounds for the given mask */
static void
eval_umax_bound(struct bpf_reg_val *rv, uint64_t mask)
{
	rv->u.max = mask;
	rv->u.min = 0;
}

static void
eval_smax_bound(struct bpf_reg_val *rv, uint64_t mask)
{
	rv->s.max = mask >> 1;
	rv->s.min = rv->s.max ^ UINT64_MAX;
}

static void
eval_max_bound(struct bpf_reg_val *rv, uint64_t mask)
{
	eval_umax_bound(rv, mask);
	eval_smax_bound(rv, mask);
}

static void
eval_fill_max_bound(struct bpf_reg_val *rv, uint64_t mask)
{
	eval_max_bound(rv, mask);
	rv->v.type = RTE_BPF_ARG_RAW;
	rv->mask = mask;
}

static void
eval_fill_imm64(struct bpf_reg_val *rv, uint64_t mask, uint64_t val)
{
	rv->mask = mask;
	rv->s.min = val;
	rv->s.max = val;
	rv->u.min = val;
	rv->u.max = val;
}

static void
eval_fill_imm(struct bpf_reg_val *rv, uint64_t mask, int32_t imm)
{
	uint64_t v;

	v = (uint64_t)imm & mask;

	rv->v.type = RTE_BPF_ARG_RAW;
	eval_fill_imm64(rv, mask, v);
}

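/*
 * LDDW (64-bit immediate load): the value is split across two instruction
 * slots. If the immediate matches the address of a known external
 * variable, the register inherits that variable's descriptor instead.
 */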
static const char *
eval_ld_imm64(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
{
	uint32_t i;
	uint64_t val;
	struct bpf_reg_val *rd;

	val = (uint32_t)ins[0].imm | (uint64_t)(uint32_t)ins[1].imm << 32;

	rd = bvf->evst->rv + ins->dst_reg;
	rd->v.type = RTE_BPF_ARG_RAW;
	eval_fill_imm64(rd, UINT64_MAX, val);

	for (i = 0; i != bvf->prm->nb_xsym; i++) {

		/* load of external variable */
		if (bvf->prm->xsym[i].type == RTE_BPF_XTYPE_VAR &&
				(uintptr_t)bvf->prm->xsym[i].var.val == val) {
			rd->v = bvf->prm->xsym[i].var.desc;
			eval_fill_imm64(rd, UINT64_MAX, 0);
			break;
		}
	}

	return NULL;
}

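/*
 * Truncate the tracked bounds to the given mask (e.g. when a 64-bit value
 * is used in a 32-bit operation). If masking could actually change the
 * value, fall back to the widest bounds representable under that mask.
 */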
static void
eval_apply_mask(struct bpf_reg_val *rv, uint64_t mask)
{
	struct bpf_reg_val rt;

	rt.u.min = rv->u.min & mask;
	rt.u.max = rv->u.max & mask;
	if (rt.u.min != rv->u.min || rt.u.max != rv->u.max) {
		rv->u.max = RTE_MAX(rt.u.max, mask);
		rv->u.min = 0;
	}

	eval_smax_bound(&rt, mask);
	rv->s.max = RTE_MIN(rt.s.max, rv->s.max);
	rv->s.min = RTE_MAX(rt.s.min, rv->s.min);

	rv->mask = mask;
}

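/*
 * Interval arithmetic for ADD/SUB: combine the operand bounds and, when
 * either operand is not a constant and the result may wrap around, widen
 * the bounds to the maximum range for the operand size.
 */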
static void
eval_add(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, uint64_t msk)
{
	struct bpf_reg_val rv;

	rv.u.min = (rd->u.min + rs->u.min) & msk;
	rv.u.max = (rd->u.max + rs->u.max) & msk;
	rv.s.min = (rd->s.min + rs->s.min) & msk;
	rv.s.max = (rd->s.max + rs->s.max) & msk;

	/*
	 * if at least one of the operands is not constant,
	 * then check for overflow
	 */
	if ((rd->u.min != rd->u.max || rs->u.min != rs->u.max) &&
			(rv.u.min < rd->u.min || rv.u.max < rd->u.max))
		eval_umax_bound(&rv, msk);

	if ((rd->s.min != rd->s.max || rs->s.min != rs->s.max) &&
			(((rs->s.min < 0 && rv.s.min > rd->s.min) ||
			rv.s.min < rd->s.min) ||
			((rs->s.max < 0 && rv.s.max > rd->s.max) ||
			rv.s.max < rd->s.max)))
		eval_smax_bound(&rv, msk);

	rd->s = rv.s;
	rd->u = rv.u;
}

static void
eval_sub(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, uint64_t msk)
{
	struct bpf_reg_val rv;

	rv.u.min = (rd->u.min - rs->u.max) & msk;
	rv.u.max = (rd->u.max - rs->u.min) & msk;
	rv.s.min = (rd->s.min - rs->s.max) & msk;
	rv.s.max = (rd->s.max - rs->s.min) & msk;

	/*
	 * if at least one of the operands is not constant,
	 * then check for overflow
	 */
	if ((rd->u.min != rd->u.max || rs->u.min != rs->u.max) &&
			(rv.u.min > rd->u.min || rv.u.max > rd->u.max))
		eval_umax_bound(&rv, msk);

	if ((rd->s.min != rd->s.max || rs->s.min != rs->s.max) &&
			(((rs->s.min < 0 && rv.s.min < rd->s.min) ||
			rv.s.min > rd->s.min) ||
			((rs->s.max < 0 && rv.s.max < rd->s.max) ||
			rv.s.max > rd->s.max)))
		eval_smax_bound(&rv, msk);

	rd->s = rv.s;
	rd->u = rv.u;
}

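/*
 * Shift evaluators: 'opsz' is the operand width in bits. Shift counts that
 * can reach the operand width, or shifts that may move significant bits
 * out of the representable range, force the bounds back to their widest
 * values for that width.
 */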
static void
eval_lsh(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
	uint64_t msk)
{
	/* check if shift value is less than max result bits */
	if (rs->u.max >= opsz) {
		eval_max_bound(rd, msk);
		return;
	}

	/* check for overflow */
	if (rd->u.max > RTE_LEN2MASK(opsz - rs->u.max, uint64_t))
		eval_umax_bound(rd, msk);
	else {
		rd->u.max <<= rs->u.max;
		rd->u.min <<= rs->u.min;
	}

	/* check that dreg values are and would remain non-negative */
	if ((uint64_t)rd->s.min >> (opsz - 1) != 0 || rd->s.max >=
			RTE_LEN2MASK(opsz - rs->u.max - 1, int64_t))
		eval_smax_bound(rd, msk);
	else {
		rd->s.max <<= rs->u.max;
		rd->s.min <<= rs->u.min;
	}
}

static void
eval_rsh(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
	uint64_t msk)
{
	/* check if shift value is less than max result bits */
	if (rs->u.max >= opsz) {
		eval_max_bound(rd, msk);
		return;
	}

	rd->u.max >>= rs->u.min;
	rd->u.min >>= rs->u.max;

	/* check that dreg values are always non-negative */
	if ((uint64_t)rd->s.min >> (opsz - 1) != 0)
		eval_smax_bound(rd, msk);
	else {
		rd->s.max >>= rs->u.min;
		rd->s.min >>= rs->u.max;
	}
}

static void
eval_arsh(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
	uint64_t msk)
{
	uint32_t shv;

	/* check if shift value is less than max result bits */
	if (rs->u.max >= opsz) {
		eval_max_bound(rd, msk);
		return;
	}

	rd->u.max = (int64_t)rd->u.max >> rs->u.min;
	rd->u.min = (int64_t)rd->u.min >> rs->u.max;

	/* if we have 32-bit values - extend them to 64-bit */
	if (opsz == sizeof(uint32_t) * CHAR_BIT) {
		rd->s.min <<= opsz;
		rd->s.max <<= opsz;
		shv = opsz;
	} else
		shv = 0;

	if (rd->s.min < 0)
		rd->s.min = (rd->s.min >> (rs->u.min + shv)) & msk;
	else
		rd->s.min = (rd->s.min >> (rs->u.max + shv)) & msk;

	if (rd->s.max < 0)
		rd->s.max = (rd->s.max >> (rs->u.max + shv)) & msk;
	else
		rd->s.max = (rd->s.max >> (rs->u.min + shv)) & msk;
}

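/*
 * Given a value's max possible magnitude, return a mask covering every
 * bit position that value could set (all bits up to and including its
 * most significant one).
 */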
static uint64_t
eval_umax_bits(uint64_t v, size_t opsz)
{
	if (v == 0)
		return 0;

	v = __builtin_clzll(v);
	return RTE_LEN2MASK(opsz - v, uint64_t);
}

/* estimate max possible value for (v1 & v2) */
static uint64_t
eval_uand_max(uint64_t v1, uint64_t v2, size_t opsz)
{
	v1 = eval_umax_bits(v1, opsz);
	v2 = eval_umax_bits(v2, opsz);
	return (v1 & v2);
}

/* estimate max possible value for (v1 | v2) */
static uint64_t
eval_uor_max(uint64_t v1, uint64_t v2, size_t opsz)
{
	v1 = eval_umax_bits(v1, opsz);
	v2 = eval_umax_bits(v2, opsz);
	return (v1 | v2);
}

static void
eval_and(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
	uint64_t msk)
{
	/* both operands are constants */
	if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
		rd->u.min &= rs->u.min;
		rd->u.max &= rs->u.max;
	} else {
		rd->u.max = eval_uand_max(rd->u.max, rs->u.max, opsz);
		rd->u.min &= rs->u.min;
	}

	/* both operands are constants */
	if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
		rd->s.min &= rs->s.min;
		rd->s.max &= rs->s.max;
	/* at least one operand is non-negative */
	} else if (rd->s.min >= 0 || rs->s.min >= 0) {
		rd->s.max = eval_uand_max(rd->s.max & (msk >> 1),
			rs->s.max & (msk >> 1), opsz);
		rd->s.min &= rs->s.min;
	} else
		eval_smax_bound(rd, msk);
}

static void
eval_or(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
	uint64_t msk)
{
	/* both operands are constants */
	if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
		rd->u.min |= rs->u.min;
		rd->u.max |= rs->u.max;
	} else {
		rd->u.max = eval_uor_max(rd->u.max, rs->u.max, opsz);
		rd->u.min |= rs->u.min;
	}

	/* both operands are constants */
	if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
		rd->s.min |= rs->s.min;
		rd->s.max |= rs->s.max;

	/* both operands are non-negative */
	} else if (rd->s.min >= 0 && rs->s.min >= 0) {
		rd->s.max = eval_uor_max(rd->s.max, rs->s.max, opsz);
		rd->s.min |= rs->s.min;
	} else
		eval_smax_bound(rd, msk);
}

static void
eval_xor(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
	uint64_t msk)
{
	/* both operands are constants */
	if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
		rd->u.min ^= rs->u.min;
		rd->u.max ^= rs->u.max;
	} else {
		rd->u.max = eval_uor_max(rd->u.max, rs->u.max, opsz);
		rd->u.min = 0;
	}

	/* both operands are constants */
	if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
		rd->s.min ^= rs->s.min;
		rd->s.max ^= rs->s.max;

	/* both operands are non-negative */
	} else if (rd->s.min >= 0 && rs->s.min >= 0) {
		rd->s.max = eval_uor_max(rd->s.max, rs->s.max, opsz);
		rd->s.min = 0;
	} else
		eval_smax_bound(rd, msk);
}

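/*
 * MUL bounds: exact for constant operands; otherwise the product is kept
 * only when both operands are known to fit into half of the operand width
 * (so the multiplication cannot overflow), else the bounds are widened.
 */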
static void
eval_mul(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
	uint64_t msk)
{
	/* both operands are constants */
	if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
		rd->u.min = (rd->u.min * rs->u.min) & msk;
		rd->u.max = (rd->u.max * rs->u.max) & msk;
	/* check for overflow */
	} else if (rd->u.max <= msk >> opsz / 2 &&
			rs->u.max <= msk >> opsz / 2) {
		rd->u.max *= rs->u.max;
		rd->u.min *= rs->u.min;
	} else
		eval_umax_bound(rd, msk);

	/* both operands are constants */
	if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
		rd->s.min = (rd->s.min * rs->s.min) & msk;
		rd->s.max = (rd->s.max * rs->s.max) & msk;
	/* check that both operands are non-negative with no overflow */
	} else if (rd->s.min >= 0 && rs->s.min >= 0) {
		rd->s.max *= rs->s.max;
		rd->s.min *= rs->s.min;
	} else
		eval_smax_bound(rd, msk);
}

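/*
 * DIV/MOD bounds: exact when both operands are constants (a constant zero
 * divisor is rejected outright); otherwise only a conservative clamping
 * of the result range is possible.
 */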
static const char *
eval_divmod(uint32_t op, struct bpf_reg_val *rd, struct bpf_reg_val *rs,
	size_t opsz, uint64_t msk)
{
	/* both operands are constants */
	if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
		if (rs->u.max == 0)
			return "division by 0";
		if (op == BPF_DIV) {
			rd->u.min /= rs->u.min;
			rd->u.max /= rs->u.max;
		} else {
			rd->u.min %= rs->u.min;
			rd->u.max %= rs->u.max;
		}
	} else {
		/* for DIV the quotient can only shrink, keep current max */
		if (op == BPF_MOD)
			rd->u.max = RTE_MIN(rd->u.max, rs->u.max - 1);
		rd->u.min = 0;
	}

	/* if we have 32-bit values - extend them to 64-bit */
	if (opsz == sizeof(uint32_t) * CHAR_BIT) {
		rd->s.min = (int32_t)rd->s.min;
		rd->s.max = (int32_t)rd->s.max;
		rs->s.min = (int32_t)rs->s.min;
		rs->s.max = (int32_t)rs->s.max;
	}

	/* both operands are constants */
	if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
		if (rs->s.max == 0)
			return "division by 0";
		if (op == BPF_DIV) {
			rd->s.min /= rs->s.min;
			rd->s.max /= rs->s.max;
		} else {
			rd->s.min %= rs->s.min;
			rd->s.max %= rs->s.max;
		}
	} else if (op == BPF_MOD) {
		rd->s.max = RTE_MAX(rd->s.max, 0);
		rd->s.min = RTE_MIN(rd->s.min, 0);
	} else
		eval_smax_bound(rd, msk);

	rd->s.max &= msk;
	rd->s.min &= msk;

	return NULL;
}

static void
eval_neg(struct bpf_reg_val *rd, size_t opsz, uint64_t msk)
{
	uint64_t ux, uy;
	int64_t sx, sy;

	/* if we have 32-bit values - extend them to 64-bit */
	if (opsz == sizeof(uint32_t) * CHAR_BIT) {
		rd->u.min = (int32_t)rd->u.min;
		rd->u.max = (int32_t)rd->u.max;
	}

	ux = -(int64_t)rd->u.min & msk;
	uy = -(int64_t)rd->u.max & msk;

	rd->u.max = RTE_MAX(ux, uy);
	rd->u.min = RTE_MIN(ux, uy);

	/* if we have 32-bit values - extend them to 64-bit */
	if (opsz == sizeof(uint32_t) * CHAR_BIT) {
		rd->s.min = (int32_t)rd->s.min;
		rd->s.max = (int32_t)rd->s.max;
	}

	sx = -rd->s.min & msk;
	sy = -rd->s.max & msk;

	rd->s.max = RTE_MAX(sx, sy);
	rd->s.min = RTE_MIN(sx, sy);
}

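/*
 * Legacy BPF_ABS/BPF_IND packet loads: R6 must hold the mbuf pointer,
 * R1-R5 are clobbered and R0 receives the loaded bytes. For BPF_IND the
 * source register supplies a variable offset that is bounds-checked here.
 */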
static const char *
eval_ld_mbuf(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
{
	uint32_t i, mode;
	struct bpf_reg_val *rv, ri, rs;

	mode = BPF_MODE(ins->code);

	/* R6 is an implicit input that must contain pointer to mbuf */
	if (bvf->evst->rv[EBPF_REG_6].v.type != RTE_BPF_ARG_PTR_MBUF)
		return "invalid type for implicit ctx register";

	if (mode == BPF_IND) {
		rs = bvf->evst->rv[ins->src_reg];
		if (rs.v.type != RTE_BPF_ARG_RAW)
			return "unexpected type for src register";

		eval_fill_imm(&ri, UINT64_MAX, ins->imm);
		eval_add(&rs, &ri, UINT64_MAX);

		if (rs.s.max < 0 || rs.u.min > UINT32_MAX)
			return "mbuf boundary violation";
	}

	/* R1-R5 scratch registers */
	for (i = EBPF_REG_1; i != EBPF_REG_6; i++)
		bvf->evst->rv[i].v.type = RTE_BPF_ARG_UNDEF;

	/* R0 is an implicit output, contains data fetched from the packet */
	rv = bvf->evst->rv + EBPF_REG_0;
	rv->v.size = bpf_size(BPF_SIZE(ins->code));
	eval_fill_max_bound(rv, RTE_LEN2MASK(rv->v.size * CHAR_BIT, uint64_t));

	return NULL;
}

/*
 * check that destination and source operands are in a defined state.
 */
static const char *
eval_defined(const struct bpf_reg_val *dst, const struct bpf_reg_val *src)
{
	if (dst != NULL && dst->v.type == RTE_BPF_ARG_UNDEF)
		return "dest reg value is undefined";
	if (src != NULL && src->v.type == RTE_BPF_ARG_UNDEF)
		return "src reg value is undefined";
	return NULL;
}

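/*
 * Common evaluator for all ALU/ALU64 instructions: derive the operand
 * width and mask from the instruction class, fetch the source operand
 * (register or immediate) and dispatch to the per-operation bounds helper.
 */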
static const char *
eval_alu(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
{
	uint64_t msk;
	uint32_t op;
	size_t opsz;
	const char *err;
	struct bpf_eval_state *st;
	struct bpf_reg_val *rd, rs;

	opsz = (BPF_CLASS(ins->code) == BPF_ALU) ?
		sizeof(uint32_t) : sizeof(uint64_t);
	opsz = opsz * CHAR_BIT;
	msk = RTE_LEN2MASK(opsz, uint64_t);

	st = bvf->evst;
	rd = st->rv + ins->dst_reg;

	if (BPF_SRC(ins->code) == BPF_X) {
		rs = st->rv[ins->src_reg];
		eval_apply_mask(&rs, msk);
	} else
		eval_fill_imm(&rs, msk, ins->imm);

	eval_apply_mask(rd, msk);

	op = BPF_OP(ins->code);

	err = eval_defined((op != EBPF_MOV) ? rd : NULL,
			(op != BPF_NEG) ? &rs : NULL);
	if (err != NULL)
		return err;

	if (op == BPF_ADD)
		eval_add(rd, &rs, msk);
	else if (op == BPF_SUB)
		eval_sub(rd, &rs, msk);
	else if (op == BPF_LSH)
		eval_lsh(rd, &rs, opsz, msk);
	else if (op == BPF_RSH)
		eval_rsh(rd, &rs, opsz, msk);
	else if (op == EBPF_ARSH)
		eval_arsh(rd, &rs, opsz, msk);
	else if (op == BPF_AND)
		eval_and(rd, &rs, opsz, msk);
	else if (op == BPF_OR)
		eval_or(rd, &rs, opsz, msk);
	else if (op == BPF_XOR)
		eval_xor(rd, &rs, opsz, msk);
	else if (op == BPF_MUL)
		eval_mul(rd, &rs, opsz, msk);
	else if (op == BPF_DIV || op == BPF_MOD)
		err = eval_divmod(op, rd, &rs, opsz, msk);
	else if (op == BPF_NEG)
		eval_neg(rd, opsz, msk);
	else if (op == EBPF_MOV)
		*rd = rs;
	else
		eval_max_bound(rd, msk);

	return err;
}

static const char *
eval_bele(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
{
	uint64_t msk;
	struct bpf_eval_state *st;
	struct bpf_reg_val *rd;
	const char *err;

	msk = RTE_LEN2MASK(ins->imm, uint64_t);

	st = bvf->evst;
	rd = st->rv + ins->dst_reg;

	err = eval_defined(rd, NULL);
	if (err != NULL)
		return err;

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
	if (ins->code == (BPF_ALU | EBPF_END | EBPF_TO_BE))
		eval_max_bound(rd, msk);
	else
		eval_apply_mask(rd, msk);
#else
	if (ins->code == (BPF_ALU | EBPF_END | EBPF_TO_LE))
		eval_max_bound(rd, msk);
	else
		eval_apply_mask(rd, msk);
#endif

	return NULL;
}

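/*
 * Validate a pointer access of 'opsz' bytes at reg+off: the register must
 * hold an untruncated pointer and the access must stay within the
 * pointed-to object and be suitably aligned. Stack and mbuf pointers
 * additionally require a constant offset.
 */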
static const char *
eval_ptr(struct bpf_verifier *bvf, struct bpf_reg_val *rm, uint32_t opsz,
	uint32_t align, int16_t off)
{
	struct bpf_reg_val rv;

	/* calculate reg + offset */
	eval_fill_imm(&rv, rm->mask, off);
	eval_add(rm, &rv, rm->mask);

	if (RTE_BPF_ARG_PTR_TYPE(rm->v.type) == 0)
		return "destination is not a pointer";

	if (rm->mask != UINT64_MAX)
		return "pointer truncation";

	if (rm->u.max + opsz > rm->v.size ||
			(uint64_t)rm->s.max + opsz > rm->v.size ||
			rm->s.min < 0)
		return "memory boundary violation";

	if (rm->u.max % align != 0)
		return "unaligned memory access";

	if (rm->v.type == BPF_ARG_PTR_STACK) {

		if (rm->u.max != rm->u.min || rm->s.max != rm->s.min ||
				rm->u.max != (uint64_t)rm->s.max)
			return "stack access with variable offset";

		bvf->stack_sz = RTE_MAX(bvf->stack_sz, rm->v.size - rm->u.max);

	/* pointer to mbuf */
	} else if (rm->v.type == RTE_BPF_ARG_PTR_MBUF) {

		if (rm->u.max != rm->u.min || rm->s.max != rm->s.min ||
				rm->u.max != (uint64_t)rm->s.max)
			return "mbuf access with variable offset";
	}

	return NULL;
}

static void
eval_max_load(struct bpf_reg_val *rv, uint64_t mask)
{
	eval_umax_bound(rv, mask);

	/* full 64-bit load */
	if (mask == UINT64_MAX)
		eval_smax_bound(rv, mask);

	/* zero-extend load */
	rv->s.min = rv->u.min;
	rv->s.max = rv->u.max;
}

static const char *
eval_load(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
{
	uint32_t opsz;
	uint64_t msk;
	const char *err;
	struct bpf_eval_state *st;
	struct bpf_reg_val *rd, rs;
	const struct bpf_reg_val *sv;

	st = bvf->evst;
	rd = st->rv + ins->dst_reg;
	rs = st->rv[ins->src_reg];
	opsz = bpf_size(BPF_SIZE(ins->code));
	msk = RTE_LEN2MASK(opsz * CHAR_BIT, uint64_t);

	err = eval_ptr(bvf, &rs, opsz, 1, ins->off);
	if (err != NULL)
		return err;

	if (rs.v.type == BPF_ARG_PTR_STACK) {

		sv = st->sv + rs.u.max / sizeof(uint64_t);
		if (sv->v.type == RTE_BPF_ARG_UNDEF || sv->mask < msk)
			return "undefined value on the stack";

		*rd = *sv;

	/* pointer to mbuf */
	} else if (rs.v.type == RTE_BPF_ARG_PTR_MBUF) {

		if (rs.u.max == offsetof(struct rte_mbuf, next)) {
			eval_fill_imm(rd, msk, 0);
			rd->v = rs.v;
		} else if (rs.u.max == offsetof(struct rte_mbuf, buf_addr)) {
			eval_fill_imm(rd, msk, 0);
			rd->v.type = RTE_BPF_ARG_PTR;
			rd->v.size = rs.v.buf_size;
		} else if (rs.u.max == offsetof(struct rte_mbuf, data_off)) {
			eval_fill_imm(rd, msk, RTE_PKTMBUF_HEADROOM);
			rd->v.type = RTE_BPF_ARG_RAW;
		} else {
			eval_max_load(rd, msk);
			rd->v.type = RTE_BPF_ARG_RAW;
		}

	/* pointer to raw data */
	} else {
		eval_max_load(rd, msk);
		rd->v.type = RTE_BPF_ARG_RAW;
	}

	return NULL;
}

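/*
 * Check that a store of 'opsz' bytes into an mbuf does not touch any of
 * the fields the program is not allowed to modify. Field sizes are
 * spelled out so the overlap test below can compare full byte ranges.
 */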
static const char *
eval_mbuf_store(const struct bpf_reg_val *rv, uint32_t opsz)
{
	uint32_t i;

	static const struct {
		size_t off;
		size_t sz;
	} mbuf_ro_fields[] = {
		{ .off = offsetof(struct rte_mbuf, buf_addr),
			.sz = sizeof(((struct rte_mbuf *)0)->buf_addr), },
		{ .off = offsetof(struct rte_mbuf, refcnt),
			.sz = sizeof(((struct rte_mbuf *)0)->refcnt), },
		{ .off = offsetof(struct rte_mbuf, nb_segs),
			.sz = sizeof(((struct rte_mbuf *)0)->nb_segs), },
		{ .off = offsetof(struct rte_mbuf, buf_len),
			.sz = sizeof(((struct rte_mbuf *)0)->buf_len), },
		{ .off = offsetof(struct rte_mbuf, pool),
			.sz = sizeof(((struct rte_mbuf *)0)->pool), },
		{ .off = offsetof(struct rte_mbuf, next),
			.sz = sizeof(((struct rte_mbuf *)0)->next), },
		{ .off = offsetof(struct rte_mbuf, priv_size),
			.sz = sizeof(((struct rte_mbuf *)0)->priv_size), },
	};

	for (i = 0; i != RTE_DIM(mbuf_ro_fields) &&
			(mbuf_ro_fields[i].off + mbuf_ro_fields[i].sz <=
			rv->u.max || rv->u.max + opsz <= mbuf_ro_fields[i].off);
			i++)
		;

	if (i != RTE_DIM(mbuf_ro_fields))
		return "store to a read-only mbuf field";

	return NULL;
}

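/*
 * Evaluate ST/STX: the value operand must be defined and the destination
 * pointer valid. Stores into the stack area update the tracked slot state
 * (XADD results are unknown, so they widen the slot bounds); stores into
 * an mbuf are checked against the read-only field list.
 */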
static const char *
eval_store(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
{
	uint32_t opsz;
	uint64_t msk;
	const char *err;
	struct bpf_eval_state *st;
	struct bpf_reg_val rd, rs, *sv;

	opsz = bpf_size(BPF_SIZE(ins->code));
	msk = RTE_LEN2MASK(opsz * CHAR_BIT, uint64_t);

	st = bvf->evst;
	rd = st->rv[ins->dst_reg];

	if (BPF_CLASS(ins->code) == BPF_STX) {
		rs = st->rv[ins->src_reg];
		eval_apply_mask(&rs, msk);
	} else
		eval_fill_imm(&rs, msk, ins->imm);

	err = eval_defined(NULL, &rs);
	if (err != NULL)
		return err;

	err = eval_ptr(bvf, &rd, opsz, 1, ins->off);
	if (err != NULL)
		return err;

	if (rd.v.type == BPF_ARG_PTR_STACK) {

		sv = st->sv + rd.u.max / sizeof(uint64_t);
		if (BPF_CLASS(ins->code) == BPF_STX &&
				BPF_MODE(ins->code) == EBPF_XADD)
			eval_max_bound(sv, msk);
		else
			*sv = rs;

	/* pointer to mbuf */
	} else if (rd.v.type == RTE_BPF_ARG_PTR_MBUF) {
		err = eval_mbuf_store(&rd, opsz);
		if (err != NULL)
			return err;
	}

	return NULL;
}

static const char *
eval_func_arg(struct bpf_verifier *bvf, const struct rte_bpf_arg *arg,
	struct bpf_reg_val *rv)
{
	uint32_t i, n;
	struct bpf_eval_state *st;
	const char *err;

	st = bvf->evst;

	if (rv->v.type == RTE_BPF_ARG_UNDEF)
		return "Undefined argument type";

	if (arg->type != rv->v.type &&
			arg->type != RTE_BPF_ARG_RAW &&
			(arg->type != RTE_BPF_ARG_PTR ||
			RTE_BPF_ARG_PTR_TYPE(rv->v.type) == 0))
		return "Invalid argument type";

	err = NULL;

	/* argument is a pointer */
	if (RTE_BPF_ARG_PTR_TYPE(arg->type) != 0) {

		err = eval_ptr(bvf, rv, arg->size, 1, 0);

		/*
		 * pointer to the variable on the stack is passed
		 * as an argument, mark stack space it occupies as initialized.
		 */
		if (err == NULL && rv->v.type == BPF_ARG_PTR_STACK) {

			i = rv->u.max / sizeof(uint64_t);
			n = i + arg->size / sizeof(uint64_t);
			while (i != n) {
				eval_fill_max_bound(st->sv + i, UINT64_MAX);
				i++;
			}
		}
	}

	return err;
}

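/*
 * Evaluate a call to an external function: validate the symbol index and
 * each argument register against the symbol's prototype, clobber the
 * scratch registers and set up R0 according to the declared return type.
 */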
static const char *
eval_call(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
{
	uint32_t i, idx;
	struct bpf_reg_val *rv;
	const struct rte_bpf_xsym *xsym;
	const char *err;

	idx = ins->imm;

	if (idx >= bvf->prm->nb_xsym ||
			bvf->prm->xsym[idx].type != RTE_BPF_XTYPE_FUNC)
		return "invalid external function index";

	/* for now don't support function calls on 32 bit platform */
	if (sizeof(uint64_t) != sizeof(uintptr_t))
		return "function calls are supported only for 64 bit apps";

	xsym = bvf->prm->xsym + idx;

	/* evaluate function arguments */
	err = NULL;
	for (i = 0; i != xsym->func.nb_args && err == NULL; i++) {
		err = eval_func_arg(bvf, xsym->func.args + i,
			bvf->evst->rv + EBPF_REG_1 + i);
	}

	/* R1-R5 argument/scratch registers */
	for (i = EBPF_REG_1; i != EBPF_REG_6; i++)
		bvf->evst->rv[i].v.type = RTE_BPF_ARG_UNDEF;

	/* update return value */
	rv = bvf->evst->rv + EBPF_REG_0;
	rv->v = xsym->func.ret;
	if (rv->v.type == RTE_BPF_ARG_RAW)
		eval_fill_max_bound(rv,
			RTE_LEN2MASK(rv->v.size * CHAR_BIT, uint64_t));
	else if (RTE_BPF_ARG_PTR_TYPE(rv->v.type) != 0)
		eval_fill_imm64(rv, UINTPTR_MAX, 0);

	return err;
}

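/*
 * Conditional jump evaluation keeps two copies of the register state:
 * 't*' values describe the state on the taken path, 'f*' values the
 * fall-through state. Each helper below narrows the bounds the compared
 * registers can have on the corresponding path.
 */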
static void
eval_jeq_jne(struct bpf_reg_val *trd, struct bpf_reg_val *trs)
{
	/* sreg is constant */
	if (trs->u.min == trs->u.max) {
		trd->u = trs->u;
	/* dreg is constant */
	} else if (trd->u.min == trd->u.max) {
		trs->u = trd->u;
	} else {
		trd->u.max = RTE_MIN(trd->u.max, trs->u.max);
		trd->u.min = RTE_MAX(trd->u.min, trs->u.min);
		trs->u = trd->u;
	}

	/* sreg is constant */
	if (trs->s.min == trs->s.max) {
		trd->s = trs->s;
	/* dreg is constant */
	} else if (trd->s.min == trd->s.max) {
		trs->s = trd->s;
	} else {
		trd->s.max = RTE_MIN(trd->s.max, trs->s.max);
		trd->s.min = RTE_MAX(trd->s.min, trs->s.min);
		trs->s = trd->s;
	}
}

static void
eval_jgt_jle(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
	struct bpf_reg_val *frd, struct bpf_reg_val *frs)
{
	frd->u.max = RTE_MIN(frd->u.max, frs->u.min);
	trd->u.min = RTE_MAX(trd->u.min, trs->u.min + 1);
}

static void
eval_jlt_jge(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
	struct bpf_reg_val *frd, struct bpf_reg_val *frs)
{
	frd->u.min = RTE_MAX(frd->u.min, frs->u.min);
	trd->u.max = RTE_MIN(trd->u.max, trs->u.max - 1);
}

static void
eval_jsgt_jsle(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
	struct bpf_reg_val *frd, struct bpf_reg_val *frs)
{
	frd->s.max = RTE_MIN(frd->s.max, frs->s.min);
	trd->s.min = RTE_MAX(trd->s.min, trs->s.min + 1);
}

static void
eval_jslt_jsge(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
	struct bpf_reg_val *frd, struct bpf_reg_val *frs)
{
	frd->s.min = RTE_MAX(frd->s.min, frs->s.min);
	trd->s.max = RTE_MIN(trd->s.max, trs->s.max - 1);
}

static const char *
eval_jcc(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
{
	uint32_t op;
	const char *err;
	struct bpf_eval_state *fst, *tst;
	struct bpf_reg_val *frd, *frs, *trd, *trs;
	struct bpf_reg_val rvf, rvt;

	tst = bvf->evst;
	fst = bvf->evin->evst;

	frd = fst->rv + ins->dst_reg;
	trd = tst->rv + ins->dst_reg;

	if (BPF_SRC(ins->code) == BPF_X) {
		frs = fst->rv + ins->src_reg;
		trs = tst->rv + ins->src_reg;
	} else {
		frs = &rvf;
		trs = &rvt;
		eval_fill_imm(frs, UINT64_MAX, ins->imm);
		eval_fill_imm(trs, UINT64_MAX, ins->imm);
	}

	err = eval_defined(trd, trs);
	if (err != NULL)
		return err;

	op = BPF_OP(ins->code);

	if (op == BPF_JEQ)
		eval_jeq_jne(trd, trs);
	else if (op == EBPF_JNE)
		eval_jeq_jne(frd, frs);
	else if (op == BPF_JGT)
		eval_jgt_jle(trd, trs, frd, frs);
	else if (op == EBPF_JLE)
		eval_jgt_jle(frd, frs, trd, trs);
	else if (op == EBPF_JLT)
		eval_jlt_jge(trd, trs, frd, frs);
	else if (op == BPF_JGE)
		eval_jlt_jge(frd, frs, trd, trs);
	else if (op == EBPF_JSGT)
		eval_jsgt_jsle(trd, trs, frd, frs);
	else if (op == EBPF_JSLE)
		eval_jsgt_jsle(frd, frs, trd, trs);
	else if (op == EBPF_JSLT)
		eval_jslt_jsge(trd, trs, frd, frs);
	else if (op == EBPF_JSGE)
		eval_jslt_jsge(frd, frs, trd, trs);

	return NULL;
}

/*
 * validate parameters for each instruction type.
 */
static const struct bpf_ins_check ins_chk[UINT8_MAX + 1] = {
	/* ALU IMM 32-bit instructions */
	[(BPF_ALU | BPF_ADD | BPF_K)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_SUB | BPF_K)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_AND | BPF_K)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_OR | BPF_K)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_LSH | BPF_K)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_RSH | BPF_K)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_XOR | BPF_K)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_MUL | BPF_K)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_alu,
	},
	[(BPF_ALU | EBPF_MOV | BPF_K)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_DIV | BPF_K)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 1, .max = UINT32_MAX},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_MOD | BPF_K)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 1, .max = UINT32_MAX},
		.eval = eval_alu,
	},
	/* ALU IMM 64-bit instructions */
	[(EBPF_ALU64 | BPF_ADD | BPF_K)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_SUB | BPF_K)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_AND | BPF_K)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_OR | BPF_K)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_LSH | BPF_K)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_RSH | BPF_K)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | EBPF_ARSH | BPF_K)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_XOR | BPF_K)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_MUL | BPF_K)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | EBPF_MOV | BPF_K)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_DIV | BPF_K)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 1, .max = UINT32_MAX},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_MOD | BPF_K)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 1, .max = UINT32_MAX},
		.eval = eval_alu,
	},
	/* ALU REG 32-bit instructions */
	[(BPF_ALU | BPF_ADD | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_SUB | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_AND | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_OR | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_LSH | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_RSH | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_XOR | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_MUL | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_DIV | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_MOD | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(BPF_ALU | EBPF_MOV | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_NEG)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(BPF_ALU | EBPF_END | EBPF_TO_BE)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 16, .max = 64},
		.check = check_alu_bele,
		.eval = eval_bele,
	},
	[(BPF_ALU | EBPF_END | EBPF_TO_LE)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 16, .max = 64},
		.check = check_alu_bele,
		.eval = eval_bele,
	},
	/* ALU REG 64-bit instructions */
	[(EBPF_ALU64 | BPF_ADD | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_SUB | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_AND | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_OR | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_LSH | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_RSH | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | EBPF_ARSH | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_XOR | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_MUL | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_DIV | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_MOD | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | EBPF_MOV | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_NEG)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	/* load instructions */
	[(BPF_LDX | BPF_MEM | BPF_B)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_load,
	},
	[(BPF_LDX | BPF_MEM | BPF_H)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_load,
	},
	[(BPF_LDX | BPF_MEM | BPF_W)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_load,
	},
	[(BPF_LDX | BPF_MEM | EBPF_DW)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_load,
	},
	/* load 64 bit immediate value */
	[(BPF_LD | BPF_IMM | EBPF_DW)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_ld_imm64,
	},
	/* load absolute instructions */
	[(BPF_LD | BPF_ABS | BPF_B)] = {
		.mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = INT32_MAX},
		.eval = eval_ld_mbuf,
	},
	[(BPF_LD | BPF_ABS | BPF_H)] = {
		.mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = INT32_MAX},
		.eval = eval_ld_mbuf,
	},
	[(BPF_LD | BPF_ABS | BPF_W)] = {
		.mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = INT32_MAX},
		.eval = eval_ld_mbuf,
	},
	/* load indirect instructions */
	[(BPF_LD | BPF_IND | BPF_B)] = {
		.mask = { .dreg = ZERO_REG, .sreg = IND_SRC_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_ld_mbuf,
	},
	[(BPF_LD | BPF_IND | BPF_H)] = {
		.mask = { .dreg = ZERO_REG, .sreg = IND_SRC_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_ld_mbuf,
	},
	[(BPF_LD | BPF_IND | BPF_W)] = {
		.mask = { .dreg = ZERO_REG, .sreg = IND_SRC_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_ld_mbuf,
	},
	/* store REG instructions */
	[(BPF_STX | BPF_MEM | BPF_B)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_store,
	},
	[(BPF_STX | BPF_MEM | BPF_H)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_store,
	},
	[(BPF_STX | BPF_MEM | BPF_W)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_store,
	},
	[(BPF_STX | BPF_MEM | EBPF_DW)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_store,
	},
	/* atomic add instructions */
	[(BPF_STX | EBPF_XADD | BPF_W)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_store,
	},
	[(BPF_STX | EBPF_XADD | EBPF_DW)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_store,
	},
	/* store IMM instructions */
	[(BPF_ST | BPF_MEM | BPF_B)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_store,
	},
	[(BPF_ST | BPF_MEM | BPF_H)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_store,
	},
	[(BPF_ST | BPF_MEM | BPF_W)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_store,
	},
	[(BPF_ST | BPF_MEM | EBPF_DW)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_store,
	},
	/* jump instruction */
	[(BPF_JMP | BPF_JA)] = {
		.mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
	},
	/* jcc IMM instructions */
	[(BPF_JMP | BPF_JEQ | BPF_K)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_jcc,
	},
	[(BPF_JMP | EBPF_JNE | BPF_K)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_jcc,
	},
	[(BPF_JMP | BPF_JGT | BPF_K)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_jcc,
	},
	[(BPF_JMP | EBPF_JLT | BPF_K)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_jcc,
	},
	[(BPF_JMP | BPF_JGE | BPF_K)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_jcc,
	},
	[(BPF_JMP | EBPF_JLE | BPF_K)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_jcc,
	},
	[(BPF_JMP | EBPF_JSGT | BPF_K)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_jcc,
	},
	[(BPF_JMP | EBPF_JSLT | BPF_K)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_jcc,
	},
	[(BPF_JMP | EBPF_JSGE | BPF_K)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_jcc,
	},
	[(BPF_JMP | EBPF_JSLE | BPF_K)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_jcc,
	},
	[(BPF_JMP | BPF_JSET | BPF_K)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_jcc,
	},
	/* jcc REG instructions */
	[(BPF_JMP | BPF_JEQ | BPF_X)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_jcc,
	},
	[(BPF_JMP | EBPF_JNE | BPF_X)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_jcc,
	},
	[(BPF_JMP | BPF_JGT | BPF_X)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_jcc,
	},
	[(BPF_JMP | EBPF_JLT | BPF_X)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_jcc,
	},
	[(BPF_JMP | BPF_JGE | BPF_X)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_jcc,
	},
	[(BPF_JMP | EBPF_JLE | BPF_X)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_jcc,
	},
	[(BPF_JMP | EBPF_JSGT | BPF_X)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_jcc,
	},
	[(BPF_JMP | EBPF_JSLT | BPF_X)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_jcc,
	},
	[(BPF_JMP | EBPF_JSGE | BPF_X)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_jcc,
	},
	[(BPF_JMP | EBPF_JSLE | BPF_X)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_jcc,
	},
	[(BPF_JMP | BPF_JSET | BPF_X)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_jcc,
	},
	/* call instruction */
	[(BPF_JMP | EBPF_CALL)] = {
		.mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_call,
	},
	/* ret instruction */
	[(BPF_JMP | EBPF_EXIT)] = {
		.mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_exit,
	},
};

/*
 * make sure that instruction syntax is valid,
 * and its fields don't violate particular instruction type restrictions.
 */
static const char *
check_syntax(const struct ebpf_insn *ins)
{
	uint8_t op;
	uint16_t off;
	uint32_t imm;

	op = ins->code;

	if (ins_chk[op].mask.dreg == 0)
		return "invalid opcode";

	if ((ins_chk[op].mask.dreg & 1 << ins->dst_reg) == 0)
		return "invalid dst-reg field";

	if ((ins_chk[op].mask.sreg & 1 << ins->src_reg) == 0)
		return "invalid src-reg field";

	off = ins->off;
	if (ins_chk[op].off.min > off || ins_chk[op].off.max < off)
		return "invalid off field";

	imm = ins->imm;
	if (ins_chk[op].imm.min > imm || ins_chk[op].imm.max < imm)
		return "invalid imm field";

	if (ins_chk[op].check != NULL)
		return ins_chk[op].check(ins);

	return NULL;
}

/*
 * helper function, return instruction index for the given node.
 */
static uint32_t
get_node_idx(const struct bpf_verifier *bvf, const struct inst_node *node)
{
	return node - bvf->in;
}

/*
 * helper function, used to walk through constructed CFG.
 */
static struct inst_node *
get_next_node(struct bpf_verifier *bvf, struct inst_node *node)
{
	uint32_t ce, ne, dst;

	ne = node->nb_edge;
	ce = node->cur_edge;
	if (ce == ne)
		return NULL;

	node->cur_edge++;
	dst = node->edge_dest[ce];
	return bvf->in + dst;
}

static void
set_node_colour(struct bpf_verifier *bvf, struct inst_node *node,
	uint32_t new)
{
	uint32_t prev;

	prev = node->colour;
	node->colour = new;

	bvf->node_colour[prev]--;
	bvf->node_colour[new]++;
}

/*
 * helper function, add new edge between two nodes.
 */
static int
add_edge(struct bpf_verifier *bvf, struct inst_node *node, uint32_t nidx)
{
	uint32_t ne;

	if (nidx > bvf->prm->nb_ins) {
		RTE_BPF_LOG(ERR, "%s: program boundary violation at pc: %u, "
			"next pc: %u\n",
			__func__, get_node_idx(bvf, node), nidx);
		return -EINVAL;
	}

	ne = node->nb_edge;
	if (ne >= RTE_DIM(node->edge_dest)) {
		RTE_BPF_LOG(ERR, "%s: internal error at pc: %u\n",
			__func__, get_node_idx(bvf, node));
		return -EINVAL;
	}

	node->edge_dest[ne] = nidx;
	node->nb_edge = ne + 1;
	return 0;
}

/*
 * helper function, determine type of edge between two nodes.
 */
static void
set_edge_type(struct bpf_verifier *bvf, struct inst_node *node,
	const struct inst_node *next)
{
	uint32_t ce, clr, type;

	ce = node->cur_edge - 1;
	clr = next->colour;

	type = UNKNOWN_EDGE;

	if (clr == WHITE)
		type = TREE_EDGE;
	else if (clr == GREY)
		type = BACK_EDGE;
	else if (clr == BLACK)
		/*
		 * in fact it could be either direct or cross edge,
		 * but for now, we don't need to distinguish between them.
		 */
		type = CROSS_EDGE;

	node->edge_type[ce] = type;
	bvf->edge_type[type]++;
}

static struct inst_node *
get_prev_node(struct bpf_verifier *bvf, struct inst_node *node)
{
	return bvf->in + node->prev_node;
}
1862
1863 /*
1864 * Depth-First Search (DFS) through previously constructed
1865 * Control Flow Graph (CFG).
1866 * Information collected at this path would be used later
1867 * to determine is there any loops, and/or unreachable instructions.
1868 */
1869 static void
dfs(struct bpf_verifier * bvf)1870 dfs(struct bpf_verifier *bvf)
1871 {
1872 struct inst_node *next, *node;
1873
1874 node = bvf->in;
1875 while (node != NULL) {
1876
1877 if (node->colour == WHITE)
1878 set_node_colour(bvf, node, GREY);
1879
1880 if (node->colour == GREY) {
1881
1882 /* find next unprocessed child node */
1883 do {
1884 next = get_next_node(bvf, node);
1885 if (next == NULL)
1886 break;
1887 set_edge_type(bvf, node, next);
1888 } while (next->colour != WHITE);
1889
1890 if (next != NULL) {
1891 /* proceed with next child */
1892 next->prev_node = get_node_idx(bvf, node);
1893 node = next;
1894 } else {
1895 /*
1896 * finished with current node and all it's kids,
1897 * proceed with parent
1898 */
1899 set_node_colour(bvf, node, BLACK);
1900 node->cur_edge = 0;
1901 node = get_prev_node(bvf, node);
1902 }
1903 } else
1904 node = NULL;
1905 }
1906 }
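
/*
 * Illustrative dfs() walk over a tiny 3-instruction program
 * (0: jcc +1; 1: ja +0; 2: exit), where node 0 has edges to
 * nodes 2 (branch taken) and 1 (fall-through):
 *	node 0 GREY, take 1-st edge to node 2 (TREE_EDGE);
 *	node 2 GREY, no outgoing edges -> BLACK, back to node 0;
 *	node 0, take 2-nd edge to node 1 (TREE_EDGE);
 *	node 1 GREY, its only edge leads to BLACK node 2 (CROSS_EDGE);
 *	node 1 BLACK, back to node 0; node 0 BLACK, walk stops.
 * All nodes end up BLACK and no BACK_EDGE was found, so this CFG
 * has no loops or unreachable instructions.
 */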

/*
 * report unreachable instructions.
 */
static void
log_unreachable(const struct bpf_verifier *bvf)
{
	uint32_t i;
	struct inst_node *node;
	const struct ebpf_insn *ins;

	for (i = 0; i != bvf->prm->nb_ins; i++) {

		node = bvf->in + i;
		ins = bvf->prm->ins + i;

		if (node->colour == WHITE &&
				ins->code != (BPF_LD | BPF_IMM | EBPF_DW))
			RTE_BPF_LOG(ERR, "unreachable code at pc: %u;\n", i);
	}
}

/*
 * report loops detected.
 */
static void
log_loop(const struct bpf_verifier *bvf)
{
	uint32_t i, j;
	struct inst_node *node;

	for (i = 0; i != bvf->prm->nb_ins; i++) {

		node = bvf->in + i;
		if (node->colour != BLACK)
			continue;

		for (j = 0; j != node->nb_edge; j++) {
			if (node->edge_type[j] == BACK_EDGE)
				RTE_BPF_LOG(ERR,
					"loop at pc:%u --> pc:%u;\n",
					i, node->edge_dest[j]);
		}
	}
}

/*
 * First pass goes through all instructions in the set, checks that each
 * instruction is a valid one (correct syntax, valid field values, etc.)
 * and constructs control flow graph (CFG).
 * Then depth-first search is performed over the constructed graph.
 * Programs with unreachable instructions and/or loops will be rejected.
 */
static int
validate(struct bpf_verifier *bvf)
{
	int32_t rc;
	uint32_t i;
	struct inst_node *node;
	const struct ebpf_insn *ins;
	const char *err;

	rc = 0;
	for (i = 0; i < bvf->prm->nb_ins; i++) {

		ins = bvf->prm->ins + i;
		node = bvf->in + i;

		err = check_syntax(ins);
		if (err != NULL) {
			RTE_BPF_LOG(ERR, "%s: %s at pc: %u\n",
				__func__, err, i);
			rc |= -EINVAL;
		}

		/*
		 * construct CFG: jcc nodes have two outgoing edges,
		 * 'exit' nodes have none, all other nodes have exactly
		 * one outgoing edge.
		 */
		switch (ins->code) {
		case (BPF_JMP | EBPF_EXIT):
			break;
		case (BPF_JMP | BPF_JEQ | BPF_K):
		case (BPF_JMP | EBPF_JNE | BPF_K):
		case (BPF_JMP | BPF_JGT | BPF_K):
		case (BPF_JMP | EBPF_JLT | BPF_K):
		case (BPF_JMP | BPF_JGE | BPF_K):
		case (BPF_JMP | EBPF_JLE | BPF_K):
		case (BPF_JMP | EBPF_JSGT | BPF_K):
		case (BPF_JMP | EBPF_JSLT | BPF_K):
		case (BPF_JMP | EBPF_JSGE | BPF_K):
		case (BPF_JMP | EBPF_JSLE | BPF_K):
		case (BPF_JMP | BPF_JSET | BPF_K):
		case (BPF_JMP | BPF_JEQ | BPF_X):
		case (BPF_JMP | EBPF_JNE | BPF_X):
		case (BPF_JMP | BPF_JGT | BPF_X):
		case (BPF_JMP | EBPF_JLT | BPF_X):
		case (BPF_JMP | BPF_JGE | BPF_X):
		case (BPF_JMP | EBPF_JLE | BPF_X):
		case (BPF_JMP | EBPF_JSGT | BPF_X):
		case (BPF_JMP | EBPF_JSLT | BPF_X):
		case (BPF_JMP | EBPF_JSGE | BPF_X):
		case (BPF_JMP | EBPF_JSLE | BPF_X):
		case (BPF_JMP | BPF_JSET | BPF_X):
			rc |= add_edge(bvf, node, i + ins->off + 1);
			rc |= add_edge(bvf, node, i + 1);
			bvf->nb_jcc_nodes++;
			break;
		case (BPF_JMP | BPF_JA):
			rc |= add_edge(bvf, node, i + ins->off + 1);
			break;
		/* load 64 bit immediate value */
		case (BPF_LD | BPF_IMM | EBPF_DW):
			rc |= add_edge(bvf, node, i + 2);
			i++;
			break;
		case (BPF_LD | BPF_ABS | BPF_B):
		case (BPF_LD | BPF_ABS | BPF_H):
		case (BPF_LD | BPF_ABS | BPF_W):
		case (BPF_LD | BPF_IND | BPF_B):
		case (BPF_LD | BPF_IND | BPF_H):
		case (BPF_LD | BPF_IND | BPF_W):
			bvf->nb_ldmb_nodes++;
			/* fallthrough */
		default:
			rc |= add_edge(bvf, node, i + 1);
			break;
		}

		bvf->nb_nodes++;
		bvf->node_colour[WHITE]++;
	}

	if (rc != 0)
		return rc;

	dfs(bvf);

	RTE_BPF_LOG(DEBUG, "%s(%p) stats:\n"
		"nb_nodes=%u;\n"
		"nb_jcc_nodes=%u;\n"
		"node_colour={[WHITE]=%u, [GREY]=%u, [BLACK]=%u};\n"
		"edge_type={[UNKNOWN]=%u, [TREE]=%u, [BACK]=%u, [CROSS]=%u};\n",
		__func__, bvf,
		bvf->nb_nodes,
		bvf->nb_jcc_nodes,
		bvf->node_colour[WHITE], bvf->node_colour[GREY],
		bvf->node_colour[BLACK],
		bvf->edge_type[UNKNOWN_EDGE], bvf->edge_type[TREE_EDGE],
		bvf->edge_type[BACK_EDGE], bvf->edge_type[CROSS_EDGE]);

	if (bvf->node_colour[BLACK] != bvf->nb_nodes) {
		RTE_BPF_LOG(ERR, "%s(%p) unreachable instructions;\n",
			__func__, bvf);
		log_unreachable(bvf);
		return -EINVAL;
	}

	if (bvf->node_colour[GREY] != 0 || bvf->node_colour[WHITE] != 0 ||
			bvf->edge_type[UNKNOWN_EDGE] != 0) {
		RTE_BPF_LOG(ERR, "%s(%p) DFS internal error;\n",
			__func__, bvf);
		return -EINVAL;
	}

	if (bvf->edge_type[BACK_EDGE] != 0) {
		RTE_BPF_LOG(ERR, "%s(%p) loops detected;\n",
			__func__, bvf);
		log_loop(bvf);
		return -EINVAL;
	}

	return 0;
}
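
/*
 * E.g. a two-instruction program that loops forever:
 *	0:	ja +0	- edge dest: 0 + 0 + 1 = 1
 *	1:	ja -2	- edge dest: 1 + (-2) + 1 = 0
 * both nodes are reachable, but during DFS the second jump hits the
 * still-GREY node 0 and is classified as a BACK_EDGE, so validate()
 * fails with "loops detected" and log_loop() reports pc:1 --> pc:0.
 */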

/*
 * helper functions to get/free eval states.
 */
static struct bpf_eval_state *
pull_eval_state(struct bpf_verifier *bvf)
{
	uint32_t n;

	n = bvf->evst_pool.cur;
	if (n == bvf->evst_pool.num)
		return NULL;

	bvf->evst_pool.cur = n + 1;
	return bvf->evst_pool.ent + n;
}

static void
push_eval_state(struct bpf_verifier *bvf)
{
	bvf->evst_pool.cur--;
}

static void
evst_pool_fini(struct bpf_verifier *bvf)
{
	bvf->evst = NULL;
	free(bvf->evst_pool.ent);
	memset(&bvf->evst_pool, 0, sizeof(bvf->evst_pool));
}

static int
evst_pool_init(struct bpf_verifier *bvf)
{
	uint32_t n;

	n = bvf->nb_jcc_nodes + 1;

	bvf->evst_pool.ent = calloc(n, sizeof(bvf->evst_pool.ent[0]));
	if (bvf->evst_pool.ent == NULL)
		return -ENOMEM;

	bvf->evst_pool.num = n;
	bvf->evst_pool.cur = 0;

	bvf->evst = pull_eval_state(bvf);
	return 0;
}
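
/*
 * Pool sizing note: eval states are saved only at nodes with more
 * than one outgoing edge (i.e. jcc nodes), plus one state is always
 * held as the current bvf->evst, so nb_jcc_nodes + 1 entries are
 * sufficient for any path through the CFG.
 */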

/*
 * Save current eval state.
 */
static int
save_eval_state(struct bpf_verifier *bvf, struct inst_node *node)
{
	struct bpf_eval_state *st;

	/* get new eval_state for this node */
	st = pull_eval_state(bvf);
	if (st == NULL) {
		RTE_BPF_LOG(ERR,
			"%s: internal error (out of space) at pc: %u\n",
			__func__, get_node_idx(bvf, node));
		return -ENOMEM;
	}

	/* make a copy of current state */
	memcpy(st, bvf->evst, sizeof(*st));

	/* swap current state with new one */
	node->evst = bvf->evst;
	bvf->evst = st;

	RTE_BPF_LOG(DEBUG, "%s(bvf=%p,node=%u) old/new states: %p/%p;\n",
		__func__, bvf, get_node_idx(bvf, node), node->evst, bvf->evst);

	return 0;
}

/*
 * Restore previous eval state and mark current eval state as free.
 */
static void
restore_eval_state(struct bpf_verifier *bvf, struct inst_node *node)
{
	RTE_BPF_LOG(DEBUG, "%s(bvf=%p,node=%u) old/new states: %p/%p;\n",
		__func__, bvf, get_node_idx(bvf, node), bvf->evst, node->evst);

	bvf->evst = node->evst;
	node->evst = NULL;
	push_eval_state(bvf);
}
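
/*
 * E.g. for a jcc node with two outgoing edges evaluate() below does:
 *	save_eval_state()	- before descending the 1-st edge;
 *	...			- evaluate the 1-st branch, possibly
 *				  changing register/stack bounds;
 *	restore_eval_state()	- when the last (2-nd) edge is taken,
 *				  so the 2-nd branch starts from the
 *				  same state the 1-st one started from.
 */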

static void
log_eval_state(const struct bpf_verifier *bvf, const struct ebpf_insn *ins,
	uint32_t pc, int32_t loglvl)
{
	const struct bpf_eval_state *st;
	const struct bpf_reg_val *rv;

	rte_log(loglvl, rte_bpf_logtype, "%s(pc=%u):\n", __func__, pc);

	st = bvf->evst;
	rv = st->rv + ins->dst_reg;

	rte_log(loglvl, rte_bpf_logtype,
		"r%u={\n"
		"\tv={type=%u, size=%zu},\n"
		"\tmask=0x%" PRIx64 ",\n"
		"\tu={min=0x%" PRIx64 ", max=0x%" PRIx64 "},\n"
		"\ts={min=%" PRId64 ", max=%" PRId64 "},\n"
		"};\n",
		ins->dst_reg,
		rv->v.type, rv->v.size,
		rv->mask,
		rv->u.min, rv->u.max,
		rv->s.min, rv->s.max);
}
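
/*
 * Produces DEBUG output in the following form (values below are
 * purely illustrative):
 *	log_eval_state(pc=0):
 *	r1={
 *		v={type=1, size=0},
 *		mask=0xffffffffffffffff,
 *		u={min=0x0, max=0xffffffffffffffff},
 *		s={min=-9223372036854775808, max=9223372036854775807},
 *	};
 */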

/*
 * Do second pass through CFG and try to evaluate instructions
 * via each possible path.
 * Right now evaluation functionality is quite limited.
 * Still need to add extra checks for:
 * - use/return of uninitialized registers.
 * - use of uninitialized data from the stack.
 * - memory boundary violations.
 */
static int
evaluate(struct bpf_verifier *bvf)
{
	int32_t rc;
	uint32_t idx, op;
	const char *err;
	const struct ebpf_insn *ins;
	struct inst_node *next, *node;

	/* initial state of frame pointer */
	static const struct bpf_reg_val rvfp = {
		.v = {
			.type = BPF_ARG_PTR_STACK,
			.size = MAX_BPF_STACK_SIZE,
		},
		.mask = UINT64_MAX,
		.u = {.min = MAX_BPF_STACK_SIZE, .max = MAX_BPF_STACK_SIZE},
		.s = {.min = MAX_BPF_STACK_SIZE, .max = MAX_BPF_STACK_SIZE},
	};
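
	/*
	 * Note: R10 is modelled as pointing to the *top* of the stack
	 * area - its value bounds equal MAX_BPF_STACK_SIZE, and valid
	 * stack accesses are expected at negative offsets from it.
	 */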

	bvf->evst->rv[EBPF_REG_1].v = bvf->prm->prog_arg;
	bvf->evst->rv[EBPF_REG_1].mask = UINT64_MAX;
	if (bvf->prm->prog_arg.type == RTE_BPF_ARG_RAW)
		eval_max_bound(bvf->evst->rv + EBPF_REG_1, UINT64_MAX);

	bvf->evst->rv[EBPF_REG_10] = rvfp;

	ins = bvf->prm->ins;
	node = bvf->in;
	next = node;
	rc = 0;

	while (node != NULL && rc == 0) {

		/*
		 * current node evaluation, make sure we evaluate
		 * each node only once.
		 */
		if (next != NULL) {

			bvf->evin = node;
			idx = get_node_idx(bvf, node);
			op = ins[idx].code;

			/* for jcc node make a copy of evaluation state */
			if (node->nb_edge > 1)
				rc |= save_eval_state(bvf, node);

			if (ins_chk[op].eval != NULL && rc == 0) {
				err = ins_chk[op].eval(bvf, ins + idx);
				if (err != NULL) {
					RTE_BPF_LOG(ERR, "%s: %s at pc: %u\n",
						__func__, err, idx);
					rc = -EINVAL;
				}
			}

			log_eval_state(bvf, ins + idx, idx, RTE_LOG_DEBUG);
			bvf->evin = NULL;
		}

		/* proceed through CFG */
		next = get_next_node(bvf, node);
		if (next != NULL) {

			/* proceed with next child */
			if (node->cur_edge == node->nb_edge &&
					node->evst != NULL)
				restore_eval_state(bvf, node);

			next->prev_node = get_node_idx(bvf, node);
			node = next;
		} else {
			/*
			 * finished with current node and all its kids,
			 * proceed with parent
			 */
			node->cur_edge = 0;
			node = get_prev_node(bvf, node);

			/* finished */
			if (node == bvf->in)
				node = NULL;
		}
	}

	return rc;
}
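
/*
 * Note: unlike dfs(), evaluate() resets cur_edge on the way back up,
 * so a node reachable via several paths is re-evaluated once per
 * path. E.g. in the diamond-shaped CFG of
 *	0: jcc +1; 1: ja +1; 2: ja +0; 3: exit
 * pc 3 is evaluated twice - once via the taken branch (0->2->3) and
 * once via the fall-through one (0->1->3).
 */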

int
bpf_validate(struct rte_bpf *bpf)
{
	int32_t rc;
	struct bpf_verifier bvf;

	/* check input argument type, don't allow mbuf ptr on 32-bit */
	if (bpf->prm.prog_arg.type != RTE_BPF_ARG_RAW &&
			bpf->prm.prog_arg.type != RTE_BPF_ARG_PTR &&
			(sizeof(uint64_t) != sizeof(uintptr_t) ||
			bpf->prm.prog_arg.type != RTE_BPF_ARG_PTR_MBUF)) {
		RTE_BPF_LOG(ERR, "%s: unsupported argument type\n", __func__);
		return -ENOTSUP;
	}

	memset(&bvf, 0, sizeof(bvf));
	bvf.prm = &bpf->prm;
	bvf.in = calloc(bpf->prm.nb_ins, sizeof(bvf.in[0]));
	if (bvf.in == NULL)
		return -ENOMEM;

	rc = validate(&bvf);

	if (rc == 0) {
		rc = evst_pool_init(&bvf);
		if (rc == 0)
			rc = evaluate(&bvf);
		evst_pool_fini(&bvf);
	}

	free(bvf.in);

	/* copy collected info */
	if (rc == 0) {
		bpf->stack_sz = bvf.stack_sz;

		/* for LD_ABS/LD_IND, we'll need extra space on the stack */
		if (bvf.nb_ldmb_nodes != 0)
			bpf->stack_sz = RTE_ALIGN_CEIL(bpf->stack_sz +
				sizeof(uint64_t), sizeof(uint64_t));
	}

	return rc;
}
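
/*
 * Typical call chain (a sketch, not a strict contract): the public
 * rte_bpf_load() allocates the rte_bpf object, then calls
 * bpf_validate() above before any attempt to JIT or execute the
 * program; a negative value returned here makes the whole load fail.
 */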