/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>

#include <rte_common.h>

#include "bpf_impl.h"

#define BPF_ARG_PTR_STACK	RTE_BPF_ARG_RESERVED

struct bpf_reg_val {
	struct rte_bpf_arg v;
	uint64_t mask;
	struct {
		int64_t min;
		int64_t max;
	} s;
	struct {
		uint64_t min;
		uint64_t max;
	} u;
};
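
/*
 * Each register value is tracked as two intervals plus a bit mask:
 * 's' holds the possible signed range, 'u' the possible unsigned range,
 * and 'mask' the bits that may be set. For illustration, after loading
 * the immediate 0xff a register would be tracked as
 * u = [0xff, 0xff], s = [255, 255], mask = UINT64_MAX.
 */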

struct bpf_eval_state {
	struct bpf_reg_val rv[EBPF_REG_NUM];
	struct bpf_reg_val sv[MAX_BPF_STACK_SIZE / sizeof(uint64_t)];
};

/* possible instruction node colour */
enum {
	WHITE,
	GREY,
	BLACK,
	MAX_NODE_COLOUR
};

/* possible edge types */
enum {
	UNKNOWN_EDGE,
	TREE_EDGE,
	BACK_EDGE,
	CROSS_EDGE,
	MAX_EDGE_TYPE
};

#define	MAX_EDGES	2

struct inst_node {
	uint8_t colour;
	uint8_t nb_edge:4;
	uint8_t cur_edge:4;
	uint8_t edge_type[MAX_EDGES];
	uint32_t edge_dest[MAX_EDGES];
	uint32_t prev_node;
	struct bpf_eval_state *evst;
};

struct bpf_verifier {
	const struct rte_bpf_prm *prm;
	struct inst_node *in;
	uint64_t stack_sz;
	uint32_t nb_nodes;
	uint32_t nb_jcc_nodes;
	uint32_t nb_ldmb_nodes;
	uint32_t node_colour[MAX_NODE_COLOUR];
	uint32_t edge_type[MAX_EDGE_TYPE];
	struct bpf_eval_state *evst;
	struct inst_node *evin;
	struct {
		uint32_t num;
		uint32_t cur;
		struct bpf_eval_state *ent;
	} evst_pool;
};

struct bpf_ins_check {
	struct {
		uint16_t dreg;
		uint16_t sreg;
	} mask;
	struct {
		uint16_t min;
		uint16_t max;
	} off;
	struct {
		uint32_t min;
		uint32_t max;
	} imm;
	const char * (*check)(const struct ebpf_insn *);
	const char * (*eval)(struct bpf_verifier *, const struct ebpf_insn *);
};

#define	ALL_REGS	RTE_LEN2MASK(EBPF_REG_NUM, uint16_t)
#define	WRT_REGS	RTE_LEN2MASK(EBPF_REG_10, uint16_t)
#define	ZERO_REG	RTE_LEN2MASK(EBPF_REG_1, uint16_t)

/* For LD_IND R6 is an implicit CTX register. */
#define	IND_SRC_REGS	(WRT_REGS ^ 1 << EBPF_REG_6)

/*
 * check and evaluate functions for particular instruction types.
 */

static const char *
check_alu_bele(const struct ebpf_insn *ins)
{
	if (ins->imm != 16 && ins->imm != 32 && ins->imm != 64)
		return "invalid imm field";
	return NULL;
}

static const char *
eval_exit(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
{
	RTE_SET_USED(ins);
	if (bvf->evst->rv[EBPF_REG_0].v.type == RTE_BPF_ARG_UNDEF)
		return "undefined return value";
	return NULL;
}

/* set up max possible bounds for the given mask */
static void
eval_umax_bound(struct bpf_reg_val *rv, uint64_t mask)
{
	rv->u.max = mask;
	rv->u.min = 0;
}

static void
eval_smax_bound(struct bpf_reg_val *rv, uint64_t mask)
{
	rv->s.max = mask >> 1;
	rv->s.min = rv->s.max ^ UINT64_MAX;
}

static void
eval_max_bound(struct bpf_reg_val *rv, uint64_t mask)
{
	eval_umax_bound(rv, mask);
	eval_smax_bound(rv, mask);
}

static void
eval_fill_max_bound(struct bpf_reg_val *rv, uint64_t mask)
{
	eval_max_bound(rv, mask);
	rv->v.type = RTE_BPF_ARG_RAW;
	rv->mask = mask;
}

static void
eval_fill_imm64(struct bpf_reg_val *rv, uint64_t mask, uint64_t val)
{
	rv->mask = mask;
	rv->s.min = val;
	rv->s.max = val;
	rv->u.min = val;
	rv->u.max = val;
}

static void
eval_fill_imm(struct bpf_reg_val *rv, uint64_t mask, int32_t imm)
{
	uint64_t v;

	v = (uint64_t)imm & mask;

	rv->v.type = RTE_BPF_ARG_RAW;
	eval_fill_imm64(rv, mask, v);
}
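
/*
 * Worked example: for a 32-bit ALU op with imm = -1,
 * v = (uint64_t)-1 & UINT32_MAX = 0xffffffff, so all four bounds
 * (s.min, s.max, u.min, u.max) become 0xffffffff.
 */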

static const char *
eval_ld_imm64(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
{
	uint32_t i;
	uint64_t val;
	struct bpf_reg_val *rd;

	val = (uint32_t)ins[0].imm | (uint64_t)(uint32_t)ins[1].imm << 32;

	rd = bvf->evst->rv + ins->dst_reg;
	rd->v.type = RTE_BPF_ARG_RAW;
	eval_fill_imm64(rd, UINT64_MAX, val);

	for (i = 0; i != bvf->prm->nb_xsym; i++) {

		/* load of external variable */
		if (bvf->prm->xsym[i].type == RTE_BPF_XTYPE_VAR &&
				(uintptr_t)bvf->prm->xsym[i].var.val == val) {
			rd->v = bvf->prm->xsym[i].var.desc;
			eval_fill_imm64(rd, UINT64_MAX, 0);
			break;
		}
	}

	return NULL;
}

static void
eval_apply_mask(struct bpf_reg_val *rv, uint64_t mask)
{
	struct bpf_reg_val rt;

	rt.u.min = rv->u.min & mask;
	rt.u.max = rv->u.max & mask;
	if (rt.u.min != rv->u.min || rt.u.max != rv->u.max) {
		rv->u.max = RTE_MAX(rt.u.max, mask);
		rv->u.min = 0;
	}

	eval_smax_bound(&rt, mask);
	rv->s.max = RTE_MIN(rt.s.max, rv->s.max);
	rv->s.min = RTE_MAX(rt.s.min, rv->s.min);

	rv->mask = mask;
}
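
/*
 * Worked example: applying mask 0xff to u = [0x1f0, 0x200] gives
 * rt.u = [0xf0, 0x00]; since masking changed the bounds, the unsigned
 * range is widened conservatively to u = [0, 0xff].
 */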

static void
eval_add(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, uint64_t msk)
{
	struct bpf_reg_val rv;

	rv.u.min = (rd->u.min + rs->u.min) & msk;
	rv.u.max = (rd->u.max + rs->u.max) & msk;
	rv.s.min = (rd->s.min + rs->s.min) & msk;
	rv.s.max = (rd->s.max + rs->s.max) & msk;

	/*
	 * if at least one of the operands is not constant,
	 * then check for overflow
	 */
	if ((rd->u.min != rd->u.max || rs->u.min != rs->u.max) &&
			(rv.u.min < rd->u.min || rv.u.max < rd->u.max))
		eval_umax_bound(&rv, msk);

	if ((rd->s.min != rd->s.max || rs->s.min != rs->s.max) &&
			(((rs->s.min < 0 && rv.s.min > rd->s.min) ||
			rv.s.min < rd->s.min) ||
			((rs->s.max < 0 && rv.s.max > rd->s.max) ||
			rv.s.max < rd->s.max)))
		eval_smax_bound(&rv, msk);

	rd->s = rv.s;
	rd->u = rv.u;
}
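
/*
 * Worked example: for 64-bit ADD with rd->u = [1, UINT64_MAX] and
 * rs->u = [1, 1], rv.u.max wraps to 0 < rd->u.max, and since rd is not
 * a constant the unsigned bounds are reset to the full [0, msk] range.
 */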

static void
eval_sub(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, uint64_t msk)
{
	struct bpf_reg_val rv;

	rv.u.min = (rd->u.min - rs->u.max) & msk;
	rv.u.max = (rd->u.max - rs->u.min) & msk;
	rv.s.min = (rd->s.min - rs->s.max) & msk;
	rv.s.max = (rd->s.max - rs->s.min) & msk;

	/*
	 * if at least one of the operands is not constant,
	 * then check for overflow
	 */
	if ((rd->u.min != rd->u.max || rs->u.min != rs->u.max) &&
			(rv.u.min > rd->u.min || rv.u.max > rd->u.max))
		eval_umax_bound(&rv, msk);

	if ((rd->s.min != rd->s.max || rs->s.min != rs->s.max) &&
			(((rs->s.min < 0 && rv.s.min < rd->s.min) ||
			rv.s.min > rd->s.min) ||
			((rs->s.max < 0 && rv.s.max < rd->s.max) ||
			rv.s.max > rd->s.max)))
		eval_smax_bound(&rv, msk);

	rd->s = rv.s;
	rd->u = rv.u;
}

static void
eval_lsh(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
	uint64_t msk)
{
	/* check if shift value is less than max result bits */
	if (rs->u.max >= opsz) {
		eval_max_bound(rd, msk);
		return;
	}

	/* check for overflow */
	if (rd->u.max > RTE_LEN2MASK(opsz - rs->u.max, uint64_t))
		eval_umax_bound(rd, msk);
	else {
		rd->u.max <<= rs->u.max;
		rd->u.min <<= rs->u.min;
	}

	/* check that dreg values are, and would remain, non-negative */
	if ((uint64_t)rd->s.min >> (opsz - 1) != 0 || rd->s.max >=
			RTE_LEN2MASK(opsz - rs->u.max - 1, int64_t))
		eval_smax_bound(rd, msk);
	else {
		rd->s.max <<= rs->u.max;
		rd->s.min <<= rs->u.min;
	}
}

static void
eval_rsh(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
	uint64_t msk)
{
	/* check if shift value is less than max result bits */
	if (rs->u.max >= opsz) {
		eval_max_bound(rd, msk);
		return;
	}

	rd->u.max >>= rs->u.min;
	rd->u.min >>= rs->u.max;

	/* check that dreg values are always non-negative */
	if ((uint64_t)rd->s.min >> (opsz - 1) != 0)
		eval_smax_bound(rd, msk);
	else {
		rd->s.max >>= rs->u.min;
		rd->s.min >>= rs->u.max;
	}
}

static void
eval_arsh(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
	uint64_t msk)
{
	uint32_t shv;

	/* check if shift value is less than max result bits */
	if (rs->u.max >= opsz) {
		eval_max_bound(rd, msk);
		return;
	}

	rd->u.max = (int64_t)rd->u.max >> rs->u.min;
	rd->u.min = (int64_t)rd->u.min >> rs->u.max;

	/* if we have 32-bit values - extend them to 64-bit */
	if (opsz == sizeof(uint32_t) * CHAR_BIT) {
		rd->s.min <<= opsz;
		rd->s.max <<= opsz;
		shv = opsz;
	} else
		shv = 0;

	if (rd->s.min < 0)
		rd->s.min = (rd->s.min >> (rs->u.min + shv)) & msk;
	else
		rd->s.min = (rd->s.min >> (rs->u.max + shv)) & msk;

	if (rd->s.max < 0)
		rd->s.max = (rd->s.max >> (rs->u.max + shv)) & msk;
	else
		rd->s.max = (rd->s.max >> (rs->u.min + shv)) & msk;
}

static uint64_t
eval_umax_bits(uint64_t v, size_t opsz)
{
	if (v == 0)
		return 0;

	v = __builtin_clzll(v);
	return RTE_LEN2MASK(opsz - v, uint64_t);
}
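
/*
 * Worked example: eval_umax_bits(0x13, 64) counts
 * __builtin_clzll(0x13) = 59 leading zero bits and returns
 * RTE_LEN2MASK(64 - 59, uint64_t) = 0x1f, i.e. the widest value with
 * the same number of significant bits.
 */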

/* estimate max possible value for (v1 & v2) */
static uint64_t
eval_uand_max(uint64_t v1, uint64_t v2, size_t opsz)
{
	v1 = eval_umax_bits(v1, opsz);
	v2 = eval_umax_bits(v2, opsz);
	return (v1 & v2);
}

/* estimate max possible value for (v1 | v2) */
static uint64_t
eval_uor_max(uint64_t v1, uint64_t v2, size_t opsz)
{
	v1 = eval_umax_bits(v1, opsz);
	v2 = eval_umax_bits(v2, opsz);
	return (v1 | v2);
}
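
/*
 * For illustration: with v1 = 0x13 and v2 = 0xf0 the per-value bit
 * masks are 0x1f and 0xff, so eval_uand_max() yields 0x1f (AND cannot
 * set bits outside either operand) while eval_uor_max() yields 0xff.
 */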

static void
eval_and(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
	uint64_t msk)
{
	/* both operands are constants */
	if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
		rd->u.min &= rs->u.min;
		rd->u.max &= rs->u.max;
	} else {
		rd->u.max = eval_uand_max(rd->u.max, rs->u.max, opsz);
		rd->u.min &= rs->u.min;
	}

	/* both operands are constants */
	if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
		rd->s.min &= rs->s.min;
		rd->s.max &= rs->s.max;
	/* at least one of the operands is non-negative */
	} else if (rd->s.min >= 0 || rs->s.min >= 0) {
		rd->s.max = eval_uand_max(rd->s.max & (msk >> 1),
			rs->s.max & (msk >> 1), opsz);
		rd->s.min &= rs->s.min;
	} else
		eval_smax_bound(rd, msk);
}

static void
eval_or(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
	uint64_t msk)
{
	/* both operands are constants */
	if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
		rd->u.min |= rs->u.min;
		rd->u.max |= rs->u.max;
	} else {
		rd->u.max = eval_uor_max(rd->u.max, rs->u.max, opsz);
		rd->u.min |= rs->u.min;
	}

	/* both operands are constants */
	if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
		rd->s.min |= rs->s.min;
		rd->s.max |= rs->s.max;

	/* both operands are non-negative */
	} else if (rd->s.min >= 0 && rs->s.min >= 0) {
		rd->s.max = eval_uor_max(rd->s.max, rs->s.max, opsz);
		rd->s.min |= rs->s.min;
	} else
		eval_smax_bound(rd, msk);
}

static void
eval_xor(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
	uint64_t msk)
{
	/* both operands are constants */
	if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
		rd->u.min ^= rs->u.min;
		rd->u.max ^= rs->u.max;
	} else {
		rd->u.max = eval_uor_max(rd->u.max, rs->u.max, opsz);
		rd->u.min = 0;
	}

	/* both operands are constants */
	if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
		rd->s.min ^= rs->s.min;
		rd->s.max ^= rs->s.max;

	/* both operands are non-negative */
	} else if (rd->s.min >= 0 && rs->s.min >= 0) {
		rd->s.max = eval_uor_max(rd->s.max, rs->s.max, opsz);
		rd->s.min = 0;
	} else
		eval_smax_bound(rd, msk);
}

static void
eval_mul(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
	uint64_t msk)
{
	/* both operands are constants */
	if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
		rd->u.min = (rd->u.min * rs->u.min) & msk;
		rd->u.max = (rd->u.max * rs->u.max) & msk;
	/* check for overflow */
	} else if (rd->u.max <= msk >> opsz / 2 &&
			rs->u.max <= msk >> opsz / 2) {
		rd->u.max *= rs->u.max;
		rd->u.min *= rs->u.min;
	} else
		eval_umax_bound(rd, msk);

	/* both operands are constants */
	if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
		rd->s.min = (rd->s.min * rs->s.min) & msk;
		rd->s.max = (rd->s.max * rs->s.max) & msk;
	/* check that both operands are non-negative */
	} else if (rd->s.min >= 0 && rs->s.min >= 0) {
		rd->s.max *= rs->s.max;
		rd->s.min *= rs->s.min;
	} else
		eval_smax_bound(rd, msk);
}

static const char *
eval_divmod(uint32_t op, struct bpf_reg_val *rd, struct bpf_reg_val *rs,
	size_t opsz, uint64_t msk)
{
	/* both operands are constants */
	if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
		if (rs->u.max == 0)
			return "division by 0";
		if (op == BPF_DIV) {
			rd->u.min /= rs->u.min;
			rd->u.max /= rs->u.max;
		} else {
			rd->u.min %= rs->u.min;
			rd->u.max %= rs->u.max;
		}
	} else {
		/* for BPF_DIV the upper bound stays unchanged */
		if (op == BPF_MOD)
			rd->u.max = RTE_MIN(rd->u.max, rs->u.max - 1);
		rd->u.min = 0;
	}

	/* if we have 32-bit values - extend them to 64-bit */
	if (opsz == sizeof(uint32_t) * CHAR_BIT) {
		rd->s.min = (int32_t)rd->s.min;
		rd->s.max = (int32_t)rd->s.max;
		rs->s.min = (int32_t)rs->s.min;
		rs->s.max = (int32_t)rs->s.max;
	}

	/* both operands are constants */
	if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
		if (rs->s.max == 0)
			return "division by 0";
		if (op == BPF_DIV) {
			rd->s.min /= rs->s.min;
			rd->s.max /= rs->s.max;
		} else {
			rd->s.min %= rs->s.min;
			rd->s.max %= rs->s.max;
		}
	} else if (op == BPF_MOD) {
		rd->s.max = RTE_MAX(rd->s.max, 0);
		rd->s.min = RTE_MIN(rd->s.min, 0);
	} else
		eval_smax_bound(rd, msk);

	rd->s.max &= msk;
	rd->s.min &= msk;

	return NULL;
}
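
/*
 * Worked example: for unsigned BPF_MOD with a non-constant divisor
 * rs->u = [1, 8], the remainder can never reach the divisor, so the
 * result is bounded by u = [0, min(rd->u.max, 7)].
 */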

static void
eval_neg(struct bpf_reg_val *rd, size_t opsz, uint64_t msk)
{
	uint64_t ux, uy;
	int64_t sx, sy;

	/* if we have 32-bit values - extend them to 64-bit */
	if (opsz == sizeof(uint32_t) * CHAR_BIT) {
		rd->u.min = (int32_t)rd->u.min;
		rd->u.max = (int32_t)rd->u.max;
	}

	ux = -(int64_t)rd->u.min & msk;
	uy = -(int64_t)rd->u.max & msk;

	rd->u.max = RTE_MAX(ux, uy);
	rd->u.min = RTE_MIN(ux, uy);

	/* if we have 32-bit values - extend them to 64-bit */
	if (opsz == sizeof(uint32_t) * CHAR_BIT) {
		rd->s.min = (int32_t)rd->s.min;
		rd->s.max = (int32_t)rd->s.max;
	}

	sx = -rd->s.min & msk;
	sy = -rd->s.max & msk;

	rd->s.max = RTE_MAX(sx, sy);
	rd->s.min = RTE_MIN(sx, sy);
}

static const char *
eval_ld_mbuf(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
{
	uint32_t i, mode;
	struct bpf_reg_val *rv, ri, rs;

	mode = BPF_MODE(ins->code);

	/* R6 is an implicit input that must contain pointer to mbuf */
	if (bvf->evst->rv[EBPF_REG_6].v.type != RTE_BPF_ARG_PTR_MBUF)
		return "invalid type for implicit ctx register";

	if (mode == BPF_IND) {
		rs = bvf->evst->rv[ins->src_reg];
		if (rs.v.type != RTE_BPF_ARG_RAW)
			return "unexpected type for src register";

		eval_fill_imm(&ri, UINT64_MAX, ins->imm);
		eval_add(&rs, &ri, UINT64_MAX);

		if (rs.s.max < 0 || rs.u.min > UINT32_MAX)
			return "mbuf boundary violation";
	}

	/* R1-R5 scratch registers */
	for (i = EBPF_REG_1; i != EBPF_REG_6; i++)
		bvf->evst->rv[i].v.type = RTE_BPF_ARG_UNDEF;

	/* R0 is an implicit output, contains data fetched from the packet */
	rv = bvf->evst->rv + EBPF_REG_0;
	rv->v.size = bpf_size(BPF_SIZE(ins->code));
	eval_fill_max_bound(rv, RTE_LEN2MASK(rv->v.size * CHAR_BIT, uint64_t));

	return NULL;
}

/*
 * check that destination and source operands are in a defined state.
 */
static const char *
eval_defined(const struct bpf_reg_val *dst, const struct bpf_reg_val *src)
{
	if (dst != NULL && dst->v.type == RTE_BPF_ARG_UNDEF)
		return "dest reg value is undefined";
	if (src != NULL && src->v.type == RTE_BPF_ARG_UNDEF)
		return "src reg value is undefined";
	return NULL;
}

static const char *
eval_alu(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
{
	uint64_t msk;
	uint32_t op;
	size_t opsz;
	const char *err;
	struct bpf_eval_state *st;
	struct bpf_reg_val *rd, rs;

	opsz = (BPF_CLASS(ins->code) == BPF_ALU) ?
		sizeof(uint32_t) : sizeof(uint64_t);
	opsz = opsz * CHAR_BIT;
	msk = RTE_LEN2MASK(opsz, uint64_t);

	st = bvf->evst;
	rd = st->rv + ins->dst_reg;

	if (BPF_SRC(ins->code) == BPF_X) {
		rs = st->rv[ins->src_reg];
		eval_apply_mask(&rs, msk);
	} else
		eval_fill_imm(&rs, msk, ins->imm);

	eval_apply_mask(rd, msk);

	op = BPF_OP(ins->code);

	/* Allow self-xor as a way to zero a register */
	if (op == BPF_XOR && BPF_SRC(ins->code) == BPF_X &&
			ins->src_reg == ins->dst_reg) {
		eval_fill_imm(&rs, UINT64_MAX, 0);
		eval_fill_imm(rd, UINT64_MAX, 0);
	}

	err = eval_defined((op != EBPF_MOV) ? rd : NULL,
			(op != BPF_NEG) ? &rs : NULL);
	if (err != NULL)
		return err;

	if (op == BPF_ADD)
		eval_add(rd, &rs, msk);
	else if (op == BPF_SUB)
		eval_sub(rd, &rs, msk);
	else if (op == BPF_LSH)
		eval_lsh(rd, &rs, opsz, msk);
	else if (op == BPF_RSH)
		eval_rsh(rd, &rs, opsz, msk);
	else if (op == EBPF_ARSH)
		eval_arsh(rd, &rs, opsz, msk);
	else if (op == BPF_AND)
		eval_and(rd, &rs, opsz, msk);
	else if (op == BPF_OR)
		eval_or(rd, &rs, opsz, msk);
	else if (op == BPF_XOR)
		eval_xor(rd, &rs, opsz, msk);
	else if (op == BPF_MUL)
		eval_mul(rd, &rs, opsz, msk);
	else if (op == BPF_DIV || op == BPF_MOD)
		err = eval_divmod(op, rd, &rs, opsz, msk);
	else if (op == BPF_NEG)
		eval_neg(rd, opsz, msk);
	else if (op == EBPF_MOV)
		*rd = rs;
	else
		eval_max_bound(rd, msk);

	return err;
}

static const char *
eval_bele(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
{
	uint64_t msk;
	struct bpf_eval_state *st;
	struct bpf_reg_val *rd;
	const char *err;

	msk = RTE_LEN2MASK(ins->imm, uint64_t);

	st = bvf->evst;
	rd = st->rv + ins->dst_reg;

	err = eval_defined(rd, NULL);
	if (err != NULL)
		return err;

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
	if (ins->code == (BPF_ALU | EBPF_END | EBPF_TO_BE))
		eval_max_bound(rd, msk);
	else
		eval_apply_mask(rd, msk);
#else
	if (ins->code == (BPF_ALU | EBPF_END | EBPF_TO_LE))
		eval_max_bound(rd, msk);
	else
		eval_apply_mask(rd, msk);
#endif

	return NULL;
}
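
/*
 * Note: on a little-endian host EBPF_TO_LE is just a truncation, so the
 * tracked bounds only need the mask applied; EBPF_TO_BE reshuffles the
 * bytes, so the bounds are reset to the full range of the mask.
 */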

static const char *
eval_ptr(struct bpf_verifier *bvf, struct bpf_reg_val *rm, uint32_t opsz,
	uint32_t align, int16_t off)
{
	struct bpf_reg_val rv;

	/* calculate reg + offset */
	eval_fill_imm(&rv, rm->mask, off);
	eval_add(rm, &rv, rm->mask);

	if (RTE_BPF_ARG_PTR_TYPE(rm->v.type) == 0)
		return "destination is not a pointer";

	if (rm->mask != UINT64_MAX)
		return "pointer truncation";

	if (rm->u.max + opsz > rm->v.size ||
			(uint64_t)rm->s.max + opsz > rm->v.size ||
			rm->s.min < 0)
		return "memory boundary violation";

	if (rm->u.max % align != 0)
		return "unaligned memory access";

	if (rm->v.type == BPF_ARG_PTR_STACK) {

		if (rm->u.max != rm->u.min || rm->s.max != rm->s.min ||
				rm->u.max != (uint64_t)rm->s.max)
			return "stack access with variable offset";

		bvf->stack_sz = RTE_MAX(bvf->stack_sz, rm->v.size - rm->u.max);

	/* pointer to mbuf */
	} else if (rm->v.type == RTE_BPF_ARG_PTR_MBUF) {

		if (rm->u.max != rm->u.min || rm->s.max != rm->s.min ||
				rm->u.max != (uint64_t)rm->s.max)
			return "mbuf access with variable offset";
	}

	return NULL;
}

static void
eval_max_load(struct bpf_reg_val *rv, uint64_t mask)
{
	eval_umax_bound(rv, mask);

	/* full 64-bit load */
	if (mask == UINT64_MAX)
		eval_smax_bound(rv, mask);
	/* zero-extend load */
	else {
		rv->s.min = rv->u.min;
		rv->s.max = rv->u.max;
	}
}

static const char *
eval_load(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
{
	uint32_t opsz;
	uint64_t msk;
	const char *err;
	struct bpf_eval_state *st;
	struct bpf_reg_val *rd, rs;
	const struct bpf_reg_val *sv;

	st = bvf->evst;
	rd = st->rv + ins->dst_reg;
	rs = st->rv[ins->src_reg];
	opsz = bpf_size(BPF_SIZE(ins->code));
	msk = RTE_LEN2MASK(opsz * CHAR_BIT, uint64_t);

	err = eval_ptr(bvf, &rs, opsz, 1, ins->off);
	if (err != NULL)
		return err;

	if (rs.v.type == BPF_ARG_PTR_STACK) {

		sv = st->sv + rs.u.max / sizeof(uint64_t);
		if (sv->v.type == RTE_BPF_ARG_UNDEF || sv->mask < msk)
			return "undefined value on the stack";

		*rd = *sv;

	/* pointer to mbuf */
	} else if (rs.v.type == RTE_BPF_ARG_PTR_MBUF) {

		if (rs.u.max == offsetof(struct rte_mbuf, next)) {
			eval_fill_imm(rd, msk, 0);
			rd->v = rs.v;
		} else if (rs.u.max == offsetof(struct rte_mbuf, buf_addr)) {
			eval_fill_imm(rd, msk, 0);
			rd->v.type = RTE_BPF_ARG_PTR;
			rd->v.size = rs.v.buf_size;
		} else if (rs.u.max == offsetof(struct rte_mbuf, data_off)) {
			eval_fill_imm(rd, msk, RTE_PKTMBUF_HEADROOM);
			rd->v.type = RTE_BPF_ARG_RAW;
		} else {
			eval_max_load(rd, msk);
			rd->v.type = RTE_BPF_ARG_RAW;
		}

	/* pointer to raw data */
	} else {
		eval_max_load(rd, msk);
		rd->v.type = RTE_BPF_ARG_RAW;
	}

	return NULL;
}

static const char *
eval_mbuf_store(const struct bpf_reg_val *rv, uint32_t opsz)
{
	uint32_t i;

	static const struct {
		size_t off;
		size_t sz;
	} mbuf_ro_fields[] = {
		{ .off = offsetof(struct rte_mbuf, buf_addr), },
		{ .off = offsetof(struct rte_mbuf, refcnt), },
		{ .off = offsetof(struct rte_mbuf, nb_segs), },
		{ .off = offsetof(struct rte_mbuf, buf_len), },
		{ .off = offsetof(struct rte_mbuf, pool), },
		{ .off = offsetof(struct rte_mbuf, next), },
		{ .off = offsetof(struct rte_mbuf, priv_size), },
	};

	for (i = 0; i != RTE_DIM(mbuf_ro_fields) &&
			(mbuf_ro_fields[i].off + mbuf_ro_fields[i].sz <=
			rv->u.max || rv->u.max + opsz <= mbuf_ro_fields[i].off);
			i++)
		;

	if (i != RTE_DIM(mbuf_ro_fields))
		return "store to a read-only mbuf field";

	return NULL;
}

static const char *
eval_store(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
{
	uint32_t opsz;
	uint64_t msk;
	const char *err;
	struct bpf_eval_state *st;
	struct bpf_reg_val rd, rs, *sv;

	opsz = bpf_size(BPF_SIZE(ins->code));
	msk = RTE_LEN2MASK(opsz * CHAR_BIT, uint64_t);

	st = bvf->evst;
	rd = st->rv[ins->dst_reg];

	if (BPF_CLASS(ins->code) == BPF_STX) {
		rs = st->rv[ins->src_reg];
		eval_apply_mask(&rs, msk);
	} else
		eval_fill_imm(&rs, msk, ins->imm);

	err = eval_defined(NULL, &rs);
	if (err != NULL)
		return err;

	err = eval_ptr(bvf, &rd, opsz, 1, ins->off);
	if (err != NULL)
		return err;

	if (rd.v.type == BPF_ARG_PTR_STACK) {

		sv = st->sv + rd.u.max / sizeof(uint64_t);
		if (BPF_CLASS(ins->code) == BPF_STX &&
				BPF_MODE(ins->code) == EBPF_XADD)
			eval_max_bound(sv, msk);
		else
			*sv = rs;

	/* pointer to mbuf */
	} else if (rd.v.type == RTE_BPF_ARG_PTR_MBUF) {
		err = eval_mbuf_store(&rd, opsz);
		if (err != NULL)
			return err;
	}

	return NULL;
}

static const char *
eval_func_arg(struct bpf_verifier *bvf, const struct rte_bpf_arg *arg,
	struct bpf_reg_val *rv)
{
	uint32_t i, n;
	struct bpf_eval_state *st;
	const char *err;

	st = bvf->evst;

	if (rv->v.type == RTE_BPF_ARG_UNDEF)
		return "Undefined argument type";

	if (arg->type != rv->v.type &&
			arg->type != RTE_BPF_ARG_RAW &&
			(arg->type != RTE_BPF_ARG_PTR ||
			RTE_BPF_ARG_PTR_TYPE(rv->v.type) == 0))
		return "Invalid argument type";

	err = NULL;

	/* argument is a pointer */
	if (RTE_BPF_ARG_PTR_TYPE(arg->type) != 0) {

		err = eval_ptr(bvf, rv, arg->size, 1, 0);

		/*
		 * a pointer to a variable on the stack is passed
		 * as an argument; mark the stack space it occupies
		 * as initialized.
		 */
		if (err == NULL && rv->v.type == BPF_ARG_PTR_STACK) {

			i = rv->u.max / sizeof(uint64_t);
			n = i + arg->size / sizeof(uint64_t);
			while (i != n) {
				eval_fill_max_bound(st->sv + i, UINT64_MAX);
				i++;
			}
		}
	}

	return err;
}

static const char *
eval_call(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
{
	uint32_t i, idx;
	struct bpf_reg_val *rv;
	const struct rte_bpf_xsym *xsym;
	const char *err;

	idx = ins->imm;

	if (idx >= bvf->prm->nb_xsym ||
			bvf->prm->xsym[idx].type != RTE_BPF_XTYPE_FUNC)
		return "invalid external function index";

	/* for now don't support function calls on 32-bit platforms */
	if (sizeof(uint64_t) != sizeof(uintptr_t))
		return "function calls are supported only for 64 bit apps";

	xsym = bvf->prm->xsym + idx;

	/* evaluate function arguments */
	err = NULL;
	for (i = 0; i != xsym->func.nb_args && err == NULL; i++) {
		err = eval_func_arg(bvf, xsym->func.args + i,
			bvf->evst->rv + EBPF_REG_1 + i);
	}

	/* R1-R5 argument/scratch registers */
	for (i = EBPF_REG_1; i != EBPF_REG_6; i++)
		bvf->evst->rv[i].v.type = RTE_BPF_ARG_UNDEF;

	/* update return value */

	rv = bvf->evst->rv + EBPF_REG_0;
	rv->v = xsym->func.ret;
	if (rv->v.type == RTE_BPF_ARG_RAW)
		eval_fill_max_bound(rv,
			RTE_LEN2MASK(rv->v.size * CHAR_BIT, uint64_t));
	else if (RTE_BPF_ARG_PTR_TYPE(rv->v.type) != 0)
		eval_fill_imm64(rv, UINTPTR_MAX, 0);

	return err;
}

static void
eval_jeq_jne(struct bpf_reg_val *trd, struct bpf_reg_val *trs)
{
	/* sreg is constant */
	if (trs->u.min == trs->u.max) {
		trd->u = trs->u;
	/* dreg is constant */
	} else if (trd->u.min == trd->u.max) {
		trs->u = trd->u;
	} else {
		trd->u.max = RTE_MIN(trd->u.max, trs->u.max);
		trd->u.min = RTE_MAX(trd->u.min, trs->u.min);
		trs->u = trd->u;
	}

	/* sreg is constant */
	if (trs->s.min == trs->s.max) {
		trd->s = trs->s;
	/* dreg is constant */
	} else if (trd->s.min == trd->s.max) {
		trs->s = trd->s;
	} else {
		trd->s.max = RTE_MIN(trd->s.max, trs->s.max);
		trd->s.min = RTE_MAX(trd->s.min, trs->s.min);
		trs->s = trd->s;
	}
}
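
/*
 * Worked example: for JEQ with trd->u = [0, 100] and constant
 * trs->u = [5, 5], the taken branch narrows the destination register
 * to u = [5, 5]; the not-taken state is left unchanged by this helper.
 */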

static void
eval_jgt_jle(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
	struct bpf_reg_val *frd, struct bpf_reg_val *frs)
{
	frd->u.max = RTE_MIN(frd->u.max, frs->u.min);
	trd->u.min = RTE_MAX(trd->u.min, trs->u.min + 1);
}

static void
eval_jlt_jge(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
	struct bpf_reg_val *frd, struct bpf_reg_val *frs)
{
	frd->u.min = RTE_MAX(frd->u.min, frs->u.min);
	trd->u.max = RTE_MIN(trd->u.max, trs->u.max - 1);
}

static void
eval_jsgt_jsle(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
	struct bpf_reg_val *frd, struct bpf_reg_val *frs)
{
	frd->s.max = RTE_MIN(frd->s.max, frs->s.min);
	trd->s.min = RTE_MAX(trd->s.min, trs->s.min + 1);
}

static void
eval_jslt_jsge(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
	struct bpf_reg_val *frd, struct bpf_reg_val *frs)
{
	frd->s.min = RTE_MAX(frd->s.min, frs->s.min);
	trd->s.max = RTE_MIN(trd->s.max, trs->s.max - 1);
}
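
/*
 * Each helper above serves an opcode and its negation: e.g. JGT narrows
 * the taken state with eval_jgt_jle(trd, trs, frd, frs), while JLE
 * reuses the same helper with the true/false states swapped, since
 * "JLE taken" is exactly "JGT not taken".
 */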

static const char *
eval_jcc(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
{
	uint32_t op;
	const char *err;
	struct bpf_eval_state *fst, *tst;
	struct bpf_reg_val *frd, *frs, *trd, *trs;
	struct bpf_reg_val rvf, rvt;

	tst = bvf->evst;
	fst = bvf->evin->evst;

	frd = fst->rv + ins->dst_reg;
	trd = tst->rv + ins->dst_reg;

	if (BPF_SRC(ins->code) == BPF_X) {
		frs = fst->rv + ins->src_reg;
		trs = tst->rv + ins->src_reg;
	} else {
		frs = &rvf;
		trs = &rvt;
		eval_fill_imm(frs, UINT64_MAX, ins->imm);
		eval_fill_imm(trs, UINT64_MAX, ins->imm);
	}

	err = eval_defined(trd, trs);
	if (err != NULL)
		return err;

	op = BPF_OP(ins->code);

	if (op == BPF_JEQ)
		eval_jeq_jne(trd, trs);
	else if (op == EBPF_JNE)
		eval_jeq_jne(frd, frs);
	else if (op == BPF_JGT)
		eval_jgt_jle(trd, trs, frd, frs);
	else if (op == EBPF_JLE)
		eval_jgt_jle(frd, frs, trd, trs);
	else if (op == EBPF_JLT)
		eval_jlt_jge(trd, trs, frd, frs);
	else if (op == BPF_JGE)
		eval_jlt_jge(frd, frs, trd, trs);
	else if (op == EBPF_JSGT)
		eval_jsgt_jsle(trd, trs, frd, frs);
	else if (op == EBPF_JSLE)
		eval_jsgt_jsle(frd, frs, trd, trs);
	else if (op == EBPF_JSLT)
		eval_jslt_jsge(trd, trs, frd, frs);
	else if (op == EBPF_JSGE)
		eval_jslt_jsge(frd, frs, trd, trs);

	return NULL;
}

/*
 * validate parameters for each instruction type.
 */
static const struct bpf_ins_check ins_chk[UINT8_MAX + 1] = {
	/* ALU IMM 32-bit instructions */
	[(BPF_ALU | BPF_ADD | BPF_K)] = {
		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX,},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_SUB | BPF_K)] = {
		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX,},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_AND | BPF_K)] = {
		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX,},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_OR | BPF_K)] = {
		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX,},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_LSH | BPF_K)] = {
		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX,},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_RSH | BPF_K)] = {
		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX,},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_XOR | BPF_K)] = {
		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX,},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_MUL | BPF_K)] = {
		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX,},
		.eval = eval_alu,
	},
	[(BPF_ALU | EBPF_MOV | BPF_K)] = {
		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX,},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_DIV | BPF_K)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 1, .max = UINT32_MAX},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_MOD | BPF_K)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 1, .max = UINT32_MAX},
		.eval = eval_alu,
	},
	/* ALU IMM 64-bit instructions */
	[(EBPF_ALU64 | BPF_ADD | BPF_K)] = {
		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX,},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_SUB | BPF_K)] = {
		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX,},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_AND | BPF_K)] = {
		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX,},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_OR | BPF_K)] = {
		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX,},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_LSH | BPF_K)] = {
		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX,},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_RSH | BPF_K)] = {
		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX,},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | EBPF_ARSH | BPF_K)] = {
		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX,},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_XOR | BPF_K)] = {
		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX,},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_MUL | BPF_K)] = {
		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX,},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | EBPF_MOV | BPF_K)] = {
		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX,},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_DIV | BPF_K)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 1, .max = UINT32_MAX},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_MOD | BPF_K)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 1, .max = UINT32_MAX},
		.eval = eval_alu,
	},
	/* ALU REG 32-bit instructions */
	[(BPF_ALU | BPF_ADD | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_SUB | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_AND | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_OR | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_LSH | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_RSH | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_XOR | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_MUL | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_DIV | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_MOD | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(BPF_ALU | EBPF_MOV | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(BPF_ALU | BPF_NEG)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(BPF_ALU | EBPF_END | EBPF_TO_BE)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 16, .max = 64},
		.check = check_alu_bele,
		.eval = eval_bele,
	},
	[(BPF_ALU | EBPF_END | EBPF_TO_LE)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 16, .max = 64},
		.check = check_alu_bele,
		.eval = eval_bele,
	},
	/* ALU REG 64-bit instructions */
	[(EBPF_ALU64 | BPF_ADD | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_SUB | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_AND | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_OR | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_LSH | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_RSH | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | EBPF_ARSH | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_XOR | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_MUL | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_DIV | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_MOD | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | EBPF_MOV | BPF_X)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	[(EBPF_ALU64 | BPF_NEG)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_alu,
	},
	/* load instructions */
	[(BPF_LDX | BPF_MEM | BPF_B)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_load,
	},
	[(BPF_LDX | BPF_MEM | BPF_H)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_load,
	},
	[(BPF_LDX | BPF_MEM | BPF_W)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_load,
	},
	[(BPF_LDX | BPF_MEM | EBPF_DW)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_load,
	},
	/* load 64 bit immediate value */
	[(BPF_LD | BPF_IMM | EBPF_DW)] = {
		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_ld_imm64,
	},
	/* load absolute instructions */
	[(BPF_LD | BPF_ABS | BPF_B)] = {
		.mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = INT32_MAX},
		.eval = eval_ld_mbuf,
	},
	[(BPF_LD | BPF_ABS | BPF_H)] = {
		.mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = INT32_MAX},
		.eval = eval_ld_mbuf,
	},
	[(BPF_LD | BPF_ABS | BPF_W)] = {
		.mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = INT32_MAX},
		.eval = eval_ld_mbuf,
	},
	/* load indirect instructions */
	[(BPF_LD | BPF_IND | BPF_B)] = {
		.mask = { .dreg = ZERO_REG, .sreg = IND_SRC_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_ld_mbuf,
	},
	[(BPF_LD | BPF_IND | BPF_H)] = {
		.mask = { .dreg = ZERO_REG, .sreg = IND_SRC_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_ld_mbuf,
	},
	[(BPF_LD | BPF_IND | BPF_W)] = {
		.mask = { .dreg = ZERO_REG, .sreg = IND_SRC_REGS},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_ld_mbuf,
	},
	/* store REG instructions */
	[(BPF_STX | BPF_MEM | BPF_B)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_store,
	},
	[(BPF_STX | BPF_MEM | BPF_H)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_store,
	},
	[(BPF_STX | BPF_MEM | BPF_W)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_store,
	},
	[(BPF_STX | BPF_MEM | EBPF_DW)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_store,
	},
	/* atomic add instructions */
	[(BPF_STX | EBPF_XADD | BPF_W)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_store,
	},
	[(BPF_STX | EBPF_XADD | EBPF_DW)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_store,
	},
	/* store IMM instructions */
	[(BPF_ST | BPF_MEM | BPF_B)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_store,
	},
	[(BPF_ST | BPF_MEM | BPF_H)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_store,
	},
	[(BPF_ST | BPF_MEM | BPF_W)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_store,
	},
	[(BPF_ST | BPF_MEM | EBPF_DW)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_store,
	},
	/* jump instruction */
	[(BPF_JMP | BPF_JA)] = {
		.mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
	},
	/* jcc IMM instructions */
	[(BPF_JMP | BPF_JEQ | BPF_K)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_jcc,
	},
	[(BPF_JMP | EBPF_JNE | BPF_K)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_jcc,
	},
	[(BPF_JMP | BPF_JGT | BPF_K)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_jcc,
	},
	[(BPF_JMP | EBPF_JLT | BPF_K)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_jcc,
	},
	[(BPF_JMP | BPF_JGE | BPF_K)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_jcc,
	},
	[(BPF_JMP | EBPF_JLE | BPF_K)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_jcc,
	},
	[(BPF_JMP | EBPF_JSGT | BPF_K)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_jcc,
	},
	[(BPF_JMP | EBPF_JSLT | BPF_K)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_jcc,
	},
	[(BPF_JMP | EBPF_JSGE | BPF_K)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_jcc,
	},
	[(BPF_JMP | EBPF_JSLE | BPF_K)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_jcc,
	},
	[(BPF_JMP | BPF_JSET | BPF_K)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_jcc,
	},
	/* jcc REG instructions */
	[(BPF_JMP | BPF_JEQ | BPF_X)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_jcc,
	},
	[(BPF_JMP | EBPF_JNE | BPF_X)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_jcc,
	},
	[(BPF_JMP | BPF_JGT | BPF_X)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_jcc,
	},
	[(BPF_JMP | EBPF_JLT | BPF_X)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_jcc,
	},
	[(BPF_JMP | BPF_JGE | BPF_X)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_jcc,
	},
	[(BPF_JMP | EBPF_JLE | BPF_X)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_jcc,
	},
	[(BPF_JMP | EBPF_JSGT | BPF_X)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_jcc,
	},
	[(BPF_JMP | EBPF_JSLT | BPF_X)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_jcc,
	},
	[(BPF_JMP | EBPF_JSGE | BPF_X)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_jcc,
	},
	[(BPF_JMP | EBPF_JSLE | BPF_X)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_jcc,
	},
	[(BPF_JMP | BPF_JSET | BPF_X)] = {
		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
		.off = { .min = 0, .max = UINT16_MAX},
		.imm = { .min = 0, .max = 0},
		.eval = eval_jcc,
	},
	/* call instruction */
	[(BPF_JMP | EBPF_CALL)] = {
		.mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = UINT32_MAX},
		.eval = eval_call,
	},
	/* ret instruction */
	[(BPF_JMP | EBPF_EXIT)] = {
		.mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
		.off = { .min = 0, .max = 0},
		.imm = { .min = 0, .max = 0},
		.eval = eval_exit,
	},
};

/*
 * make sure that instruction syntax is valid,
 * and its fields don't violate particular instruction type restrictions.
 */
static const char *
check_syntax(const struct ebpf_insn *ins)
{
	uint8_t op;
	uint16_t off;
	uint32_t imm;

	op = ins->code;

	if (ins_chk[op].mask.dreg == 0)
		return "invalid opcode";

	if ((ins_chk[op].mask.dreg & 1 << ins->dst_reg) == 0)
		return "invalid dst-reg field";

	if ((ins_chk[op].mask.sreg & 1 << ins->src_reg) == 0)
		return "invalid src-reg field";

	off = ins->off;
	if (ins_chk[op].off.min > off || ins_chk[op].off.max < off)
		return "invalid off field";

	imm = ins->imm;
	if (ins_chk[op].imm.min > imm || ins_chk[op].imm.max < imm)
		return "invalid imm field";

	if (ins_chk[op].check != NULL)
		return ins_chk[op].check(ins);

	return NULL;
}

/*
 * helper function, return instruction index for the given node.
 */
static uint32_t
get_node_idx(const struct bpf_verifier *bvf, const struct inst_node *node)
{
	return node - bvf->in;
}

/*
 * helper function, used to walk through constructed CFG.
 */
static struct inst_node *
get_next_node(struct bpf_verifier *bvf, struct inst_node *node)
{
	uint32_t ce, ne, dst;

	ne = node->nb_edge;
	ce = node->cur_edge;
	if (ce == ne)
		return NULL;

	node->cur_edge++;
	dst = node->edge_dest[ce];
	return bvf->in + dst;
}

static void
set_node_colour(struct bpf_verifier *bvf, struct inst_node *node,
	uint32_t new)
{
	uint32_t prev;

	prev = node->colour;
	node->colour = new;

	bvf->node_colour[prev]--;
	bvf->node_colour[new]++;
}

/*
 * helper function, add new edge between two nodes.
 */
static int
add_edge(struct bpf_verifier *bvf, struct inst_node *node, uint32_t nidx)
{
	uint32_t ne;

	if (nidx > bvf->prm->nb_ins) {
		RTE_BPF_LOG(ERR, "%s: program boundary violation at pc: %u, "
			"next pc: %u\n",
			__func__, get_node_idx(bvf, node), nidx);
		return -EINVAL;
	}

	ne = node->nb_edge;
	if (ne >= RTE_DIM(node->edge_dest)) {
		RTE_BPF_LOG(ERR, "%s: internal error at pc: %u\n",
			__func__, get_node_idx(bvf, node));
		return -EINVAL;
	}

	node->edge_dest[ne] = nidx;
	node->nb_edge = ne + 1;
	return 0;
}

/*
 * helper function, determine type of edge between two nodes.
 */
static void
set_edge_type(struct bpf_verifier *bvf, struct inst_node *node,
	const struct inst_node *next)
{
	uint32_t ce, clr, type;

	ce = node->cur_edge - 1;
	clr = next->colour;

	type = UNKNOWN_EDGE;

	if (clr == WHITE)
		type = TREE_EDGE;
	else if (clr == GREY)
		type = BACK_EDGE;
	else if (clr == BLACK)
		/*
		 * in fact it could be either direct or cross edge,
		 * but for now, we don't need to distinguish between them.
		 */
		type = CROSS_EDGE;

	node->edge_type[ce] = type;
	bvf->edge_type[type]++;
}
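
/*
 * For illustration: during DFS a destination that is still WHITE has
 * never been visited (tree edge), GREY means it is on the current DFS
 * path (back edge, i.e. a loop), and BLACK means it was fully processed
 * earlier (forward/cross edge).
 */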

static struct inst_node *
get_prev_node(struct bpf_verifier *bvf, struct inst_node *node)
{
	return bvf->in + node->prev_node;
}

/*
 * Depth-First Search (DFS) through previously constructed
 * Control Flow Graph (CFG).
 * Information collected along this path is used later to determine
 * whether there are any loops and/or unreachable instructions.
 */
static void
dfs(struct bpf_verifier *bvf)
{
	struct inst_node *next, *node;

	node = bvf->in;
	while (node != NULL) {

		if (node->colour == WHITE)
			set_node_colour(bvf, node, GREY);

		if (node->colour == GREY) {

			/* find next unprocessed child node */
			do {
				next = get_next_node(bvf, node);
				if (next == NULL)
					break;
				set_edge_type(bvf, node, next);
			} while (next->colour != WHITE);

			if (next != NULL) {
				/* proceed with next child */
				next->prev_node = get_node_idx(bvf, node);
				node = next;
			} else {
				/*
				 * finished with current node and all its
				 * children, proceed with parent
				 */
				set_node_colour(bvf, node, BLACK);
				node->cur_edge = 0;
				node = get_prev_node(bvf, node);
			}
		} else
			node = NULL;
	}
}
1911
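/*
 * For illustration, a minimal program containing a loop:
 *
 *	0: r0 = 0
 *	1: r0 += 1
 *	2: if r0 != 10 goto -2	// edges 2 --> 1 and 2 --> 3
 *	3: exit
 *
 * When dfs() reaches pc=2, pc=1 is still GREY (on the current path),
 * so edge 2 --> 1 is recorded as a BACK_EDGE and the program is later
 * rejected by validate() as containing a loop.
 */
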
/*
 * report unreachable instructions.
 */
static void
log_unreachable(const struct bpf_verifier *bvf)
{
	uint32_t i;
	struct inst_node *node;
	const struct ebpf_insn *ins;

	for (i = 0; i != bvf->prm->nb_ins; i++) {

		node = bvf->in + i;
		ins = bvf->prm->ins + i;

		if (node->colour == WHITE &&
				ins->code != (BPF_LD | BPF_IMM | EBPF_DW))
			RTE_BPF_LOG(ERR, "unreachable code at pc: %u;\n", i);
	}
}

/*
 * report loops detected.
 */
static void
log_loop(const struct bpf_verifier *bvf)
{
	uint32_t i, j;
	struct inst_node *node;

	for (i = 0; i != bvf->prm->nb_ins; i++) {

		node = bvf->in + i;
		if (node->colour != BLACK)
			continue;

		for (j = 0; j != node->nb_edge; j++) {
			if (node->edge_type[j] == BACK_EDGE)
				RTE_BPF_LOG(ERR,
					"loop at pc:%u --> pc:%u;\n",
					i, node->edge_dest[j]);
		}
	}
}

/*
 * First pass goes through all instructions in the set, checks that each
 * instruction is a valid one (correct syntax, valid field values, etc.)
 * and constructs control flow graph (CFG).
 * Then depth-first search is performed over the constructed graph.
 * Programs with unreachable instructions and/or loops will be rejected.
 */
static int
validate(struct bpf_verifier *bvf)
{
	int32_t rc;
	uint32_t i;
	struct inst_node *node;
	const struct ebpf_insn *ins;
	const char *err;

	rc = 0;
	for (i = 0; i < bvf->prm->nb_ins; i++) {

		ins = bvf->prm->ins + i;
		node = bvf->in + i;

		err = check_syntax(ins);
		if (err != NULL) {
			RTE_BPF_LOG(ERR, "%s: %s at pc: %u\n",
				__func__, err, i);
			rc |= -EINVAL;
		}

		/*
		 * construct CFG: jcc nodes have two outgoing edges,
		 * 'exit' nodes have none, all other nodes have exactly
		 * one outgoing edge.
		 */
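		/*
		 * For example, a conditional jump at pc=i with offset
		 * 'off' produces the edges i --> i + off + 1 (branch
		 * taken) and i --> i + 1 (fall-through); 'ja' produces
		 * only the former; LD_IMM64 occupies two instruction
		 * slots and so falls through to i + 2.
		 */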
		switch (ins->code) {
		case (BPF_JMP | EBPF_EXIT):
			break;
		case (BPF_JMP | BPF_JEQ | BPF_K):
		case (BPF_JMP | EBPF_JNE | BPF_K):
		case (BPF_JMP | BPF_JGT | BPF_K):
		case (BPF_JMP | EBPF_JLT | BPF_K):
		case (BPF_JMP | BPF_JGE | BPF_K):
		case (BPF_JMP | EBPF_JLE | BPF_K):
		case (BPF_JMP | EBPF_JSGT | BPF_K):
		case (BPF_JMP | EBPF_JSLT | BPF_K):
		case (BPF_JMP | EBPF_JSGE | BPF_K):
		case (BPF_JMP | EBPF_JSLE | BPF_K):
		case (BPF_JMP | BPF_JSET | BPF_K):
		case (BPF_JMP | BPF_JEQ | BPF_X):
		case (BPF_JMP | EBPF_JNE | BPF_X):
		case (BPF_JMP | BPF_JGT | BPF_X):
		case (BPF_JMP | EBPF_JLT | BPF_X):
		case (BPF_JMP | BPF_JGE | BPF_X):
		case (BPF_JMP | EBPF_JLE | BPF_X):
		case (BPF_JMP | EBPF_JSGT | BPF_X):
		case (BPF_JMP | EBPF_JSLT | BPF_X):
		case (BPF_JMP | EBPF_JSGE | BPF_X):
		case (BPF_JMP | EBPF_JSLE | BPF_X):
		case (BPF_JMP | BPF_JSET | BPF_X):
			rc |= add_edge(bvf, node, i + ins->off + 1);
			rc |= add_edge(bvf, node, i + 1);
			bvf->nb_jcc_nodes++;
			break;
		case (BPF_JMP | BPF_JA):
			rc |= add_edge(bvf, node, i + ins->off + 1);
			break;
		/* load 64 bit immediate value */
		case (BPF_LD | BPF_IMM | EBPF_DW):
			rc |= add_edge(bvf, node, i + 2);
			i++;
			break;
		case (BPF_LD | BPF_ABS | BPF_B):
		case (BPF_LD | BPF_ABS | BPF_H):
		case (BPF_LD | BPF_ABS | BPF_W):
		case (BPF_LD | BPF_IND | BPF_B):
		case (BPF_LD | BPF_IND | BPF_H):
		case (BPF_LD | BPF_IND | BPF_W):
			bvf->nb_ldmb_nodes++;
			/* fallthrough */
		default:
			rc |= add_edge(bvf, node, i + 1);
			break;
		}

		bvf->nb_nodes++;
		bvf->node_colour[WHITE]++;
	}

	if (rc != 0)
		return rc;

	dfs(bvf);

	RTE_BPF_LOG(DEBUG, "%s(%p) stats:\n"
		"nb_nodes=%u;\n"
		"nb_jcc_nodes=%u;\n"
		"node_colour={[WHITE]=%u, [GREY]=%u, [BLACK]=%u};\n"
		"edge_type={[UNKNOWN]=%u, [TREE]=%u, [BACK]=%u, [CROSS]=%u};\n",
		__func__, bvf,
		bvf->nb_nodes,
		bvf->nb_jcc_nodes,
		bvf->node_colour[WHITE], bvf->node_colour[GREY],
		bvf->node_colour[BLACK],
		bvf->edge_type[UNKNOWN_EDGE], bvf->edge_type[TREE_EDGE],
		bvf->edge_type[BACK_EDGE], bvf->edge_type[CROSS_EDGE]);

	if (bvf->node_colour[BLACK] != bvf->nb_nodes) {
		RTE_BPF_LOG(ERR, "%s(%p) unreachable instructions;\n",
			__func__, bvf);
		log_unreachable(bvf);
		return -EINVAL;
	}

	if (bvf->node_colour[GREY] != 0 || bvf->node_colour[WHITE] != 0 ||
			bvf->edge_type[UNKNOWN_EDGE] != 0) {
		RTE_BPF_LOG(ERR, "%s(%p) DFS internal error;\n",
			__func__, bvf);
		return -EINVAL;
	}

	if (bvf->edge_type[BACK_EDGE] != 0) {
		RTE_BPF_LOG(ERR, "%s(%p) loops detected;\n",
			__func__, bvf);
		log_loop(bvf);
		return -EINVAL;
	}

	return 0;
}

/*
 * helper functions get/free eval states.
 */
static struct bpf_eval_state *
pull_eval_state(struct bpf_verifier *bvf)
{
	uint32_t n;

	n = bvf->evst_pool.cur;
	if (n == bvf->evst_pool.num)
		return NULL;

	bvf->evst_pool.cur = n + 1;
	return bvf->evst_pool.ent + n;
}

static void
push_eval_state(struct bpf_verifier *bvf)
{
	bvf->evst_pool.cur--;
}

static void
evst_pool_fini(struct bpf_verifier *bvf)
{
	bvf->evst = NULL;
	free(bvf->evst_pool.ent);
	memset(&bvf->evst_pool, 0, sizeof(bvf->evst_pool));
}

static int
evst_pool_init(struct bpf_verifier *bvf)
{
	uint32_t n;

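	/*
	 * Pool sizing note: at most one state can be held per conditional
	 * jump (save_eval_state() is called only for nodes with more than
	 * one outgoing edge), plus one state for the evaluation itself.
	 */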
	n = bvf->nb_jcc_nodes + 1;

	bvf->evst_pool.ent = calloc(n, sizeof(bvf->evst_pool.ent[0]));
	if (bvf->evst_pool.ent == NULL)
		return -ENOMEM;

	bvf->evst_pool.num = n;
	bvf->evst_pool.cur = 0;

	bvf->evst = pull_eval_state(bvf);
	return 0;
}

/*
 * Save current eval state.
 */
static int
save_eval_state(struct bpf_verifier *bvf, struct inst_node *node)
{
	struct bpf_eval_state *st;

	/* get new eval_state for this node */
	st = pull_eval_state(bvf);
	if (st == NULL) {
		RTE_BPF_LOG(ERR,
			"%s: internal error (out of space) at pc: %u\n",
			__func__, get_node_idx(bvf, node));
		return -ENOMEM;
	}

	/* make a copy of current state */
	memcpy(st, bvf->evst, sizeof(*st));

	/* swap current state with new one */
	node->evst = bvf->evst;
	bvf->evst = st;

	RTE_BPF_LOG(DEBUG, "%s(bvf=%p,node=%u) old/new states: %p/%p;\n",
		__func__, bvf, get_node_idx(bvf, node), node->evst, bvf->evst);

	return 0;
}

/*
 * Restore previous eval state and mark current eval state as free.
 */
static void
restore_eval_state(struct bpf_verifier *bvf, struct inst_node *node)
{
	RTE_BPF_LOG(DEBUG, "%s(bvf=%p,node=%u) old/new states: %p/%p;\n",
		__func__, bvf, get_node_idx(bvf, node), bvf->evst, node->evst);

	bvf->evst = node->evst;
	node->evst = NULL;
	push_eval_state(bvf);
}

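/*
 * Save/restore pairing, for illustration: when evaluate() reaches a
 * conditional jump it calls save_eval_state(), stashing the current
 * register/stack model in the node before descending the first edge.
 * Just before descending the last outgoing edge, restore_eval_state()
 * puts that model back, so the fall-through path is evaluated against
 * the state that existed at the branch, not against whatever the taken
 * path produced.
 */
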
static void
log_eval_state(const struct bpf_verifier *bvf, const struct ebpf_insn *ins,
	uint32_t pc, int32_t loglvl)
{
	const struct bpf_eval_state *st;
	const struct bpf_reg_val *rv;

	rte_log(loglvl, rte_bpf_logtype, "%s(pc=%u):\n", __func__, pc);

	st = bvf->evst;
	rv = st->rv + ins->dst_reg;

	rte_log(loglvl, rte_bpf_logtype,
		"r%u={\n"
		"\tv={type=%u, size=%zu},\n"
		"\tmask=0x%" PRIx64 ",\n"
		"\tu={min=0x%" PRIx64 ", max=0x%" PRIx64 "},\n"
		"\ts={min=%" PRId64 ", max=%" PRId64 "},\n"
		"};\n",
		ins->dst_reg,
		rv->v.type, rv->v.size,
		rv->mask,
		rv->u.min, rv->u.max,
		rv->s.min, rv->s.max);
}

/*
 * Do second pass through CFG and try to evaluate instructions
 * via each possible path.
 * Right now evaluation functionality is quite limited.
 * Extra checks still need to be added for:
 * - use/return of uninitialized registers.
 * - use of uninitialized data from the stack.
 * - memory boundary violations.
 */
static int
evaluate(struct bpf_verifier *bvf)
{
	int32_t rc;
	uint32_t idx, op;
	const char *err;
	const struct ebpf_insn *ins;
	struct inst_node *next, *node;

	/* initial state of frame pointer */
	static const struct bpf_reg_val rvfp = {
		.v = {
			.type = BPF_ARG_PTR_STACK,
			.size = MAX_BPF_STACK_SIZE,
		},
		.mask = UINT64_MAX,
		.u = {.min = MAX_BPF_STACK_SIZE, .max = MAX_BPF_STACK_SIZE},
		.s = {.min = MAX_BPF_STACK_SIZE, .max = MAX_BPF_STACK_SIZE},
	};
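
	/*
	 * Note: R10 is modelled with the exact value MAX_BPF_STACK_SIZE
	 * (min == max), so the load/store evaluation code can map a stack
	 * access such as *(uint64_t *)(r10 - 8) to a fixed slot within
	 * bpf_eval_state.sv[].
	 */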

	bvf->evst->rv[EBPF_REG_1].v = bvf->prm->prog_arg;
	bvf->evst->rv[EBPF_REG_1].mask = UINT64_MAX;
	if (bvf->prm->prog_arg.type == RTE_BPF_ARG_RAW)
		eval_max_bound(bvf->evst->rv + EBPF_REG_1, UINT64_MAX);

	bvf->evst->rv[EBPF_REG_10] = rvfp;

	ins = bvf->prm->ins;
	node = bvf->in;
	next = node;
	rc = 0;

	while (node != NULL && rc == 0) {

		/*
		 * current node evaluation, make sure we evaluate
		 * each node only once.
		 */
		if (next != NULL) {

			bvf->evin = node;
			idx = get_node_idx(bvf, node);
			op = ins[idx].code;

			/* for jcc node make a copy of evaluation state */
			if (node->nb_edge > 1)
				rc |= save_eval_state(bvf, node);

			if (ins_chk[op].eval != NULL && rc == 0) {
				err = ins_chk[op].eval(bvf, ins + idx);
				if (err != NULL) {
					RTE_BPF_LOG(ERR, "%s: %s at pc: %u\n",
						__func__, err, idx);
					rc = -EINVAL;
				}
			}

			log_eval_state(bvf, ins + idx, idx, RTE_LOG_DEBUG);
			bvf->evin = NULL;
		}

		/* proceed through CFG */
		next = get_next_node(bvf, node);
		if (next != NULL) {

			/* proceed with next child */
			if (node->cur_edge == node->nb_edge &&
					node->evst != NULL)
				restore_eval_state(bvf, node);

			next->prev_node = get_node_idx(bvf, node);
			node = next;
		} else {
			/*
			 * finished with current node and all its
			 * children, proceed with parent
			 */
			node->cur_edge = 0;
			node = get_prev_node(bvf, node);

			/* finished */
			if (node == bvf->in)
				node = NULL;
		}
	}

	return rc;
}

int
bpf_validate(struct rte_bpf *bpf)
{
	int32_t rc;
	struct bpf_verifier bvf;

	/* check input argument type, don't allow mbuf ptr on 32-bit */
	if (bpf->prm.prog_arg.type != RTE_BPF_ARG_RAW &&
			bpf->prm.prog_arg.type != RTE_BPF_ARG_PTR &&
			(sizeof(uint64_t) != sizeof(uintptr_t) ||
			bpf->prm.prog_arg.type != RTE_BPF_ARG_PTR_MBUF)) {
		RTE_BPF_LOG(ERR, "%s: unsupported argument type\n", __func__);
		return -ENOTSUP;
	}

	memset(&bvf, 0, sizeof(bvf));
	bvf.prm = &bpf->prm;
	bvf.in = calloc(bpf->prm.nb_ins, sizeof(bvf.in[0]));
	if (bvf.in == NULL)
		return -ENOMEM;

	rc = validate(&bvf);

	if (rc == 0) {
		rc = evst_pool_init(&bvf);
		if (rc == 0)
			rc = evaluate(&bvf);
		evst_pool_fini(&bvf);
	}

	free(bvf.in);

	/* copy collected info */
	if (rc == 0) {
		bpf->stack_sz = bvf.stack_sz;

		/* for LD_ABS/LD_IND, we'll need extra space on the stack */
		if (bvf.nb_ldmb_nodes != 0)
			bpf->stack_sz = RTE_ALIGN_CEIL(bpf->stack_sz +
				sizeof(uint64_t), sizeof(uint64_t));
	}

	return rc;
}
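
/*
 * Usage sketch (illustrative only; the caller details are assumptions,
 * not part of this file): the load path is expected to invoke
 * bpf_validate() on a freshly created rte_bpf object before any
 * executable code is generated for it, e.g.:
 *
 *	struct rte_bpf *bpf;
 *
 *	bpf = <allocate and fill bpf->prm from user-supplied rte_bpf_prm>;
 *	rc = bpf_validate(bpf);
 *	if (rc != 0)
 *		<reject the program>;
 *	// on success, bpf->stack_sz holds the stack size the program needs
 */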