/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>

#include <rte_memory.h>
#include <rte_debug.h>
#include <rte_hexdump.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_byteorder.h>
#include <rte_errno.h>
#include "test.h"

#if !defined(RTE_LIB_BPF)

static int
test_bpf(void)
{
	printf("BPF not supported, skipping test\n");
	return TEST_SKIPPED;
}

#else

#include <rte_bpf.h>
#include <rte_ether.h>
#include <rte_ip.h>


/*
 * Basic functional tests for librte_bpf.
 * Each test case loads an eBPF program, executes it and
 * compares the results with expected values.
 */
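/*
 * Roughly, each test case below is expected to be driven like this
 * (a sketch of the intended flow, assuming the standard librte_bpf API):
 *
 *	tst->prepare(arg);              // fill the input data
 *	bpf = rte_bpf_load(&tst->prm);  // verify and load the program
 *	rc = rte_bpf_exec(bpf, arg);    // run it
 *	tst->check_result(rc, arg);     // compare against expected values
 *	rte_bpf_destroy(bpf);
 */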

struct dummy_offset {
	uint64_t u64;
	uint32_t u32;
	uint16_t u16;
	uint8_t u8;
};

struct dummy_vect8 {
	struct dummy_offset in[8];
	struct dummy_offset out[8];
};

struct dummy_net {
	struct rte_ether_hdr eth_hdr;
	struct rte_vlan_hdr vlan_hdr;
	struct rte_ipv4_hdr ip_hdr;
};

#define DUMMY_MBUF_NUM	2

/* first mbuf in the packet, should always be at offset 0 */
struct dummy_mbuf {
	struct rte_mbuf mb[DUMMY_MBUF_NUM];
	uint8_t buf[DUMMY_MBUF_NUM][RTE_MBUF_DEFAULT_BUF_SIZE];
};

#define TEST_FILL_1	0xDEADBEEF

#define TEST_MUL_1	21
#define TEST_MUL_2	-100

#define TEST_SHIFT_1	15
#define TEST_SHIFT_2	33

#define TEST_SHIFT32_MASK	(CHAR_BIT * sizeof(uint32_t) - 1)
#define TEST_SHIFT64_MASK	(CHAR_BIT * sizeof(uint64_t) - 1)

#define TEST_JCC_1	0
#define TEST_JCC_2	-123
#define TEST_JCC_3	5678
#define TEST_JCC_4	TEST_FILL_1

#define TEST_IMM_1	UINT64_MAX
#define TEST_IMM_2	((uint64_t)INT64_MIN)
#define TEST_IMM_3	((uint64_t)INT64_MAX + INT32_MAX)
#define TEST_IMM_4	((uint64_t)UINT32_MAX)
#define TEST_IMM_5	((uint64_t)UINT32_MAX + 1)

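/* 0x2a == 42 in every byte: the XOR constant used by memfrob(3) */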
#define TEST_MEMFROB	0x2a2a2a2a

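/* "geek" and "week" encoded as little-endian 32-bit words */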
#define STRING_GEEK	0x6B656567
#define STRING_WEEK	0x6B656577

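/* 172.168.2.0/24, in host byte order */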
#define TEST_NETMASK	0xffffff00
#define TEST_SUBNET	0xaca80200

uint8_t src_mac[] = { 0x00, 0xFF, 0xAA, 0xFF, 0xAA, 0xFF };
uint8_t dst_mac[] = { 0x00, 0xAA, 0xFF, 0xAA, 0xFF, 0xAA };

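/* 172.168.2.1 and 172.168.2.2, in host byte order */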
uint32_t ip_src_addr = (172U << 24) | (168U << 16) | (2 << 8) | 1;
uint32_t ip_dst_addr = (172U << 24) | (168U << 16) | (2 << 8) | 2;

struct bpf_test {
	const char *name;
	size_t arg_sz;
	struct rte_bpf_prm prm;
	void (*prepare)(void *);
	int (*check_result)(uint64_t, const void *);
	uint32_t allow_fail;
};

/*
 * Compare return value and result data with expected ones.
 * Report a failure if they don't match.
 */
static int
cmp_res(const char *func, uint64_t exp_rc, uint64_t ret_rc,
	const void *exp_res, const void *ret_res, size_t res_sz)
{
	int32_t ret;

	ret = 0;
	if (exp_rc != ret_rc) {
		printf("%s@%d: invalid return value, expected: 0x%" PRIx64
			", result: 0x%" PRIx64 "\n",
			func, __LINE__, exp_rc, ret_rc);
		ret |= -1;
	}

	if (memcmp(exp_res, ret_res, res_sz) != 0) {
		printf("%s: invalid value\n", func);
		rte_memdump(stdout, "expected", exp_res, res_sz);
		rte_memdump(stdout, "result", ret_res, res_sz);
		ret |= -1;
	}

	return ret;
}

/* store immediate test-cases */
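/*
 * Store the immediate TEST_FILL_1 into every field of the dummy_offset
 * pointed to by R1 and return 1. Note that the 64-bit store sign-extends
 * the 32-bit imm, which is why the check below expects
 * (int32_t)TEST_FILL_1 in u64.
 */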
static const struct ebpf_insn test_store1_prog[] = {
	{
		.code = (BPF_ST | BPF_MEM | BPF_B),
		.dst_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_offset, u8),
		.imm = TEST_FILL_1,
	},
	{
		.code = (BPF_ST | BPF_MEM | BPF_H),
		.dst_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_offset, u16),
		.imm = TEST_FILL_1,
	},
	{
		.code = (BPF_ST | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_offset, u32),
		.imm = TEST_FILL_1,
	},
	{
		.code = (BPF_ST | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_offset, u64),
		.imm = TEST_FILL_1,
	},
	/* return 1 */
	{
		.code = (BPF_ALU | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 1,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};

static void
test_store1_prepare(void *arg)
{
	struct dummy_offset *df;

	df = arg;
	memset(df, 0, sizeof(*df));
}

static int
test_store1_check(uint64_t rc, const void *arg)
{
	const struct dummy_offset *dft;
	struct dummy_offset dfe;

	dft = arg;

	memset(&dfe, 0, sizeof(dfe));
	dfe.u64 = (int32_t)TEST_FILL_1;
	dfe.u32 = dfe.u64;
	dfe.u16 = dfe.u64;
	dfe.u8 = dfe.u64;

	return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe));
}

/* store register test-cases */
static const struct ebpf_insn test_store2_prog[] = {

	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = TEST_FILL_1,
	},
	{
		.code = (BPF_STX | BPF_MEM | BPF_B),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_offset, u8),
	},
	{
		.code = (BPF_STX | BPF_MEM | BPF_H),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_offset, u16),
	},
	{
		.code = (BPF_STX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_offset, u32),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_offset, u64),
	},
	/* return 1 */
	{
		.code = (BPF_ALU | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 1,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};

/* load test-cases */
static const struct ebpf_insn test_load1_prog[] = {

	{
		.code = (BPF_LDX | BPF_MEM | BPF_B),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_offset, u8),
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_H),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_offset, u16),
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_offset, u32),
	},
	{
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_offset, u64),
	},
	/* return sum */
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_4,
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_3,
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_2,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};

static void
test_load1_prepare(void *arg)
{
	struct dummy_offset *df;

	df = arg;

	memset(df, 0, sizeof(*df));
	df->u64 = (int32_t)TEST_FILL_1;
	df->u32 = df->u64;
	df->u16 = df->u64;
	df->u8 = df->u64;
}

static int
test_load1_check(uint64_t rc, const void *arg)
{
	uint64_t v;
	const struct dummy_offset *dft;

	dft = arg;
	v = dft->u64;
	v += dft->u32;
	v += dft->u16;
	v += dft->u8;

	return cmp_res(__func__, v, rc, dft, dft, sizeof(*dft));
}

/* load immediate test-cases */
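/*
 * A 64-bit immediate load (BPF_LD | BPF_IMM | EBPF_DW) occupies two
 * instruction slots: the second, otherwise empty, slot carries the
 * upper 32 bits of the value in its imm field.
 */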
static const struct ebpf_insn test_ldimm1_prog[] = {

	{
		.code = (BPF_LD | BPF_IMM | EBPF_DW),
		.dst_reg = EBPF_REG_0,
		.imm = (uint32_t)TEST_IMM_1,
	},
	{
		.imm = TEST_IMM_1 >> 32,
	},
	{
		.code = (BPF_LD | BPF_IMM | EBPF_DW),
		.dst_reg = EBPF_REG_3,
		.imm = (uint32_t)TEST_IMM_2,
	},
	{
		.imm = TEST_IMM_2 >> 32,
	},
	{
		.code = (BPF_LD | BPF_IMM | EBPF_DW),
		.dst_reg = EBPF_REG_5,
		.imm = (uint32_t)TEST_IMM_3,
	},
	{
		.imm = TEST_IMM_3 >> 32,
	},
	{
		.code = (BPF_LD | BPF_IMM | EBPF_DW),
		.dst_reg = EBPF_REG_7,
		.imm = (uint32_t)TEST_IMM_4,
	},
	{
		.imm = TEST_IMM_4 >> 32,
	},
	{
		.code = (BPF_LD | BPF_IMM | EBPF_DW),
		.dst_reg = EBPF_REG_9,
		.imm = (uint32_t)TEST_IMM_5,
	},
	{
		.imm = TEST_IMM_5 >> 32,
	},
	/* return sum */
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_3,
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_5,
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_7,
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_9,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};

static int
test_ldimm1_check(uint64_t rc, const void *arg)
{
	uint64_t v1, v2;

	v1 = TEST_IMM_1;
	v2 = TEST_IMM_2;
	v1 += v2;
	v2 = TEST_IMM_3;
	v1 += v2;
	v2 = TEST_IMM_4;
	v1 += v2;
	v2 = TEST_IMM_5;
	v1 += v2;

	return cmp_res(__func__, v1, rc, arg, arg, 0);
}


/* alu mul test-cases */
static const struct ebpf_insn test_mul1_prog[] = {

	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u32),
	},
	{
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[1].u64),
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[2].u32),
	},
	{
		.code = (BPF_ALU | BPF_MUL | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = TEST_MUL_1,
	},
	{
		.code = (EBPF_ALU64 | BPF_MUL | BPF_K),
		.dst_reg = EBPF_REG_3,
		.imm = TEST_MUL_2,
	},
	{
		.code = (BPF_ALU | BPF_MUL | BPF_X),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_2,
	},
	{
		.code = (EBPF_ALU64 | BPF_MUL | BPF_X),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_3,
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_vect8, out[0].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_3,
		.off = offsetof(struct dummy_vect8, out[1].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_4,
		.off = offsetof(struct dummy_vect8, out[2].u64),
	},
	/* return 1 */
	{
		.code = (BPF_ALU | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 1,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};

static void
test_mul1_prepare(void *arg)
{
	struct dummy_vect8 *dv;
	uint64_t v;

	dv = arg;

	v = rte_rand();

	memset(dv, 0, sizeof(*dv));
	dv->in[0].u32 = v;
	dv->in[1].u64 = v << 12 | v >> 6;
	dv->in[2].u32 = -v;
}

static int
test_mul1_check(uint64_t rc, const void *arg)
{
	uint64_t r2, r3, r4;
	const struct dummy_vect8 *dvt;
	struct dummy_vect8 dve;

	dvt = arg;
	memset(&dve, 0, sizeof(dve));

	r2 = dvt->in[0].u32;
	r3 = dvt->in[1].u64;
	r4 = dvt->in[2].u32;

	r2 = (uint32_t)r2 * TEST_MUL_1;
	r3 *= TEST_MUL_2;
	r4 = (uint32_t)(r4 * r2);
	r4 *= r3;

	dve.out[0].u64 = r2;
	dve.out[1].u64 = r3;
	dve.out[2].u64 = r4;

	return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
}

/* alu shift test-cases */
static const struct ebpf_insn test_shift1_prog[] = {

	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u32),
	},
	{
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[1].u64),
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[2].u32),
	},
	{
		.code = (BPF_ALU | BPF_LSH | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = TEST_SHIFT_1,
	},
	{
		.code = (EBPF_ALU64 | EBPF_ARSH | BPF_K),
		.dst_reg = EBPF_REG_3,
		.imm = TEST_SHIFT_2,
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_vect8, out[0].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_3,
		.off = offsetof(struct dummy_vect8, out[1].u64),
	},
	{
		.code = (BPF_ALU | BPF_AND | BPF_K),
		.dst_reg = EBPF_REG_4,
		.imm = TEST_SHIFT64_MASK,
	},
	{
		.code = (EBPF_ALU64 | BPF_LSH | BPF_X),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_4,
	},
	{
		.code = (BPF_ALU | BPF_AND | BPF_K),
		.dst_reg = EBPF_REG_4,
		.imm = TEST_SHIFT32_MASK,
	},
	{
		.code = (BPF_ALU | BPF_RSH | BPF_X),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_4,
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_vect8, out[2].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_3,
		.off = offsetof(struct dummy_vect8, out[3].u64),
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u32),
	},
	{
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[1].u64),
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[2].u32),
	},
	{
		.code = (BPF_ALU | BPF_AND | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = TEST_SHIFT64_MASK,
	},
	{
		.code = (EBPF_ALU64 | EBPF_ARSH | BPF_X),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_2,
	},
	{
		.code = (BPF_ALU | BPF_AND | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = TEST_SHIFT32_MASK,
	},
	{
		.code = (BPF_ALU | BPF_LSH | BPF_X),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_2,
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_4,
		.off = offsetof(struct dummy_vect8, out[4].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_3,
		.off = offsetof(struct dummy_vect8, out[5].u64),
	},
	/* return 1 */
	{
		.code = (BPF_ALU | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 1,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};

static void
test_shift1_prepare(void *arg)
{
	struct dummy_vect8 *dv;
	uint64_t v;

	dv = arg;

	v = rte_rand();

	memset(dv, 0, sizeof(*dv));
	dv->in[0].u32 = v;
	dv->in[1].u64 = v << 12 | v >> 6;
	dv->in[2].u32 = (-v ^ 5);
}

static int
test_shift1_check(uint64_t rc, const void *arg)
{
	uint64_t r2, r3, r4;
	const struct dummy_vect8 *dvt;
	struct dummy_vect8 dve;

	dvt = arg;
	memset(&dve, 0, sizeof(dve));

	r2 = dvt->in[0].u32;
	r3 = dvt->in[1].u64;
	r4 = dvt->in[2].u32;

	r2 = (uint32_t)r2 << TEST_SHIFT_1;
	r3 = (int64_t)r3 >> TEST_SHIFT_2;

	dve.out[0].u64 = r2;
	dve.out[1].u64 = r3;

	r4 &= TEST_SHIFT64_MASK;
	r3 <<= r4;
	r4 &= TEST_SHIFT32_MASK;
	r2 = (uint32_t)r2 >> r4;

	dve.out[2].u64 = r2;
	dve.out[3].u64 = r3;

	r2 = dvt->in[0].u32;
	r3 = dvt->in[1].u64;
	r4 = dvt->in[2].u32;

	r2 &= TEST_SHIFT64_MASK;
	r3 = (int64_t)r3 >> r2;
	r2 &= TEST_SHIFT32_MASK;
	r4 = (uint32_t)r4 << r2;

	dve.out[4].u64 = r4;
	dve.out[5].u64 = r3;

	return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
}

/* jmp test-cases */
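/*
 * Each taken branch below lands on a block that ORs a distinct bit into
 * R0 and jumps back, so R0 accumulates a mask of the conditions that
 * held; the check recomputes the same mask in C.
 */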
static const struct ebpf_insn test_jump1_prog[] = {

	[0] = {
		.code = (BPF_ALU | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 0,
	},
	[1] = {
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u32),
	},
	[2] = {
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u64),
	},
	[3] = {
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[1].u32),
	},
	[4] = {
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_5,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[1].u64),
	},
	[5] = {
		.code = (BPF_JMP | BPF_JEQ | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = TEST_JCC_1,
		.off = 8,
	},
	[6] = {
		.code = (BPF_JMP | EBPF_JSLE | BPF_K),
		.dst_reg = EBPF_REG_3,
		.imm = TEST_JCC_2,
		.off = 9,
	},
	[7] = {
		.code = (BPF_JMP | BPF_JGT | BPF_K),
		.dst_reg = EBPF_REG_4,
		.imm = TEST_JCC_3,
		.off = 10,
	},
	[8] = {
		.code = (BPF_JMP | BPF_JSET | BPF_K),
		.dst_reg = EBPF_REG_5,
		.imm = TEST_JCC_4,
		.off = 11,
	},
	[9] = {
		.code = (BPF_JMP | EBPF_JNE | BPF_X),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_3,
		.off = 12,
	},
	[10] = {
		.code = (BPF_JMP | EBPF_JSGT | BPF_X),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_4,
		.off = 13,
	},
	[11] = {
		.code = (BPF_JMP | EBPF_JLE | BPF_X),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_5,
		.off = 14,
	},
	[12] = {
		.code = (BPF_JMP | BPF_JSET | BPF_X),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_5,
		.off = 15,
	},
	[13] = {
		.code = (BPF_JMP | EBPF_EXIT),
	},
	[14] = {
		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 0x1,
	},
	[15] = {
		.code = (BPF_JMP | BPF_JA),
		.off = -10,
	},
	[16] = {
		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 0x2,
	},
	[17] = {
		.code = (BPF_JMP | BPF_JA),
		.off = -11,
	},
	[18] = {
		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 0x4,
	},
	[19] = {
		.code = (BPF_JMP | BPF_JA),
		.off = -12,
	},
	[20] = {
		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 0x8,
	},
	[21] = {
		.code = (BPF_JMP | BPF_JA),
		.off = -13,
	},
	[22] = {
		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 0x10,
	},
	[23] = {
		.code = (BPF_JMP | BPF_JA),
		.off = -14,
	},
	[24] = {
		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 0x20,
	},
	[25] = {
		.code = (BPF_JMP | BPF_JA),
		.off = -15,
	},
	[26] = {
		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 0x40,
	},
	[27] = {
		.code = (BPF_JMP | BPF_JA),
		.off = -16,
	},
	[28] = {
		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 0x80,
	},
	[29] = {
		.code = (BPF_JMP | BPF_JA),
		.off = -17,
	},
};

static void
test_jump1_prepare(void *arg)
{
	struct dummy_vect8 *dv;
	uint64_t v1, v2;

	dv = arg;

	v1 = rte_rand();
	v2 = rte_rand();

	memset(dv, 0, sizeof(*dv));
	dv->in[0].u64 = v1;
	dv->in[1].u64 = v2;
	dv->in[0].u32 = (v1 << 12) + (v2 >> 6);
	dv->in[1].u32 = (v2 << 12) - (v1 >> 6);
}

static int
test_jump1_check(uint64_t rc, const void *arg)
{
	uint64_t r2, r3, r4, r5, rv;
	const struct dummy_vect8 *dvt;

	dvt = arg;

	rv = 0;
	r2 = dvt->in[0].u32;
	r3 = dvt->in[0].u64;
	r4 = dvt->in[1].u32;
	r5 = dvt->in[1].u64;

	if (r2 == TEST_JCC_1)
		rv |= 0x1;
	if ((int64_t)r3 <= TEST_JCC_2)
		rv |= 0x2;
	if (r4 > TEST_JCC_3)
		rv |= 0x4;
	if (r5 & TEST_JCC_4)
		rv |= 0x8;
	if (r2 != r3)
		rv |= 0x10;
	if ((int64_t)r2 > (int64_t)r4)
		rv |= 0x20;
	if (r2 <= r5)
		rv |= 0x40;
	if (r3 & r5)
		rv |= 0x80;

	return cmp_res(__func__, rv, rc, &rv, &rc, sizeof(rv));
}

/* Jump test case: check that ip4_dest is in a particular subnet */
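/*
 * The program walks an Ethernet header (optionally VLAN tagged) and
 * returns 0 if the IPv4 destination address belongs to
 * TEST_SUBNET/TEST_NETMASK, UINT64_MAX (-1) otherwise.
 */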
static const struct ebpf_insn test_jump2_prog[] = {

	[0] = {
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = 0xe,
	},
	[1] = {
		.code = (BPF_LDX | BPF_MEM | BPF_H),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_1,
		.off = 12,
	},
	[2] = {
		.code = (BPF_JMP | EBPF_JNE | BPF_K),
		.dst_reg = EBPF_REG_3,
		.off = 2,
		.imm = 0x81,
	},
	[3] = {
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = 0x12,
	},
	[4] = {
		.code = (BPF_LDX | BPF_MEM | BPF_H),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_1,
		.off = 16,
	},
	[5] = {
		.code = (EBPF_ALU64 | BPF_AND | BPF_K),
		.dst_reg = EBPF_REG_3,
		.imm = 0xffff,
	},
	[6] = {
		.code = (BPF_JMP | EBPF_JNE | BPF_K),
		.dst_reg = EBPF_REG_3,
		.off = 9,
		.imm = 0x8,
	},
	[7] = {
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
	},
	[8] = {
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 0,
	},
	[9] = {
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_1,
		.off = 16,
	},
	[10] = {
		.code = (BPF_ALU | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_3,
		.imm = TEST_NETMASK,
	},
	[11] = {
		.code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
		.dst_reg = EBPF_REG_3,
		.imm = sizeof(uint32_t) * CHAR_BIT,
	},
	[12] = {
		.code = (BPF_ALU | BPF_AND | BPF_X),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_3,
	},
	[13] = {
		.code = (BPF_ALU | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_3,
		.imm = TEST_SUBNET,
	},
	[14] = {
		.code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
		.dst_reg = EBPF_REG_3,
		.imm = sizeof(uint32_t) * CHAR_BIT,
	},
	[15] = {
		.code = (BPF_JMP | BPF_JEQ | BPF_X),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_3,
		.off = 1,
	},
	[16] = {
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = -1,
	},
	[17] = {
		.code = (BPF_JMP | EBPF_EXIT),
	},
};

/* Prepare a VLAN-tagged IPv4 packet */
static void
test_jump2_prepare(void *arg)
{
	struct dummy_net *dn;

	dn = arg;
	memset(dn, 0, sizeof(*dn));

	/*
	 * Initialize ether header.
	 */
	rte_ether_addr_copy((struct rte_ether_addr *)dst_mac,
		&dn->eth_hdr.dst_addr);
	rte_ether_addr_copy((struct rte_ether_addr *)src_mac,
		&dn->eth_hdr.src_addr);
	dn->eth_hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);

	/*
	 * Initialize vlan header.
	 */
	dn->vlan_hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
	dn->vlan_hdr.vlan_tci = 32;

	/*
	 * Initialize IP header.
	 */
	dn->ip_hdr.version_ihl = 0x45;	/* IP_VERSION | IP_HDRLEN */
	dn->ip_hdr.time_to_live = 64;	/* IP_DEFTTL */
	dn->ip_hdr.next_proto_id = IPPROTO_TCP;
	dn->ip_hdr.packet_id = rte_cpu_to_be_16(0x463c);
	dn->ip_hdr.total_length = rte_cpu_to_be_16(60);
	dn->ip_hdr.src_addr = rte_cpu_to_be_32(ip_src_addr);
	dn->ip_hdr.dst_addr = rte_cpu_to_be_32(ip_dst_addr);
}

static int
test_jump2_check(uint64_t rc, const void *arg)
{
	const struct rte_ether_hdr *eth_hdr = arg;
	const struct rte_ipv4_hdr *ipv4_hdr;
	const void *next = eth_hdr;
	uint16_t eth_type;
	uint64_t v = -1;

	if (eth_hdr->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
		const struct rte_vlan_hdr *vlan_hdr =
			(const void *)(eth_hdr + 1);
		eth_type = vlan_hdr->eth_proto;
		next = vlan_hdr + 1;
	} else {
		eth_type = eth_hdr->ether_type;
		next = eth_hdr + 1;
	}

	if (eth_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
		ipv4_hdr = next;
		if ((ipv4_hdr->dst_addr & rte_cpu_to_be_32(TEST_NETMASK)) ==
				rte_cpu_to_be_32(TEST_SUBNET)) {
			v = 0;
		}
	}

	return cmp_res(__func__, v, rc, arg, arg, sizeof(arg));
}

/* alu (add, sub, and, or, xor, neg) test-cases */
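/*
 * BPF_ALU opcodes operate on (and truncate to) the low 32 bits, while
 * EBPF_ALU64 opcodes use the full 64-bit registers; the (uint32_t)
 * casts in the check below mirror that difference.
 */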
static const struct ebpf_insn test_alu1_prog[] = {

	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u32),
	},
	{
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u64),
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[1].u32),
	},
	{
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_5,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[1].u64),
	},
	{
		.code = (BPF_ALU | BPF_AND | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = TEST_FILL_1,
	},
	{
		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
		.dst_reg = EBPF_REG_3,
		.imm = TEST_FILL_1,
	},
	{
		.code = (BPF_ALU | BPF_XOR | BPF_K),
		.dst_reg = EBPF_REG_4,
		.imm = TEST_FILL_1,
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_K),
		.dst_reg = EBPF_REG_5,
		.imm = TEST_FILL_1,
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_vect8, out[0].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_3,
		.off = offsetof(struct dummy_vect8, out[1].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_4,
		.off = offsetof(struct dummy_vect8, out[2].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_5,
		.off = offsetof(struct dummy_vect8, out[3].u64),
	},
	{
		.code = (BPF_ALU | BPF_OR | BPF_X),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_3,
	},
	{
		.code = (EBPF_ALU64 | BPF_XOR | BPF_X),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_4,
	},
	{
		.code = (BPF_ALU | BPF_SUB | BPF_X),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_5,
	},
	{
		.code = (EBPF_ALU64 | BPF_AND | BPF_X),
		.dst_reg = EBPF_REG_5,
		.src_reg = EBPF_REG_2,
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_vect8, out[4].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_3,
		.off = offsetof(struct dummy_vect8, out[5].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_4,
		.off = offsetof(struct dummy_vect8, out[6].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_5,
		.off = offsetof(struct dummy_vect8, out[7].u64),
	},
	/* return (-r2 + (-r3)) */
	{
		.code = (BPF_ALU | BPF_NEG),
		.dst_reg = EBPF_REG_2,
	},
	{
		.code = (EBPF_ALU64 | BPF_NEG),
		.dst_reg = EBPF_REG_3,
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_3,
	},
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_2,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};

static int
test_alu1_check(uint64_t rc, const void *arg)
{
	uint64_t r2, r3, r4, r5, rv;
	const struct dummy_vect8 *dvt;
	struct dummy_vect8 dve;

	dvt = arg;
	memset(&dve, 0, sizeof(dve));

	r2 = dvt->in[0].u32;
	r3 = dvt->in[0].u64;
	r4 = dvt->in[1].u32;
	r5 = dvt->in[1].u64;

	r2 = (uint32_t)r2 & TEST_FILL_1;
	r3 |= (int32_t)TEST_FILL_1;
	r4 = (uint32_t)r4 ^ TEST_FILL_1;
	r5 += (int32_t)TEST_FILL_1;

	dve.out[0].u64 = r2;
	dve.out[1].u64 = r3;
	dve.out[2].u64 = r4;
	dve.out[3].u64 = r5;

	r2 = (uint32_t)r2 | (uint32_t)r3;
	r3 ^= r4;
	r4 = (uint32_t)r4 - (uint32_t)r5;
	r5 &= r2;

	dve.out[4].u64 = r2;
	dve.out[5].u64 = r3;
	dve.out[6].u64 = r4;
	dve.out[7].u64 = r5;

	r2 = -(int32_t)r2;
	rv = (uint32_t)r2;
	r3 = -r3;
	rv += r3;

	return cmp_res(__func__, rv, rc, dve.out, dvt->out, sizeof(dve.out));
}

/* endianness conversions (BE->LE/LE->BE) test-cases */
static const struct ebpf_insn test_bele1_prog[] = {

	{
		.code = (BPF_LDX | BPF_MEM | BPF_H),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u16),
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u32),
	},
	{
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u64),
	},
	{
		.code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
		.dst_reg = EBPF_REG_2,
		.imm = sizeof(uint16_t) * CHAR_BIT,
	},
	{
		.code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
		.dst_reg = EBPF_REG_3,
		.imm = sizeof(uint32_t) * CHAR_BIT,
	},
	{
		.code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
		.dst_reg = EBPF_REG_4,
		.imm = sizeof(uint64_t) * CHAR_BIT,
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_vect8, out[0].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_3,
		.off = offsetof(struct dummy_vect8, out[1].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_4,
		.off = offsetof(struct dummy_vect8, out[2].u64),
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_H),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u16),
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u32),
	},
	{
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u64),
	},
	{
		.code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
		.dst_reg = EBPF_REG_2,
		.imm = sizeof(uint16_t) * CHAR_BIT,
	},
	{
		.code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
		.dst_reg = EBPF_REG_3,
		.imm = sizeof(uint32_t) * CHAR_BIT,
	},
	{
		.code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
		.dst_reg = EBPF_REG_4,
		.imm = sizeof(uint64_t) * CHAR_BIT,
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_vect8, out[3].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_3,
		.off = offsetof(struct dummy_vect8, out[4].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_4,
		.off = offsetof(struct dummy_vect8, out[5].u64),
	},
	/* return 1 */
	{
		.code = (BPF_ALU | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 1,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};

static void
test_bele1_prepare(void *arg)
{
	struct dummy_vect8 *dv;

	dv = arg;

	memset(dv, 0, sizeof(*dv));
	dv->in[0].u64 = rte_rand();
	dv->in[0].u32 = dv->in[0].u64;
	dv->in[0].u16 = dv->in[0].u64;
}

static int
test_bele1_check(uint64_t rc, const void *arg)
{
	uint64_t r2, r3, r4;
	const struct dummy_vect8 *dvt;
	struct dummy_vect8 dve;

	dvt = arg;
	memset(&dve, 0, sizeof(dve));

	r2 = dvt->in[0].u16;
	r3 = dvt->in[0].u32;
	r4 = dvt->in[0].u64;

	r2 = rte_cpu_to_be_16(r2);
	r3 = rte_cpu_to_be_32(r3);
	r4 = rte_cpu_to_be_64(r4);

	dve.out[0].u64 = r2;
	dve.out[1].u64 = r3;
	dve.out[2].u64 = r4;

	r2 = dvt->in[0].u16;
	r3 = dvt->in[0].u32;
	r4 = dvt->in[0].u64;

	r2 = rte_cpu_to_le_16(r2);
	r3 = rte_cpu_to_le_32(r3);
	r4 = rte_cpu_to_le_64(r4);

	dve.out[3].u64 = r2;
	dve.out[4].u64 = r3;
	dve.out[5].u64 = r4;

	return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
}

/* atomic add test-cases */
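/*
 * EBPF_XADD performs an atomic fetch-and-add on the u32/u64 fields;
 * the check replays the same sequence with __atomic_fetch_add().
 */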
static const struct ebpf_insn test_xadd1_prog[] = {

	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = 1,
	},
	{
		.code = (BPF_STX | EBPF_XADD | BPF_W),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_offset, u32),
	},
	{
		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_offset, u64),
	},
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_3,
		.imm = -1,
	},
	{
		.code = (BPF_STX | EBPF_XADD | BPF_W),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_3,
		.off = offsetof(struct dummy_offset, u32),
	},
	{
		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_3,
		.off = offsetof(struct dummy_offset, u64),
	},
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_4,
		.imm = TEST_FILL_1,
	},
	{
		.code = (BPF_STX | EBPF_XADD | BPF_W),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_4,
		.off = offsetof(struct dummy_offset, u32),
	},
	{
		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_4,
		.off = offsetof(struct dummy_offset, u64),
	},
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_5,
		.imm = TEST_MUL_1,
	},
	{
		.code = (BPF_STX | EBPF_XADD | BPF_W),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_5,
		.off = offsetof(struct dummy_offset, u32),
	},
	{
		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_5,
		.off = offsetof(struct dummy_offset, u64),
	},
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_6,
		.imm = TEST_MUL_2,
	},
	{
		.code = (BPF_STX | EBPF_XADD | BPF_W),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_6,
		.off = offsetof(struct dummy_offset, u32),
	},
	{
		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_6,
		.off = offsetof(struct dummy_offset, u64),
	},
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_7,
		.imm = TEST_JCC_2,
	},
	{
		.code = (BPF_STX | EBPF_XADD | BPF_W),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_7,
		.off = offsetof(struct dummy_offset, u32),
	},
	{
		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_7,
		.off = offsetof(struct dummy_offset, u64),
	},
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_8,
		.imm = TEST_JCC_3,
	},
	{
		.code = (BPF_STX | EBPF_XADD | BPF_W),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_8,
		.off = offsetof(struct dummy_offset, u32),
	},
	{
		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_8,
		.off = offsetof(struct dummy_offset, u64),
	},
	/* return 1 */
	{
		.code = (BPF_ALU | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 1,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};

static int
test_xadd1_check(uint64_t rc, const void *arg)
{
	uint64_t rv;
	const struct dummy_offset *dft;
	struct dummy_offset dfe;

	dft = arg;
	memset(&dfe, 0, sizeof(dfe));

	rv = 1;
	__atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
	__atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);

	rv = -1;
	__atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
	__atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);

	rv = (int32_t)TEST_FILL_1;
	__atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
	__atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);

	rv = TEST_MUL_1;
	__atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
	__atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);

	rv = TEST_MUL_2;
	__atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
	__atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);

	rv = TEST_JCC_2;
	__atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
	__atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);

	rv = TEST_JCC_3;
	__atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
	__atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);

	return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe));
}

/* alu div test-cases */
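/*
 * Unlike native integer division, an eBPF division by zero must not
 * fault: the run is aborted and the program returns 0, which is exactly
 * what the check below expects.
 */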
static const struct ebpf_insn test_div1_prog[] = {

	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u32),
	},
	{
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[1].u64),
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[2].u32),
	},
	{
		.code = (BPF_ALU | BPF_DIV | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = TEST_MUL_1,
	},
	{
		.code = (EBPF_ALU64 | BPF_MOD | BPF_K),
		.dst_reg = EBPF_REG_3,
		.imm = TEST_MUL_2,
	},
	{
		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = 1,
	},
	{
		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
		.dst_reg = EBPF_REG_3,
		.imm = 1,
	},
	{
		.code = (BPF_ALU | BPF_MOD | BPF_X),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_2,
	},
	{
		.code = (EBPF_ALU64 | BPF_DIV | BPF_X),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_3,
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_vect8, out[0].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_3,
		.off = offsetof(struct dummy_vect8, out[1].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_4,
		.off = offsetof(struct dummy_vect8, out[2].u64),
	},
	/* check that we can handle division by zero gracefully. */
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[3].u32),
	},
	{
		.code = (BPF_ALU | BPF_DIV | BPF_X),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_2,
	},
	/* return 1 */
	{
		.code = (BPF_ALU | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 1,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};

static int
test_div1_check(uint64_t rc, const void *arg)
{
	uint64_t r2, r3, r4;
	const struct dummy_vect8 *dvt;
	struct dummy_vect8 dve;

	dvt = arg;
	memset(&dve, 0, sizeof(dve));

	r2 = dvt->in[0].u32;
	r3 = dvt->in[1].u64;
	r4 = dvt->in[2].u32;

	r2 = (uint32_t)r2 / TEST_MUL_1;
	r3 %= TEST_MUL_2;
	r2 |= 1;
	r3 |= 1;
	r4 = (uint32_t)(r4 % r2);
	r4 /= r3;

	dve.out[0].u64 = r2;
	dve.out[1].u64 = r3;
	dve.out[2].u64 = r4;

	/*
	 * In the test program we attempted to divide by zero,
	 * so the expected return value is 0.
	 */
	return cmp_res(__func__, 0, rc, dve.out, dvt->out, sizeof(dve.out));
}

/* call test-cases */
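/*
 * The imm field of an EBPF_CALL instruction selects an entry in the
 * rte_bpf_xsym array supplied through struct rte_bpf_prm, so these
 * programs call back into the dummy_func* helpers defined below.
 */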
static const struct ebpf_insn test_call1_prog[] = {

	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_offset, u32),
	},
	{
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_offset, u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_10,
		.src_reg = EBPF_REG_2,
		.off = -4,
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_10,
		.src_reg = EBPF_REG_3,
		.off = -16,
	},
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_10,
	},
	{
		.code = (EBPF_ALU64 | BPF_SUB | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = 4,
	},
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_10,
	},
	{
		.code = (EBPF_ALU64 | BPF_SUB | BPF_K),
		.dst_reg = EBPF_REG_3,
		.imm = 16,
	},
	{
		.code = (BPF_JMP | EBPF_CALL),
		.imm = 0,
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_10,
		.off = -4,
	},
	{
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_10,
		.off = -16,
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_2,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};

static void
dummy_func1(const void *p, uint32_t *v32, uint64_t *v64)
{
	const struct dummy_offset *dv;

	dv = p;

	v32[0] += dv->u16;
	v64[0] += dv->u8;
}

static int
test_call1_check(uint64_t rc, const void *arg)
{
	uint32_t v32;
	uint64_t v64;
	const struct dummy_offset *dv;

	dv = arg;

	v32 = dv->u32;
	v64 = dv->u64;
	dummy_func1(arg, &v32, &v64);
	v64 += v32;

	return cmp_res(__func__, v64, rc, dv, dv, sizeof(*dv));
}

static const struct rte_bpf_xsym test_call1_xsym[] = {
	{
		.name = RTE_STR(dummy_func1),
		.type = RTE_BPF_XTYPE_FUNC,
		.func = {
			.val = (void *)dummy_func1,
			.nb_args = 3,
			.args = {
				[0] = {
					.type = RTE_BPF_ARG_PTR,
					.size = sizeof(struct dummy_offset),
				},
				[1] = {
					.type = RTE_BPF_ARG_PTR,
					.size = sizeof(uint32_t),
				},
				[2] = {
					.type = RTE_BPF_ARG_PTR,
					.size = sizeof(uint64_t),
				},
			},
		},
	},
};

static const struct ebpf_insn test_call2_prog[] = {

	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_10,
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_K),
		.dst_reg = EBPF_REG_1,
		.imm = -(int32_t)sizeof(struct dummy_offset),
	},
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_10,
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = -2 * (int32_t)sizeof(struct dummy_offset),
	},
	{
		.code = (BPF_JMP | EBPF_CALL),
		.imm = 0,
	},
	{
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_10,
		.off = -(int32_t)(sizeof(struct dummy_offset) -
			offsetof(struct dummy_offset, u64)),
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_10,
		.off = -(int32_t)(sizeof(struct dummy_offset) -
			offsetof(struct dummy_offset, u32)),
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_1,
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_H),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_10,
		.off = -(int32_t)(2 * sizeof(struct dummy_offset) -
			offsetof(struct dummy_offset, u16)),
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_1,
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_B),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_10,
		.off = -(int32_t)(2 * sizeof(struct dummy_offset) -
			offsetof(struct dummy_offset, u8)),
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_1,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},

};

static void
dummy_func2(struct dummy_offset *a, struct dummy_offset *b)
{
	uint64_t v;

	v = 0;
	a->u64 = v++;
	a->u32 = v++;
	a->u16 = v++;
	a->u8 = v++;
	b->u64 = v++;
	b->u32 = v++;
	b->u16 = v++;
	b->u8 = v++;
}

static int
test_call2_check(uint64_t rc, const void *arg)
{
	uint64_t v;
	struct dummy_offset a, b;

	RTE_SET_USED(arg);

	dummy_func2(&a, &b);
	v = a.u64 + a.u32 + b.u16 + b.u8;

	return cmp_res(__func__, v, rc, arg, arg, 0);
}

static const struct rte_bpf_xsym test_call2_xsym[] = {
	{
		.name = RTE_STR(dummy_func2),
		.type = RTE_BPF_XTYPE_FUNC,
		.func = {
			.val = (void *)dummy_func2,
			.nb_args = 2,
			.args = {
				[0] = {
					.type = RTE_BPF_ARG_PTR,
					.size = sizeof(struct dummy_offset),
				},
				[1] = {
					.type = RTE_BPF_ARG_PTR,
					.size = sizeof(struct dummy_offset),
				},
			},
		},
	},
};

static const struct ebpf_insn test_call3_prog[] = {

	{
		.code = (BPF_JMP | EBPF_CALL),
		.imm = 0,
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_B),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_0,
		.off = offsetof(struct dummy_offset, u8),
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_H),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_0,
		.off = offsetof(struct dummy_offset, u16),
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_0,
		.off = offsetof(struct dummy_offset, u32),
	},
	{
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_0,
		.off = offsetof(struct dummy_offset, u64),
	},
	/* return sum */
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_4,
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_3,
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_2,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};

static const struct dummy_offset *
dummy_func3(const struct dummy_vect8 *p)
{
	return &p->in[RTE_DIM(p->in) - 1];
}

static void
test_call3_prepare(void *arg)
{
	struct dummy_vect8 *pv;
	struct dummy_offset *df;

	pv = arg;
	df = (struct dummy_offset *)(uintptr_t)dummy_func3(pv);

	memset(pv, 0, sizeof(*pv));
	df->u64 = (int32_t)TEST_FILL_1;
	df->u32 = df->u64;
	df->u16 = df->u64;
	df->u8 = df->u64;
}

static int
test_call3_check(uint64_t rc, const void *arg)
{
	uint64_t v;
	const struct dummy_vect8 *pv;
	const struct dummy_offset *dft;

	pv = arg;
	dft = dummy_func3(pv);

	v = dft->u64;
	v += dft->u32;
	v += dft->u16;
	v += dft->u8;

	return cmp_res(__func__, v, rc, pv, pv, sizeof(*pv));
}

static const struct rte_bpf_xsym test_call3_xsym[] = {
	{
		.name = RTE_STR(dummy_func3),
		.type = RTE_BPF_XTYPE_FUNC,
		.func = {
			.val = (void *)dummy_func3,
			.nb_args = 1,
			.args = {
				[0] = {
					.type = RTE_BPF_ARG_PTR,
					.size = sizeof(struct dummy_vect8),
				},
			},
			.ret = {
				.type = RTE_BPF_ARG_PTR,
				.size = sizeof(struct dummy_offset),
			},
		},
	},
};

/* Test for stack corruption in multiple function calls */
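/*
 * R10 is the read-only frame pointer. The program stores bytes 1..4 at
 * R10-4..R10-1, lets dummy_func4_0() memfrob them in place, gathers
 * them back with dummy_func4_1() and un-frobs the result by XORing it
 * with TEST_MEMFROB, so it should return the original 0x01020304.
 */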
static const struct ebpf_insn test_call4_prog[] = {
	{
		.code = (BPF_ST | BPF_MEM | BPF_B),
		.dst_reg = EBPF_REG_10,
		.off = -4,
		.imm = 1,
	},
	{
		.code = (BPF_ST | BPF_MEM | BPF_B),
		.dst_reg = EBPF_REG_10,
		.off = -3,
		.imm = 2,
	},
	{
		.code = (BPF_ST | BPF_MEM | BPF_B),
		.dst_reg = EBPF_REG_10,
		.off = -2,
		.imm = 3,
	},
	{
		.code = (BPF_ST | BPF_MEM | BPF_B),
		.dst_reg = EBPF_REG_10,
		.off = -1,
		.imm = 4,
	},
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_10,
	},
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = 4,
	},
	{
		.code = (EBPF_ALU64 | BPF_SUB | BPF_X),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
	},
	{
		.code = (BPF_JMP | EBPF_CALL),
		.imm = 0,
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_B),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_10,
		.off = -4,
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_B),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_10,
		.off = -3,
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_B),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_10,
		.off = -2,
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_B),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_10,
		.off = -1,
	},
	{
		.code = (BPF_JMP | EBPF_CALL),
		.imm = 1,
	},
	{
		.code = (EBPF_ALU64 | BPF_XOR | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = TEST_MEMFROB,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};

/* Gather the four bytes back into one 32-bit word */
static uint32_t
dummy_func4_1(uint8_t a, uint8_t b, uint8_t c, uint8_t d)
{
	return (a << 24) | (b << 16) | (c << 8) | (d << 0);
}

/* Implementation of memfrob: XOR each of the n bytes with 42 */
static uint32_t
dummy_func4_0(uint32_t *s, uint8_t n)
{
	char *p = (char *)s;
	while (n-- > 0)
		*p++ ^= 42;
	return *s;
}


static int
test_call4_check(uint64_t rc, const void *arg)
{
	uint8_t a[4] = {1, 2, 3, 4};
	uint32_t s, v = 0;

	RTE_SET_USED(arg);

	s = dummy_func4_0((uint32_t *)a, 4);

	s = dummy_func4_1(a[0], a[1], a[2], a[3]);

	v = s ^ TEST_MEMFROB;

	return cmp_res(__func__, v, rc, &v, &rc, sizeof(v));
}

static const struct rte_bpf_xsym test_call4_xsym[] = {
	[0] = {
		.name = RTE_STR(dummy_func4_0),
		.type = RTE_BPF_XTYPE_FUNC,
		.func = {
			.val = (void *)dummy_func4_0,
			.nb_args = 2,
			.args = {
				[0] = {
					.type = RTE_BPF_ARG_PTR,
					.size = 4 * sizeof(uint8_t),
				},
				[1] = {
					.type = RTE_BPF_ARG_RAW,
					.size = sizeof(uint8_t),
				},
			},
			.ret = {
				.type = RTE_BPF_ARG_RAW,
				.size = sizeof(uint32_t),
			},
		},
	},
	[1] = {
		.name = RTE_STR(dummy_func4_1),
		.type = RTE_BPF_XTYPE_FUNC,
		.func = {
			.val = (void *)dummy_func4_1,
			.nb_args = 4,
			.args = {
				[0] = {
					.type = RTE_BPF_ARG_RAW,
					.size = sizeof(uint8_t),
				},
				[1] = {
					.type = RTE_BPF_ARG_RAW,
					.size = sizeof(uint8_t),
				},
				[2] = {
					.type = RTE_BPF_ARG_RAW,
					.size = sizeof(uint8_t),
				},
				[3] = {
					.type = RTE_BPF_ARG_RAW,
					.size = sizeof(uint8_t),
				},
			},
			.ret = {
				.type = RTE_BPF_ARG_RAW,
				.size = sizeof(uint32_t),
			},
		},
	},
};

/* string compare test case */
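/*
 * Builds the NUL-terminated strings "geek" and "week" on the stack
 * (the STRING_* words plus a zero byte) and calls the strcmp-like
 * dummy_func5() on ("geek", "geek") and ("geek", "week"); the program
 * returns -1 if the first comparison reports a difference, otherwise 0.
 */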
2276 static const struct ebpf_insn test_call5_prog[] = {
2277
2278 [0] = {
2279 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
2280 .dst_reg = EBPF_REG_1,
2281 .imm = STRING_GEEK,
2282 },
2283 [1] = {
2284 .code = (BPF_STX | BPF_MEM | BPF_W),
2285 .dst_reg = EBPF_REG_10,
2286 .src_reg = EBPF_REG_1,
2287 .off = -8,
2288 },
2289 [2] = {
2290 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
2291 .dst_reg = EBPF_REG_6,
2292 .imm = 0,
2293 },
2294 [3] = {
2295 .code = (BPF_STX | BPF_MEM | BPF_B),
2296 .dst_reg = EBPF_REG_10,
2297 .src_reg = EBPF_REG_6,
2298 .off = -4,
2299 },
2300 [4] = {
2301 .code = (BPF_STX | BPF_MEM | BPF_W),
2302 .dst_reg = EBPF_REG_10,
2303 .src_reg = EBPF_REG_6,
2304 .off = -12,
2305 },
2306 [5] = {
2307 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
2308 .dst_reg = EBPF_REG_1,
2309 .imm = STRING_WEEK,
2310 },
2311 [6] = {
2312 .code = (BPF_STX | BPF_MEM | BPF_W),
2313 .dst_reg = EBPF_REG_10,
2314 .src_reg = EBPF_REG_1,
2315 .off = -16,
2316 },
2317 [7] = {
2318 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2319 .dst_reg = EBPF_REG_1,
2320 .src_reg = EBPF_REG_10,
2321 },
2322 [8] = {
2323 .code = (EBPF_ALU64 | BPF_ADD | BPF_K),
2324 .dst_reg = EBPF_REG_1,
2325 .imm = -8,
2326 },
2327 [9] = {
2328 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2329 .dst_reg = EBPF_REG_2,
2330 .src_reg = EBPF_REG_1,
2331 },
2332 [10] = {
2333 .code = (BPF_JMP | EBPF_CALL),
2334 .imm = 0,
2335 },
2336 [11] = {
2337 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2338 .dst_reg = EBPF_REG_1,
2339 .src_reg = EBPF_REG_0,
2340 },
2341 [12] = {
2342 .code = (BPF_ALU | EBPF_MOV | BPF_K),
2343 .dst_reg = EBPF_REG_0,
2344 .imm = -1,
2345 },
2346 [13] = {
2347 .code = (EBPF_ALU64 | BPF_LSH | BPF_K),
2348 .dst_reg = EBPF_REG_1,
2349 .imm = 0x20,
2350 },
2351 [14] = {
2352 .code = (EBPF_ALU64 | BPF_RSH | BPF_K),
2353 .dst_reg = EBPF_REG_1,
2354 .imm = 0x20,
2355 },
2356 [15] = {
2357 .code = (BPF_JMP | EBPF_JNE | BPF_K),
2358 .dst_reg = EBPF_REG_1,
2359 .off = 11,
2360 .imm = 0,
2361 },
2362 [16] = {
2363 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2364 .dst_reg = EBPF_REG_1,
2365 .src_reg = EBPF_REG_10,
2366 },
2367 [17] = {
2368 .code = (EBPF_ALU64 | BPF_ADD | BPF_K),
2369 .dst_reg = EBPF_REG_1,
2370 .imm = -8,
2371 },
2372 [18] = {
2373 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2374 .dst_reg = EBPF_REG_2,
2375 .src_reg = EBPF_REG_10,
2376 },
2377 [19] = {
2378 .code = (EBPF_ALU64 | BPF_ADD | BPF_K),
2379 .dst_reg = EBPF_REG_2,
2380 .imm = -16,
2381 },
2382 [20] = {
2383 .code = (BPF_JMP | EBPF_CALL),
2384 .imm = 0,
2385 },
	[21] = {
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_0,
	},
	[22] = {
		.code = (EBPF_ALU64 | BPF_LSH | BPF_K),
		.dst_reg = EBPF_REG_1,
		.imm = 0x20,
	},
	[23] = {
		.code = (EBPF_ALU64 | BPF_RSH | BPF_K),
		.dst_reg = EBPF_REG_1,
		.imm = 0x20,
	},
	[24] = {
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_1,
	},
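	/*
	 * if the result is zero, keep R0 (already zero); otherwise
	 * clear R0 explicitly; either way the program returns 0 here
	 */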
	[25] = {
		.code = (BPF_JMP | BPF_JEQ | BPF_X),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_6,
		.off = 1,
	},
	[26] = {
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 0,
	},
	[27] = {
		.code = (BPF_JMP | EBPF_EXIT),
	},
};

/*
 * strcmp-like string comparison: returns 0 if the strings are equal,
 * else the difference of the first mismatching bytes (truncated to
 * uint32_t).
 */
static uint32_t
dummy_func5(const char *s1, const char *s2)
{
	while (*s1 && (*s1 == *s2)) {
		s1++;
		s2++;
	}
	return *(const unsigned char *)s1 - *(const unsigned char *)s2;
}

static int
test_call5_check(uint64_t rc, const void *arg)
{
	char a[] = "geek";
	char b[] = "week";
	uint32_t v;

	RTE_SET_USED(arg);

	v = dummy_func5(a, a);
	if (v != 0) {
		v = -1;
		goto fail;
	}

	v = dummy_func5(a, b);
	if (v == 0)
		goto fail;

	v = 0;

fail:
	return cmp_res(__func__, v, rc, &v, &rc, sizeof(v));
}

static const struct rte_bpf_xsym test_call5_xsym[] = {
	[0] = {
		.name = RTE_STR(dummy_func5),
		.type = RTE_BPF_XTYPE_FUNC,
		.func = {
			.val = (void *)dummy_func5,
			.nb_args = 2,
			.args = {
				[0] = {
					.type = RTE_BPF_ARG_PTR,
					.size = sizeof(char),
				},
				[1] = {
					.type = RTE_BPF_ARG_PTR,
					.size = sizeof(char),
				},
			},
			.ret = {
				.type = RTE_BPF_ARG_RAW,
				.size = sizeof(uint32_t),
			},
		},
	},
};

/* load mbuf (BPF_ABS/BPF_IND) test-cases */
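/*
 * The program below mirrors the C helper test_ld_mbuf1(): BPF_ABS/BPF_IND
 * loads fetch packet bytes (converted to host byte order) and, as in
 * classic BPF, an out-of-bounds access terminates the program with a
 * return value of 0.
 */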
static const struct ebpf_insn test_ld_mbuf1_prog[] = {

	/* BPF_ABS/BPF_IND implicitly expect mbuf ptr in R6 */
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
		.dst_reg = EBPF_REG_6,
		.src_reg = EBPF_REG_1,
	},
	/* load IPv4 version and IHL */
	{
		.code = (BPF_LD | BPF_ABS | BPF_B),
		.imm = offsetof(struct rte_ipv4_hdr, version_ihl),
	},
	/* check IP version */
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_0,
	},
	{
		.code = (BPF_ALU | BPF_AND | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = 0xf0,
	},
	{
		.code = (BPF_JMP | BPF_JEQ | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = IPVERSION << 4,
		.off = 2,
	},
	/* invalid IP version, return 0 */
	{
		.code = (EBPF_ALU64 | BPF_XOR | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_0,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
	/* load 3rd byte of IP data */
	{
		.code = (BPF_ALU | BPF_AND | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = RTE_IPV4_HDR_IHL_MASK,
	},
	{
		.code = (BPF_ALU | BPF_LSH | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 2,
	},
	{
		.code = (BPF_LD | BPF_IND | BPF_B),
		.src_reg = EBPF_REG_0,
		.imm = 3,
	},
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
		.dst_reg = EBPF_REG_7,
		.src_reg = EBPF_REG_0,
	},
	/* load IPv4 src addr */
	{
		.code = (BPF_LD | BPF_ABS | BPF_W),
		.imm = offsetof(struct rte_ipv4_hdr, src_addr),
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_7,
		.src_reg = EBPF_REG_0,
	},
	/* load IPv4 total length */
	{
		.code = (BPF_LD | BPF_ABS | BPF_H),
		.imm = offsetof(struct rte_ipv4_hdr, total_length),
	},
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
		.dst_reg = EBPF_REG_8,
		.src_reg = EBPF_REG_0,
	},
	/* load last 4 bytes of IP data */
	{
		.code = (BPF_LD | BPF_IND | BPF_W),
		.src_reg = EBPF_REG_8,
		.imm = -(int32_t)sizeof(uint32_t),
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_7,
		.src_reg = EBPF_REG_0,
	},
	/* load 2 bytes from the middle of IP data */
	{
		.code = (EBPF_ALU64 | BPF_RSH | BPF_K),
		.dst_reg = EBPF_REG_8,
		.imm = 1,
	},
	{
		.code = (BPF_LD | BPF_IND | BPF_H),
		.src_reg = EBPF_REG_8,
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_7,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};

static void
dummy_mbuf_prep(struct rte_mbuf *mb, uint8_t buf[], uint32_t buf_len,
	uint32_t data_len)
{
	uint32_t i;
	uint8_t *db;

	mb->buf_addr = buf;
	mb->buf_iova = (uintptr_t)buf;
	mb->buf_len = buf_len;
	rte_mbuf_refcnt_set(mb, 1);

	/* set pool pointer to dummy value, test doesn't use it */
	mb->pool = (void *)buf;

	rte_pktmbuf_reset(mb);
	db = (uint8_t *)rte_pktmbuf_append(mb, data_len);
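	/* append cannot fail here: every caller passes data_len << buf_len */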

	for (i = 0; i != data_len; i++)
		db[i] = i;
}

static void
test_ld_mbuf1_prepare(void *arg)
{
	struct dummy_mbuf *dm;
	struct rte_ipv4_hdr *ph;

	const uint32_t plen = 400;
	const struct rte_ipv4_hdr iph = {
		.version_ihl = RTE_IPV4_VHL_DEF,
		.total_length = rte_cpu_to_be_16(plen),
		.time_to_live = IPDEFTTL,
		.next_proto_id = IPPROTO_RAW,
		.src_addr = rte_cpu_to_be_32(RTE_IPV4_LOOPBACK),
		.dst_addr = rte_cpu_to_be_32(RTE_IPV4_BROADCAST),
	};

	dm = arg;
	memset(dm, 0, sizeof(*dm));

	dummy_mbuf_prep(&dm->mb[0], dm->buf[0], sizeof(dm->buf[0]),
		plen / 2 + 1);
	dummy_mbuf_prep(&dm->mb[1], dm->buf[1], sizeof(dm->buf[1]),
		plen / 2 - 1);

	rte_pktmbuf_chain(&dm->mb[0], &dm->mb[1]);

	ph = rte_pktmbuf_mtod(dm->mb, typeof(ph));
	memcpy(ph, &iph, sizeof(iph));
}

static uint64_t
test_ld_mbuf1(const struct rte_mbuf *pkt)
{
	uint64_t n, v;
	const uint8_t *p8;
	const uint16_t *p16;
	const uint32_t *p32;
	struct dummy_offset dof;

	/* load IPv4 version and IHL */
	p8 = rte_pktmbuf_read(pkt,
		offsetof(struct rte_ipv4_hdr, version_ihl), sizeof(*p8),
		&dof);
	if (p8 == NULL)
		return 0;

	/* check IP version */
	if ((p8[0] & 0xf0) != IPVERSION << 4)
		return 0;

	n = (p8[0] & RTE_IPV4_HDR_IHL_MASK) * RTE_IPV4_IHL_MULTIPLIER;

	/* load 3rd byte of IP data */
	p8 = rte_pktmbuf_read(pkt, n + 3, sizeof(*p8), &dof);
	if (p8 == NULL)
		return 0;

	v = p8[0];

	/* load IPv4 src addr */
	p32 = rte_pktmbuf_read(pkt,
		offsetof(struct rte_ipv4_hdr, src_addr), sizeof(*p32),
		&dof);
	if (p32 == NULL)
		return 0;

	v += rte_be_to_cpu_32(p32[0]);

	/* load IPv4 total length */
	p16 = rte_pktmbuf_read(pkt,
		offsetof(struct rte_ipv4_hdr, total_length), sizeof(*p16),
		&dof);
	if (p16 == NULL)
		return 0;

	n = rte_be_to_cpu_16(p16[0]);

	/* load last 4 bytes of IP data */
	p32 = rte_pktmbuf_read(pkt, n - sizeof(*p32), sizeof(*p32), &dof);
	if (p32 == NULL)
		return 0;

	v += rte_be_to_cpu_32(p32[0]);

	/* load 2 bytes from the middle of IP data */
	p16 = rte_pktmbuf_read(pkt, n / 2, sizeof(*p16), &dof);
	if (p16 == NULL)
		return 0;

	v += rte_be_to_cpu_16(p16[0]);
	return v;
}

static int
test_ld_mbuf1_check(uint64_t rc, const void *arg)
{
	const struct dummy_mbuf *dm;
	uint64_t v;

	dm = arg;
	v = test_ld_mbuf1(dm->mb);
	return cmp_res(__func__, v, rc, arg, arg, 0);
}

/*
 * same as ld_mbuf1, but then truncate the mbuf by 1B,
 * so the load of the last 4B fails.
 */
static void
test_ld_mbuf2_prepare(void *arg)
{
	struct dummy_mbuf *dm;

	test_ld_mbuf1_prepare(arg);
	dm = arg;
	rte_pktmbuf_trim(dm->mb, 1);
}

static int
test_ld_mbuf2_check(uint64_t rc, const void *arg)
{
	return cmp_res(__func__, 0, rc, arg, arg, 0);
}

/* same as test_ld_mbuf1, but now store intermediate results on the stack */
static const struct ebpf_insn test_ld_mbuf3_prog[] = {

	/* BPF_ABS/BPF_IND implicitly expect mbuf ptr in R6 */
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
		.dst_reg = EBPF_REG_6,
		.src_reg = EBPF_REG_1,
	},
	/* load IPv4 version and IHL */
	{
		.code = (BPF_LD | BPF_ABS | BPF_B),
		.imm = offsetof(struct rte_ipv4_hdr, version_ihl),
	},
	/* check IP version */
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_0,
	},
	{
		.code = (BPF_ALU | BPF_AND | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = 0xf0,
	},
	{
		.code = (BPF_JMP | BPF_JEQ | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = IPVERSION << 4,
		.off = 2,
	},
	/* invalid IP version, return 0 */
	{
		.code = (EBPF_ALU64 | BPF_XOR | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_0,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
	/* load 3rd byte of IP data */
	{
		.code = (BPF_ALU | BPF_AND | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = RTE_IPV4_HDR_IHL_MASK,
	},
	{
		.code = (BPF_ALU | BPF_LSH | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 2,
	},
	{
		.code = (BPF_LD | BPF_IND | BPF_B),
		.src_reg = EBPF_REG_0,
		.imm = 3,
	},
	{
		.code = (BPF_STX | BPF_MEM | BPF_B),
		.dst_reg = EBPF_REG_10,
		.src_reg = EBPF_REG_0,
		.off = (int16_t)(offsetof(struct dummy_offset, u8) -
			sizeof(struct dummy_offset)),
	},
	/* load IPv4 src addr */
	{
		.code = (BPF_LD | BPF_ABS | BPF_W),
		.imm = offsetof(struct rte_ipv4_hdr, src_addr),
	},
	{
		.code = (BPF_STX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_10,
		.src_reg = EBPF_REG_0,
		.off = (int16_t)(offsetof(struct dummy_offset, u32) -
			sizeof(struct dummy_offset)),
	},
	/* load IPv4 total length */
	{
		.code = (BPF_LD | BPF_ABS | BPF_H),
		.imm = offsetof(struct rte_ipv4_hdr, total_length),
	},
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
		.dst_reg = EBPF_REG_8,
		.src_reg = EBPF_REG_0,
	},
	/* load last 4 bytes of IP data */
	{
		.code = (BPF_LD | BPF_IND | BPF_W),
		.src_reg = EBPF_REG_8,
		.imm = -(int32_t)sizeof(uint32_t),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_10,
		.src_reg = EBPF_REG_0,
		.off = (int16_t)(offsetof(struct dummy_offset, u64) -
			sizeof(struct dummy_offset)),
	},
	/* load 2 bytes from the middle of IP data */
	{
		.code = (EBPF_ALU64 | BPF_RSH | BPF_K),
		.dst_reg = EBPF_REG_8,
		.imm = 1,
	},
	{
		.code = (BPF_LD | BPF_IND | BPF_H),
		.src_reg = EBPF_REG_8,
	},
	{
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_10,
		.off = (int16_t)(offsetof(struct dummy_offset, u64) -
			sizeof(struct dummy_offset)),
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_1,
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_10,
		.off = (int16_t)(offsetof(struct dummy_offset, u32) -
			sizeof(struct dummy_offset)),
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_1,
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_B),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_10,
		.off = (int16_t)(offsetof(struct dummy_offset, u8) -
			sizeof(struct dummy_offset)),
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_1,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};

/* all bpf test cases */
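/*
 * Each entry supplies the eBPF program and its argument description,
 * a prepare() callback that fills the input buffer and a check_result()
 * callback that validates the return value and/or the buffer contents.
 */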
static const struct bpf_test tests[] = {
	{
		.name = "test_store1",
		.arg_sz = sizeof(struct dummy_offset),
		.prm = {
			.ins = test_store1_prog,
			.nb_ins = RTE_DIM(test_store1_prog),
			.prog_arg = {
				.type = RTE_BPF_ARG_PTR,
				.size = sizeof(struct dummy_offset),
			},
		},
		.prepare = test_store1_prepare,
		.check_result = test_store1_check,
	},
	{
		.name = "test_store2",
		.arg_sz = sizeof(struct dummy_offset),
		.prm = {
			.ins = test_store2_prog,
			.nb_ins = RTE_DIM(test_store2_prog),
			.prog_arg = {
				.type = RTE_BPF_ARG_PTR,
				.size = sizeof(struct dummy_offset),
			},
		},
		.prepare = test_store1_prepare,
		.check_result = test_store1_check,
	},
	{
		.name = "test_load1",
		.arg_sz = sizeof(struct dummy_offset),
		.prm = {
			.ins = test_load1_prog,
			.nb_ins = RTE_DIM(test_load1_prog),
			.prog_arg = {
				.type = RTE_BPF_ARG_PTR,
				.size = sizeof(struct dummy_offset),
			},
		},
		.prepare = test_load1_prepare,
		.check_result = test_load1_check,
	},
	{
		.name = "test_ldimm1",
		.arg_sz = sizeof(struct dummy_offset),
		.prm = {
			.ins = test_ldimm1_prog,
			.nb_ins = RTE_DIM(test_ldimm1_prog),
			.prog_arg = {
				.type = RTE_BPF_ARG_PTR,
				.size = sizeof(struct dummy_offset),
			},
		},
		.prepare = test_store1_prepare,
		.check_result = test_ldimm1_check,
	},
	{
		.name = "test_mul1",
		.arg_sz = sizeof(struct dummy_vect8),
		.prm = {
			.ins = test_mul1_prog,
			.nb_ins = RTE_DIM(test_mul1_prog),
			.prog_arg = {
				.type = RTE_BPF_ARG_PTR,
				.size = sizeof(struct dummy_vect8),
			},
		},
		.prepare = test_mul1_prepare,
		.check_result = test_mul1_check,
	},
	{
		.name = "test_shift1",
		.arg_sz = sizeof(struct dummy_vect8),
		.prm = {
			.ins = test_shift1_prog,
			.nb_ins = RTE_DIM(test_shift1_prog),
			.prog_arg = {
				.type = RTE_BPF_ARG_PTR,
				.size = sizeof(struct dummy_vect8),
			},
		},
		.prepare = test_shift1_prepare,
		.check_result = test_shift1_check,
	},
	{
		.name = "test_jump1",
		.arg_sz = sizeof(struct dummy_vect8),
		.prm = {
			.ins = test_jump1_prog,
			.nb_ins = RTE_DIM(test_jump1_prog),
			.prog_arg = {
				.type = RTE_BPF_ARG_PTR,
				.size = sizeof(struct dummy_vect8),
			},
		},
		.prepare = test_jump1_prepare,
		.check_result = test_jump1_check,
	},
	{
		.name = "test_jump2",
		.arg_sz = sizeof(struct dummy_net),
		.prm = {
			.ins = test_jump2_prog,
			.nb_ins = RTE_DIM(test_jump2_prog),
			.prog_arg = {
				.type = RTE_BPF_ARG_PTR,
				.size = sizeof(struct dummy_net),
			},
		},
		.prepare = test_jump2_prepare,
		.check_result = test_jump2_check,
	},
	{
		.name = "test_alu1",
		.arg_sz = sizeof(struct dummy_vect8),
		.prm = {
			.ins = test_alu1_prog,
			.nb_ins = RTE_DIM(test_alu1_prog),
			.prog_arg = {
				.type = RTE_BPF_ARG_PTR,
				.size = sizeof(struct dummy_vect8),
			},
		},
		.prepare = test_jump1_prepare,
		.check_result = test_alu1_check,
	},
	{
		.name = "test_bele1",
		.arg_sz = sizeof(struct dummy_vect8),
		.prm = {
			.ins = test_bele1_prog,
			.nb_ins = RTE_DIM(test_bele1_prog),
			.prog_arg = {
				.type = RTE_BPF_ARG_PTR,
				.size = sizeof(struct dummy_vect8),
			},
		},
		.prepare = test_bele1_prepare,
		.check_result = test_bele1_check,
	},
	{
		.name = "test_xadd1",
		.arg_sz = sizeof(struct dummy_offset),
		.prm = {
			.ins = test_xadd1_prog,
			.nb_ins = RTE_DIM(test_xadd1_prog),
			.prog_arg = {
				.type = RTE_BPF_ARG_PTR,
				.size = sizeof(struct dummy_offset),
			},
		},
		.prepare = test_store1_prepare,
		.check_result = test_xadd1_check,
	},
	{
		.name = "test_div1",
		.arg_sz = sizeof(struct dummy_vect8),
		.prm = {
			.ins = test_div1_prog,
			.nb_ins = RTE_DIM(test_div1_prog),
			.prog_arg = {
				.type = RTE_BPF_ARG_PTR,
				.size = sizeof(struct dummy_vect8),
			},
		},
		.prepare = test_mul1_prepare,
		.check_result = test_div1_check,
	},
	{
		.name = "test_call1",
		.arg_sz = sizeof(struct dummy_offset),
		.prm = {
			.ins = test_call1_prog,
			.nb_ins = RTE_DIM(test_call1_prog),
			.prog_arg = {
				.type = RTE_BPF_ARG_PTR,
				.size = sizeof(struct dummy_offset),
			},
			.xsym = test_call1_xsym,
			.nb_xsym = RTE_DIM(test_call1_xsym),
		},
		.prepare = test_load1_prepare,
		.check_result = test_call1_check,
		/* for now, function calls are not supported on 32-bit platforms */
		.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
	},
	{
		.name = "test_call2",
		.arg_sz = sizeof(struct dummy_offset),
		.prm = {
			.ins = test_call2_prog,
			.nb_ins = RTE_DIM(test_call2_prog),
			.prog_arg = {
				.type = RTE_BPF_ARG_PTR,
				.size = sizeof(struct dummy_offset),
			},
			.xsym = test_call2_xsym,
			.nb_xsym = RTE_DIM(test_call2_xsym),
		},
		.prepare = test_store1_prepare,
		.check_result = test_call2_check,
		/* for now, function calls are not supported on 32-bit platforms */
		.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
	},
	{
		.name = "test_call3",
		.arg_sz = sizeof(struct dummy_vect8),
		.prm = {
			.ins = test_call3_prog,
			.nb_ins = RTE_DIM(test_call3_prog),
			.prog_arg = {
				.type = RTE_BPF_ARG_PTR,
				.size = sizeof(struct dummy_vect8),
			},
			.xsym = test_call3_xsym,
			.nb_xsym = RTE_DIM(test_call3_xsym),
		},
		.prepare = test_call3_prepare,
		.check_result = test_call3_check,
		/* for now, function calls are not supported on 32-bit platforms */
		.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
	},
	{
		.name = "test_call4",
		.arg_sz = sizeof(struct dummy_offset),
		.prm = {
			.ins = test_call4_prog,
			.nb_ins = RTE_DIM(test_call4_prog),
			.prog_arg = {
				.type = RTE_BPF_ARG_PTR,
				.size = 2 * sizeof(struct dummy_offset),
			},
			.xsym = test_call4_xsym,
			.nb_xsym = RTE_DIM(test_call4_xsym),
		},
		.prepare = test_store1_prepare,
		.check_result = test_call4_check,
		/* for now, function calls are not supported on 32-bit platforms */
		.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
	},
	{
		.name = "test_call5",
		.arg_sz = sizeof(struct dummy_offset),
		.prm = {
			.ins = test_call5_prog,
			.nb_ins = RTE_DIM(test_call5_prog),
			.prog_arg = {
				.type = RTE_BPF_ARG_PTR,
				.size = sizeof(struct dummy_offset),
			},
			.xsym = test_call5_xsym,
			.nb_xsym = RTE_DIM(test_call5_xsym),
		},
		.prepare = test_store1_prepare,
		.check_result = test_call5_check,
		/* for now, function calls are not supported on 32-bit platforms */
		.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
	},
	{
		.name = "test_ld_mbuf1",
		.arg_sz = sizeof(struct dummy_mbuf),
		.prm = {
			.ins = test_ld_mbuf1_prog,
			.nb_ins = RTE_DIM(test_ld_mbuf1_prog),
			.prog_arg = {
				.type = RTE_BPF_ARG_PTR_MBUF,
				.buf_size = sizeof(struct dummy_mbuf),
			},
		},
		.prepare = test_ld_mbuf1_prepare,
		.check_result = test_ld_mbuf1_check,
		/* mbuf as input argument is not supported on 32-bit platforms */
		.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
	},
	{
		.name = "test_ld_mbuf2",
		.arg_sz = sizeof(struct dummy_mbuf),
		.prm = {
			.ins = test_ld_mbuf1_prog,
			.nb_ins = RTE_DIM(test_ld_mbuf1_prog),
			.prog_arg = {
				.type = RTE_BPF_ARG_PTR_MBUF,
				.buf_size = sizeof(struct dummy_mbuf),
			},
		},
		.prepare = test_ld_mbuf2_prepare,
		.check_result = test_ld_mbuf2_check,
		/* mbuf as input argument is not supported on 32-bit platforms */
		.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
	},
	{
		.name = "test_ld_mbuf3",
		.arg_sz = sizeof(struct dummy_mbuf),
		.prm = {
			.ins = test_ld_mbuf3_prog,
			.nb_ins = RTE_DIM(test_ld_mbuf3_prog),
			.prog_arg = {
				.type = RTE_BPF_ARG_PTR_MBUF,
				.buf_size = sizeof(struct dummy_mbuf),
			},
		},
		.prepare = test_ld_mbuf1_prepare,
		.check_result = test_ld_mbuf1_check,
		/* mbuf as input argument is not supported on 32-bit platforms */
		.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
	},
};

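/*
 * Execute one test case: load the program, run it through the
 * interpreter, validate the result, then repeat the run via JIT
 * when a JIT-ed function is available.
 */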
static int
run_test(const struct bpf_test *tst)
{
	int32_t ret, rv;
	int64_t rc;
	struct rte_bpf *bpf;
	struct rte_bpf_jit jit;
	uint8_t tbuf[tst->arg_sz];

	printf("%s(%s) start\n", __func__, tst->name);

	bpf = rte_bpf_load(&tst->prm);
	if (bpf == NULL) {
		printf("%s@%d: failed to load bpf code, error=%d(%s);\n",
			__func__, __LINE__, rte_errno, strerror(rte_errno));
		return -1;
	}

	tst->prepare(tbuf);
	rc = rte_bpf_exec(bpf, tbuf);
	ret = tst->check_result(rc, tbuf);
	if (ret != 0) {
		printf("%s@%d: check_result(%s) failed, error: %d(%s);\n",
			__func__, __LINE__, tst->name, ret, strerror(ret));
	}

	/* repeat the same test with jit, when possible */
	rte_bpf_get_jit(bpf, &jit);
	if (jit.func != NULL) {

		tst->prepare(tbuf);
		rc = jit.func(tbuf);
		rv = tst->check_result(rc, tbuf);
		ret |= rv;
		if (rv != 0) {
			printf("%s@%d: check_result(%s) failed, "
				"error: %d(%s);\n",
				__func__, __LINE__, tst->name,
				rv, strerror(rv));
		}
	}

	rte_bpf_destroy(bpf);
	return ret;
}

static int
test_bpf(void)
{
	int32_t rc, rv;
	uint32_t i;

	rc = 0;
	for (i = 0; i != RTE_DIM(tests); i++) {
		rv = run_test(tests + i);
		if (tests[i].allow_fail == 0)
			rc |= rv;
	}

	return rc;
}

#endif /* !RTE_LIB_BPF */

REGISTER_TEST_COMMAND(bpf_autotest, test_bpf);

#ifndef RTE_HAS_LIBPCAP

static int
test_bpf_convert(void)
{
3272 printf("BPF convert RTE_HAS_LIBPCAP is undefined, skipping test\n");
	return TEST_SKIPPED;
}

#else
#include <pcap/pcap.h>

static void
test_bpf_dump(struct bpf_program *cbf, const struct rte_bpf_prm *prm)
{
	printf("cBPF program (%u insns)\n", cbf->bf_len);
	bpf_dump(cbf, 1);

	if (prm != NULL) {
		printf("\neBPF program (%u insns)\n", prm->nb_ins);
		rte_bpf_dump(stdout, prm->ins, prm->nb_ins);
	}
}

static int
test_bpf_match(pcap_t *pcap, const char *str,
	struct rte_mbuf *mb)
{
	struct bpf_program fcode;
	struct rte_bpf_prm *prm = NULL;
	struct rte_bpf *bpf = NULL;
	int ret = -1;
	uint64_t rc;

	if (pcap_compile(pcap, &fcode, str, 1, PCAP_NETMASK_UNKNOWN)) {
		printf("%s@%d: pcap_compile(\"%s\") failed: %s;\n",
			__func__, __LINE__, str, pcap_geterr(pcap));
		return -1;
	}

	prm = rte_bpf_convert(&fcode);
	if (prm == NULL) {
3309 printf("%s@%d: bpf_convert('%s') failed,, error=%d(%s);\n",
			__func__, __LINE__, str, rte_errno, strerror(rte_errno));
		goto error;
	}

	bpf = rte_bpf_load(prm);
	if (bpf == NULL) {
		printf("%s@%d: failed to load bpf code, error=%d(%s);\n",
			__func__, __LINE__, rte_errno, strerror(rte_errno));
		goto error;
	}

	rc = rte_bpf_exec(bpf, mb);
	/* the return code from a BPF capture filter is non-zero if it matched */
	ret = (rc == 0);
error:
	if (bpf)
		rte_bpf_destroy(bpf);
	rte_free(prm);
	pcap_freecode(&fcode);
	return ret;
}

/* Basic sanity test: can we match an IP packet? */
static int
test_bpf_filter_sanity(pcap_t *pcap)
{
	const uint32_t plen = 100;
	struct rte_mbuf mb, *m;
	uint8_t tbuf[RTE_MBUF_DEFAULT_BUF_SIZE];
	struct {
		struct rte_ether_hdr eth_hdr;
		struct rte_ipv4_hdr ip_hdr;
	} *hdr;

	dummy_mbuf_prep(&mb, tbuf, sizeof(tbuf), plen);
	m = &mb;

	hdr = rte_pktmbuf_mtod(m, typeof(hdr));
	hdr->eth_hdr = (struct rte_ether_hdr) {
		.dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4),
	};
	hdr->ip_hdr = (struct rte_ipv4_hdr) {
		.version_ihl = RTE_IPV4_VHL_DEF,
		.total_length = rte_cpu_to_be_16(plen),
		.time_to_live = IPDEFTTL,
		.next_proto_id = IPPROTO_RAW,
		.src_addr = rte_cpu_to_be_32(RTE_IPV4_LOOPBACK),
		.dst_addr = rte_cpu_to_be_32(RTE_IPV4_BROADCAST),
	};

	if (test_bpf_match(pcap, "ip", m) != 0) {
		printf("%s@%d: filter \"ip\" doesn't match test data\n",
			__func__, __LINE__);
		return -1;
	}
	if (test_bpf_match(pcap, "not ip", m) == 0) {
		printf("%s@%d: filter \"not ip\" does match test data\n",
			__func__, __LINE__);
		return -1;
	}

	return 0;
}

/*
 * Some sample pcap filter strings from
 * https://wiki.wireshark.org/CaptureFilters
 */
static const char * const sample_filters[] = {
	"host 172.18.5.4",
	"net 192.168.0.0/24",
	"src net 192.168.0.0/24",
	"src net 192.168.0.0 mask 255.255.255.0",
	"dst net 192.168.0.0/24",
	"dst net 192.168.0.0 mask 255.255.255.0",
	"port 53",
	"host 192.0.2.1 and not (port 80 or port 25)",
	"host 2001:4b98:db0::8 and not port 80 and not port 25",
	"port not 53 and not arp",
	"(tcp[0:2] > 1500 and tcp[0:2] < 1550) or (tcp[2:2] > 1500 and tcp[2:2] < 1550)",
	"ether proto 0x888e",
	"ether[0] & 1 = 0 and ip[16] >= 224",
	"icmp[icmptype] != icmp-echo and icmp[icmptype] != icmp-echoreply",
	"tcp[tcpflags] & (tcp-syn|tcp-fin) != 0 and not src and dst net 127.0.0.1",
	"not ether dst 01:80:c2:00:00:0e",
	"not broadcast and not multicast",
	"dst host ff02::1",
	"port 80 and tcp[((tcp[12:1] & 0xf0) >> 2):4] = 0x47455420",
	/* Worms */
	"dst port 135 and tcp port 135 and ip[2:2]==48",
	"icmp[icmptype]==icmp-echo and ip[2:2]==92 and icmp[8:4]==0xAAAAAAAA",
	"dst port 135 or dst port 445 or dst port 1433"
	" and tcp[tcpflags] & (tcp-syn) != 0"
	" and tcp[tcpflags] & (tcp-ack) = 0 and src net 192.168.0.0/24",
	"tcp src port 443 and (tcp[((tcp[12] & 0xF0) >> 4 ) * 4] = 0x18)"
	" and (tcp[((tcp[12] & 0xF0) >> 4 ) * 4 + 1] = 0x03)"
	" and (tcp[((tcp[12] & 0xF0) >> 4 ) * 4 + 2] < 0x04)"
	" and ((ip[2:2] - 4 * (ip[0] & 0x0F) - 4 * ((tcp[12] & 0xF0) >> 4) > 69))",
	/* Other */
	"len = 128",
};

static int
test_bpf_filter(pcap_t *pcap, const char *s)
{
	struct bpf_program fcode;
	struct rte_bpf_prm *prm = NULL;
	struct rte_bpf *bpf = NULL;

	if (pcap_compile(pcap, &fcode, s, 1, PCAP_NETMASK_UNKNOWN)) {
		printf("%s@%d: pcap_compile('%s') failed: %s;\n",
			__func__, __LINE__, s, pcap_geterr(pcap));
		return -1;
	}

	prm = rte_bpf_convert(&fcode);
	if (prm == NULL) {
3428 printf("%s@%d: bpf_convert('%s') failed,, error=%d(%s);\n",
			__func__, __LINE__, s, rte_errno, strerror(rte_errno));
		goto error;
	}

	bpf = rte_bpf_load(prm);
	if (bpf == NULL) {
		printf("%s@%d: failed to load bpf code, error=%d(%s);\n",
			__func__, __LINE__, rte_errno, strerror(rte_errno));
		goto error;
	}

error:
	if (bpf)
		rte_bpf_destroy(bpf);
	else {
		printf("%s \"%s\"\n", __func__, s);
		test_bpf_dump(&fcode, prm);
	}

	rte_free(prm);
	pcap_freecode(&fcode);
	return (bpf == NULL) ? -1 : 0;
}

static int
test_bpf_convert(void)
{
	unsigned int i;
	pcap_t *pcap;
	int rc;

	pcap = pcap_open_dead(DLT_EN10MB, 262144);
	if (!pcap) {
		printf("pcap_open_dead failed\n");
		return -1;
	}

	rc = test_bpf_filter_sanity(pcap);
	for (i = 0; i < RTE_DIM(sample_filters); i++)
		rc |= test_bpf_filter(pcap, sample_filters[i]);

	pcap_close(pcap);
	return rc;
}

#endif /* RTE_HAS_LIBPCAP */

REGISTER_TEST_COMMAND(bpf_convert_autotest, test_bpf_convert);