xref: /f-stack/dpdk/app/test/test_bpf.c (revision ebf5cedb)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4 
5 #include <stdio.h>
6 #include <string.h>
7 #include <stdint.h>
8 #include <inttypes.h>
9 
10 #include <rte_memory.h>
11 #include <rte_debug.h>
12 #include <rte_hexdump.h>
13 #include <rte_random.h>
14 #include <rte_byteorder.h>
15 #include <rte_errno.h>
16 #include <rte_bpf.h>
17 #include <rte_ether.h>
18 #include <rte_ip.h>
19 
20 #include "test.h"
21 
22 /*
23  * Basic functional tests for librte_bpf.
24  * The main procedure - load eBPF program, execute it and
 * compare results with expected values.
26  */
27 
/*
 * Memory layout exercised by the load/store test programs:
 * one field per eBPF memory-access width (DW/W/H/B).
 */
struct dummy_offset {
	uint64_t u64;
	uint32_t u32;
	uint16_t u16;
	uint8_t  u8;
};
34 
/*
 * Input/output vectors for the ALU/JMP test programs:
 * programs read from in[] and store their results into out[].
 */
struct dummy_vect8 {
	struct dummy_offset in[8];
	struct dummy_offset out[8];
};
39 
/* Synthetic VLAN-tagged IPv4 packet used by the jump2 test-case. */
struct dummy_net {
	struct rte_ether_hdr eth_hdr;
	struct rte_vlan_hdr vlan_hdr;
	struct rte_ipv4_hdr ip_hdr;
};
45 
46 #define	TEST_FILL_1	0xDEADBEEF
47 
48 #define	TEST_MUL_1	21
49 #define TEST_MUL_2	-100
50 
51 #define TEST_SHIFT_1	15
52 #define TEST_SHIFT_2	33
53 
54 #define TEST_JCC_1	0
55 #define TEST_JCC_2	-123
56 #define TEST_JCC_3	5678
57 #define TEST_JCC_4	TEST_FILL_1
58 
59 #define TEST_IMM_1	UINT64_MAX
60 #define TEST_IMM_2	((uint64_t)INT64_MIN)
61 #define TEST_IMM_3	((uint64_t)INT64_MAX + INT32_MAX)
62 #define TEST_IMM_4	((uint64_t)UINT32_MAX)
63 #define TEST_IMM_5	((uint64_t)UINT32_MAX + 1)
64 
65 #define TEST_MEMFROB	0x2a2a2a2a
66 
67 #define STRING_GEEK	0x6B656567
68 #define STRING_WEEK	0x6B656577
69 
70 #define TEST_NETMASK 0xffffff00
71 #define TEST_SUBNET  0xaca80200
72 
/* MAC addresses used to build the dummy ether header. */
uint8_t src_mac[] = { 0x00, 0xFF, 0xAA, 0xFF, 0xAA, 0xFF };
uint8_t dst_mac[] = { 0x00, 0xAA, 0xFF, 0xAA, 0xFF, 0xAA };

/* 172.168.2.1 and 172.168.2.2 in host byte order. */
uint32_t ip_src_addr = (172U << 24) | (168U << 16) | (2 << 8) | 1;
uint32_t ip_dst_addr = (172U << 24) | (168U << 16) | (2 << 8) | 2;
78 
/* Descriptor of a single BPF functional test-case. */
struct bpf_test {
	/* test name for reporting */
	const char *name;
	/* size of the argument buffer passed to the program */
	size_t arg_sz;
	/* program load parameters (instructions, xsym, etc.) */
	struct rte_bpf_prm prm;
	/* fills the input buffer before execution */
	void (*prepare)(void *);
	/* validates return code and output data after execution */
	int (*check_result)(uint64_t, const void *);
	/* NOTE(review): presumably non-zero tolerates a failure
	 * (e.g. no JIT on this platform) - confirm against the runner */
	uint32_t allow_fail;
};
87 
/*
 * Compare the program's return value and its output data against the
 * expected ones.  Dump diagnostics and return non-zero on any mismatch.
 */
static int
cmp_res(const char *func, uint64_t exp_rc, uint64_t ret_rc,
	const void *exp_res, const void *ret_res, size_t res_sz)
{
	int32_t rv = 0;

	/* return code mismatch */
	if (ret_rc != exp_rc) {
		printf("%s@%d: invalid return value, expected: 0x%" PRIx64
			",result: 0x%" PRIx64 "\n",
			func, __LINE__, exp_rc, ret_rc);
		rv |= -1;
	}

	/* output data mismatch */
	if (memcmp(exp_res, ret_res, res_sz) != 0) {
		printf("%s: invalid value\n", func);
		rte_memdump(stdout, "expected", exp_res, res_sz);
		rte_memdump(stdout, "result", ret_res, res_sz);
		rv |= -1;
	}

	return rv;
}
115 
116 /* store immediate test-cases */
/*
 * Store-immediate micro-program: writes TEST_FILL_1 (truncated per
 * access width) into the u8/u16/u32/u64 fields of the struct
 * dummy_offset pointed to by R1, then returns 1.
 */
static const struct ebpf_insn test_store1_prog[] = {
	{
		/* *(uint8_t *)(R1 + off(u8)) = imm */
		.code = (BPF_ST | BPF_MEM | BPF_B),
		.dst_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_offset, u8),
		.imm = TEST_FILL_1,
	},
	{
		/* *(uint16_t *)(R1 + off(u16)) = imm */
		.code = (BPF_ST | BPF_MEM | BPF_H),
		.dst_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_offset, u16),
		.imm = TEST_FILL_1,
	},
	{
		/* *(uint32_t *)(R1 + off(u32)) = imm */
		.code = (BPF_ST | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_offset, u32),
		.imm = TEST_FILL_1,
	},
	{
		/* *(uint64_t *)(R1 + off(u64)) = imm (32-bit imm, sign-extended) */
		.code = (BPF_ST | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_offset, u64),
		.imm = TEST_FILL_1,
	},
	/* return 1 */
	{
		.code = (BPF_ALU | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 1,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};
152 
153 static void
154 test_store1_prepare(void *arg)
155 {
156 	struct dummy_offset *df;
157 
158 	df = arg;
159 	memset(df, 0, sizeof(*df));
160 }
161 
162 static int
163 test_store1_check(uint64_t rc, const void *arg)
164 {
165 	const struct dummy_offset *dft;
166 	struct dummy_offset dfe;
167 
168 	dft = arg;
169 
170 	memset(&dfe, 0, sizeof(dfe));
171 	dfe.u64 = (int32_t)TEST_FILL_1;
172 	dfe.u32 = dfe.u64;
173 	dfe.u16 = dfe.u64;
174 	dfe.u8 = dfe.u64;
175 
176 	return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe));
177 }
178 
179 /* store register test-cases */
/*
 * Store-register micro-program: load TEST_FILL_1 into R2 (64-bit MOV,
 * sign-extended), store R2 into each field of the struct dummy_offset
 * that R1 points at, then return 1.  Expected memory image is the same
 * as for test_store1_prog.
 */
static const struct ebpf_insn test_store2_prog[] = {

	{
		/* R2 = (int64_t)TEST_FILL_1 */
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = TEST_FILL_1,
	},
	{
		/* *(uint8_t *)(R1 + off(u8)) = R2 */
		.code = (BPF_STX | BPF_MEM | BPF_B),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_offset, u8),
	},
	{
		/* *(uint16_t *)(R1 + off(u16)) = R2 */
		.code = (BPF_STX | BPF_MEM | BPF_H),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_offset, u16),
	},
	{
		/* *(uint32_t *)(R1 + off(u32)) = R2 */
		.code = (BPF_STX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_offset, u32),
	},
	{
		/* *(uint64_t *)(R1 + off(u64)) = R2 */
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_offset, u64),
	},
	/* return 1 */
	{
		.code = (BPF_ALU | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 1,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};
221 
222 /* load test-cases */
/*
 * Load micro-program: read the u8/u16/u32/u64 fields of the struct
 * dummy_offset pointed to by R1 into R2/R3/R4/R0 (loads zero-extend),
 * then return their 64-bit sum.
 */
static const struct ebpf_insn test_load1_prog[] = {

	{
		/* R2 = *(uint8_t *)(R1 + off(u8)) */
		.code = (BPF_LDX | BPF_MEM | BPF_B),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_offset, u8),
	},
	{
		/* R3 = *(uint16_t *)(R1 + off(u16)) */
		.code = (BPF_LDX | BPF_MEM | BPF_H),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_offset, u16),
	},
	{
		/* R4 = *(uint32_t *)(R1 + off(u32)) */
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_offset, u32),
	},
	{
		/* R0 = *(uint64_t *)(R1 + off(u64)) */
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_offset, u64),
	},
	/* return sum */
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_4,
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_3,
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_2,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};
269 
270 static void
271 test_load1_prepare(void *arg)
272 {
273 	struct dummy_offset *df;
274 
275 	df = arg;
276 
277 	memset(df, 0, sizeof(*df));
278 	df->u64 = (int32_t)TEST_FILL_1;
279 	df->u32 = df->u64;
280 	df->u16 = df->u64;
281 	df->u8 = df->u64;
282 }
283 
284 static int
285 test_load1_check(uint64_t rc, const void *arg)
286 {
287 	uint64_t v;
288 	const struct dummy_offset *dft;
289 
290 	dft = arg;
291 	v = dft->u64;
292 	v += dft->u32;
293 	v += dft->u16;
294 	v += dft->u8;
295 
296 	return cmp_res(__func__, v, rc, dft, dft, sizeof(*dft));
297 }
298 
299 /* load immediate test-cases */
/*
 * 64-bit load-immediate micro-program.  Each LD_IMM64 occupies two
 * instruction slots: the first carries the low 32 bits, the following
 * pseudo-instruction (code == 0) carries the high 32 bits.  Loads the
 * five TEST_IMM_* constants and returns their (wrapping) sum.
 */
static const struct ebpf_insn test_ldimm1_prog[] = {

	{
		/* R0 = TEST_IMM_1 (low word here, high word below) */
		.code = (BPF_LD | BPF_IMM | EBPF_DW),
		.dst_reg = EBPF_REG_0,
		.imm = (uint32_t)TEST_IMM_1,
	},
	{
		.imm = TEST_IMM_1 >> 32,
	},
	{
		/* R3 = TEST_IMM_2 */
		.code = (BPF_LD | BPF_IMM | EBPF_DW),
		.dst_reg = EBPF_REG_3,
		.imm = (uint32_t)TEST_IMM_2,
	},
	{
		.imm = TEST_IMM_2 >> 32,
	},
	{
		/* R5 = TEST_IMM_3 */
		.code = (BPF_LD | BPF_IMM | EBPF_DW),
		.dst_reg = EBPF_REG_5,
		.imm = (uint32_t)TEST_IMM_3,
	},
	{
		.imm = TEST_IMM_3 >> 32,
	},
	{
		/* R7 = TEST_IMM_4 */
		.code = (BPF_LD | BPF_IMM | EBPF_DW),
		.dst_reg = EBPF_REG_7,
		.imm = (uint32_t)TEST_IMM_4,
	},
	{
		.imm = TEST_IMM_4 >> 32,
	},
	{
		/* R9 = TEST_IMM_5 */
		.code = (BPF_LD | BPF_IMM | EBPF_DW),
		.dst_reg = EBPF_REG_9,
		.imm = (uint32_t)TEST_IMM_5,
	},
	{
		.imm = TEST_IMM_5 >> 32,
	},
	/* return sum */
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_3,
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_5,
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_7,
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_9,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};
367 
368 static int
369 test_ldimm1_check(uint64_t rc, const void *arg)
370 {
371 	uint64_t v1, v2;
372 
373 	v1 = TEST_IMM_1;
374 	v2 = TEST_IMM_2;
375 	v1 += v2;
376 	v2 = TEST_IMM_3;
377 	v1 += v2;
378 	v2 = TEST_IMM_4;
379 	v1 += v2;
380 	v2 = TEST_IMM_5;
381 	v1 += v2;
382 
383 	return cmp_res(__func__, v1, rc, arg, arg, 0);
384 }
385 
386 
387 /* alu mul test-cases */
/*
 * Multiply micro-program: exercises 32-bit (BPF_ALU) and 64-bit
 * (EBPF_ALU64) MUL with both immediate (BPF_K) and register (BPF_X)
 * operands, storing intermediate results into out[0..2].u64, then
 * returns 1.
 */
static const struct ebpf_insn test_mul1_prog[] = {

	{
		/* R2 = in[0].u32 */
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u32),
	},
	{
		/* R3 = in[1].u64 */
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[1].u64),
	},
	{
		/* R4 = in[2].u32 */
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[2].u32),
	},
	{
		/* R2 = (uint32_t)(R2 * TEST_MUL_1) */
		.code = (BPF_ALU | BPF_MUL | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = TEST_MUL_1,
	},
	{
		/* R3 *= TEST_MUL_2 (64-bit) */
		.code = (EBPF_ALU64 | BPF_MUL | BPF_K),
		.dst_reg = EBPF_REG_3,
		.imm = TEST_MUL_2,
	},
	{
		/* R4 = (uint32_t)(R4 * R2) */
		.code = (BPF_ALU | BPF_MUL | BPF_X),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_2,
	},
	{
		/* R4 *= R3 (64-bit) */
		.code = (EBPF_ALU64 | BPF_MUL | BPF_X),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_3,
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_vect8, out[0].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_3,
		.off = offsetof(struct dummy_vect8, out[1].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_4,
		.off = offsetof(struct dummy_vect8, out[2].u64),
	},
	/* return 1 */
	{
		.code = (BPF_ALU | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 1,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};
456 
457 static void
458 test_mul1_prepare(void *arg)
459 {
460 	struct dummy_vect8 *dv;
461 	uint64_t v;
462 
463 	dv = arg;
464 
465 	v = rte_rand();
466 
467 	memset(dv, 0, sizeof(*dv));
468 	dv->in[0].u32 = v;
469 	dv->in[1].u64 = v << 12 | v >> 6;
470 	dv->in[2].u32 = -v;
471 }
472 
/*
 * Reference model for test_mul1_prog: repeat the same 32/64-bit
 * multiplications in C and compare against out[0..2].u64.
 * Expected return code is 1.
 */
static int
test_mul1_check(uint64_t rc, const void *arg)
{
	uint64_t r2, r3, r4;
	const struct dummy_vect8 *dvt;
	struct dummy_vect8 dve;

	dvt = arg;
	memset(&dve, 0, sizeof(dve));

	r2 = dvt->in[0].u32;
	r3 = dvt->in[1].u64;
	r4 = dvt->in[2].u32;

	/* 32-bit BPF_ALU MUL truncates its result to 32 bits */
	r2 = (uint32_t)r2 * TEST_MUL_1;
	r3 *= TEST_MUL_2;
	r4 = (uint32_t)(r4 * r2);
	r4 *= r3;

	dve.out[0].u64 = r2;
	dve.out[1].u64 = r3;
	dve.out[2].u64 = r4;

	return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
}
498 
499 /* alu shift test-cases */
/*
 * Shift micro-program: exercises LSH/RSH/ARSH in 32- and 64-bit widths
 * with immediate and register shift counts.  Register counts are masked
 * (AND) to the operand width before the second round of shifts.
 * Results go into out[0..5].u64; returns 1.
 */
static const struct ebpf_insn test_shift1_prog[] = {

	{
		/* R2 = in[0].u32 */
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u32),
	},
	{
		/* R3 = in[1].u64 */
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[1].u64),
	},
	{
		/* R4 = in[2].u32 */
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[2].u32),
	},
	{
		/* R2 = (uint32_t)(R2 << TEST_SHIFT_1) */
		.code = (BPF_ALU | BPF_LSH | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = TEST_SHIFT_1,
	},
	{
		/* R3 = (int64_t)R3 >> TEST_SHIFT_2 (arithmetic) */
		.code = (EBPF_ALU64 | EBPF_ARSH | BPF_K),
		.dst_reg = EBPF_REG_3,
		.imm = TEST_SHIFT_2,
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_vect8, out[0].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_3,
		.off = offsetof(struct dummy_vect8, out[1].u64),
	},
	{
		/* R2 = (uint32_t)R2 >> R4 */
		.code = (BPF_ALU | BPF_RSH | BPF_X),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_4,
	},
	{
		/* R3 <<= R4 (64-bit) */
		.code = (EBPF_ALU64 | BPF_LSH | BPF_X),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_4,
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_vect8, out[2].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_3,
		.off = offsetof(struct dummy_vect8, out[3].u64),
	},
	/* reload the inputs for the masked-count round */
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u32),
	},
	{
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[1].u64),
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[2].u32),
	},
	{
		/* R2 &= 63 - valid 64-bit shift count */
		.code = (BPF_ALU | BPF_AND | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = sizeof(uint64_t) * CHAR_BIT - 1,
	},
	{
		/* R3 = (int64_t)R3 >> R2 */
		.code = (EBPF_ALU64 | EBPF_ARSH | BPF_X),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_2,
	},
	{
		/* R2 &= 31 - valid 32-bit shift count */
		.code = (BPF_ALU | BPF_AND | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = sizeof(uint32_t) * CHAR_BIT - 1,
	},
	{
		/* R4 = (uint32_t)(R4 << R2) */
		.code = (BPF_ALU | BPF_LSH | BPF_X),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_2,
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_4,
		.off = offsetof(struct dummy_vect8, out[4].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_3,
		.off = offsetof(struct dummy_vect8, out[5].u64),
	},
	/* return 1 */
	{
		.code = (BPF_ALU | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 1,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};
624 
625 static void
626 test_shift1_prepare(void *arg)
627 {
628 	struct dummy_vect8 *dv;
629 	uint64_t v;
630 
631 	dv = arg;
632 
633 	v = rte_rand();
634 
635 	memset(dv, 0, sizeof(*dv));
636 	dv->in[0].u32 = v;
637 	dv->in[1].u64 = v << 12 | v >> 6;
638 	dv->in[2].u32 = (-v ^ 5);
639 }
640 
/*
 * Reference model for test_shift1_prog: mirror the same shift sequence
 * (including the width-minus-one masking of register shift counts) and
 * compare against out[0..5].u64.  Expected return code is 1.
 */
static int
test_shift1_check(uint64_t rc, const void *arg)
{
	uint64_t r2, r3, r4;
	const struct dummy_vect8 *dvt;
	struct dummy_vect8 dve;

	dvt = arg;
	memset(&dve, 0, sizeof(dve));

	r2 = dvt->in[0].u32;
	r3 = dvt->in[1].u64;
	r4 = dvt->in[2].u32;

	/* immediate-count round */
	r2 = (uint32_t)r2 << TEST_SHIFT_1;
	r3 = (int64_t)r3 >> TEST_SHIFT_2;

	dve.out[0].u64 = r2;
	dve.out[1].u64 = r3;

	/* register-count round */
	r2 = (uint32_t)r2 >> r4;
	r3 <<= r4;

	dve.out[2].u64 = r2;
	dve.out[3].u64 = r3;

	/* masked-count round on freshly reloaded inputs */
	r2 = dvt->in[0].u32;
	r3 = dvt->in[1].u64;
	r4 = dvt->in[2].u32;

	r2 &= sizeof(uint64_t) * CHAR_BIT - 1;
	r3 = (int64_t)r3 >> r2;
	r2 &= sizeof(uint32_t) * CHAR_BIT - 1;
	r4 = (uint32_t)r4 << r2;

	dve.out[4].u64 = r4;
	dve.out[5].u64 = r3;

	return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
}
681 
682 /* jmp test-cases */
/*
 * Conditional-jump micro-program.  R0 accumulates a bitmask: each of
 * the eight conditional jumps at [5]..[12], when taken, lands on one of
 * the trampolines at [14],[16],...,[28]; the trampoline ORs its bit
 * (0x1..0x80) into R0 and jumps (JA) back to the instruction following
 * the conditional jump that got us there.  Falls through to EXIT at
 * [13] once all conditions have been evaluated.
 */
static const struct ebpf_insn test_jump1_prog[] = {

	[0] = {
		/* R0 = 0 - result bitmask */
		.code = (BPF_ALU | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 0,
	},
	[1] = {
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u32),
	},
	[2] = {
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u64),
	},
	[3] = {
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[1].u32),
	},
	[4] = {
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_5,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[1].u64),
	},
	[5] = {
		/* if (R2 == TEST_JCC_1) goto [14] - sets 0x1 */
		.code = (BPF_JMP | BPF_JEQ | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = TEST_JCC_1,
		.off = 8,
	},
	[6] = {
		/* if ((int64_t)R3 <= TEST_JCC_2) goto [16] - sets 0x2 */
		.code = (BPF_JMP | EBPF_JSLE | BPF_K),
		.dst_reg = EBPF_REG_3,
		.imm = TEST_JCC_2,
		.off = 9,
	},
	[7] = {
		/* if (R4 > TEST_JCC_3) goto [18] - sets 0x4 */
		.code = (BPF_JMP | BPF_JGT | BPF_K),
		.dst_reg = EBPF_REG_4,
		.imm = TEST_JCC_3,
		.off = 10,
	},
	[8] = {
		/* if (R5 & TEST_JCC_4) goto [20] - sets 0x8 */
		.code = (BPF_JMP | BPF_JSET | BPF_K),
		.dst_reg = EBPF_REG_5,
		.imm = TEST_JCC_4,
		.off = 11,
	},
	[9] = {
		/* if (R2 != R3) goto [22] - sets 0x10 */
		.code = (BPF_JMP | EBPF_JNE | BPF_X),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_3,
		.off = 12,
	},
	[10] = {
		/* if ((int64_t)R2 > (int64_t)R4) goto [24] - sets 0x20 */
		.code = (BPF_JMP | EBPF_JSGT | BPF_X),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_4,
		.off = 13,
	},
	[11] = {
		/* if (R2 <= R5) goto [26] - sets 0x40 */
		.code = (BPF_JMP | EBPF_JLE | BPF_X),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_5,
		.off = 14,
	},
	[12] = {
		/* if (R3 & R5) goto [28] - sets 0x80 */
		.code = (BPF_JMP | BPF_JSET | BPF_X),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_5,
		.off = 15,
	},
	[13] = {
		.code = (BPF_JMP | EBPF_EXIT),
	},
	[14] = {
		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 0x1,
	},
	[15] = {
		/* back to [6] */
		.code = (BPF_JMP | BPF_JA),
		.off = -10,
	},
	[16] = {
		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 0x2,
	},
	[17] = {
		/* back to [7] */
		.code = (BPF_JMP | BPF_JA),
		.off = -11,
	},
	[18] = {
		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 0x4,
	},
	[19] = {
		/* back to [8] */
		.code = (BPF_JMP | BPF_JA),
		.off = -12,
	},
	[20] = {
		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 0x8,
	},
	[21] = {
		/* back to [9] */
		.code = (BPF_JMP | BPF_JA),
		.off = -13,
	},
	[22] = {
		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 0x10,
	},
	[23] = {
		/* back to [10] */
		.code = (BPF_JMP | BPF_JA),
		.off = -14,
	},
	[24] = {
		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 0x20,
	},
	[25] = {
		/* back to [11] */
		.code = (BPF_JMP | BPF_JA),
		.off = -15,
	},
	[26] = {
		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 0x40,
	},
	[27] = {
		/* back to [12] */
		.code = (BPF_JMP | BPF_JA),
		.off = -16,
	},
	[28] = {
		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 0x80,
	},
	[29] = {
		/* back to [13] (exit) */
		.code = (BPF_JMP | BPF_JA),
		.off = -17,
	},
};
838 
839 static void
840 test_jump1_prepare(void *arg)
841 {
842 	struct dummy_vect8 *dv;
843 	uint64_t v1, v2;
844 
845 	dv = arg;
846 
847 	v1 = rte_rand();
848 	v2 = rte_rand();
849 
850 	memset(dv, 0, sizeof(*dv));
851 	dv->in[0].u64 = v1;
852 	dv->in[1].u64 = v2;
853 	dv->in[0].u32 = (v1 << 12) + (v2 >> 6);
854 	dv->in[1].u32 = (v2 << 12) - (v1 >> 6);
855 }
856 
/*
 * Reference model for test_jump1_prog: evaluate the same eight
 * conditions in C and build the expected R0 bitmask.
 */
static int
test_jump1_check(uint64_t rc, const void *arg)
{
	uint64_t r2, r3, r4, r5, rv;
	const struct dummy_vect8 *dvt;

	dvt = arg;

	rv = 0;
	r2 = dvt->in[0].u32;
	r3 = dvt->in[0].u64;
	r4 = dvt->in[1].u32;
	r5 = dvt->in[1].u64;

	if (r2 == TEST_JCC_1)
		rv |= 0x1;
	if ((int64_t)r3 <= TEST_JCC_2)
		rv |= 0x2;
	if (r4 > TEST_JCC_3)
		rv |= 0x4;
	if (r5 & TEST_JCC_4)
		rv |= 0x8;
	if (r2 != r3)
		rv |= 0x10;
	/* JSGT/JLE operate on the full 64-bit register values */
	if ((int64_t)r2 > (int64_t)r4)
		rv |= 0x20;
	if (r2 <= r5)
		rv |= 0x40;
	if (r3 & r5)
		rv |= 0x80;

	return cmp_res(__func__, rv, rc, &rv, &rc, sizeof(rv));
}
890 
891 /* Jump test case - check ip4_dest in particular subnet */
/*
 * Jump test-case: parse the ether header at R1 (skipping an optional
 * 802.1Q VLAN tag), load the IPv4 destination address and test whether
 * it belongs to TEST_SUBNET/TEST_NETMASK.  Returns 0 on match, -1
 * otherwise.  Note the 16-bit loads below are raw (big-endian wire
 * order), hence the byte-swapped constants 0x81 and 0x8.
 */
static const struct ebpf_insn test_jump2_prog[] = {

	[0] = {
		/* R2 = 14 - IP header offset without VLAN tag */
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = 0xe,
	},
	[1] = {
		/* R3 = ether_type (wire order) */
		.code = (BPF_LDX | BPF_MEM | BPF_H),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_1,
		.off = 12,
	},
	[2] = {
		/* not VLAN (0x8100 BE reads back as 0x81 here)? skip tag */
		.code = (BPF_JMP | EBPF_JNE | BPF_K),
		.dst_reg = EBPF_REG_3,
		.off = 2,
		.imm = 0x81,
	},
	[3] = {
		/* R2 = 18 - IP header offset with VLAN tag */
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = 0x12,
	},
	[4] = {
		/* R3 = inner ether_type from the VLAN header */
		.code = (BPF_LDX | BPF_MEM | BPF_H),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_1,
		.off = 16,
	},
	[5] = {
		.code = (EBPF_ALU64 | BPF_AND | BPF_K),
		.dst_reg = EBPF_REG_3,
		.imm = 0xffff,
	},
	[6] = {
		/* not IPv4 (0x0800 BE reads back as 0x8)? return 0 */
		.code = (BPF_JMP | EBPF_JNE | BPF_K),
		.dst_reg = EBPF_REG_3,
		.off = 9,
		.imm = 0x8,
	},
	[7] = {
		/* R1 += R2 - advance to the IP header */
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
	},
	[8] = {
		/* R0 = 0 - provisional "match" result */
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 0,
	},
	[9] = {
		/* R1 = dst_addr (wire order) */
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_1,
		.off = 16,
	},
	[10] = {
		.code = (BPF_ALU | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_3,
		.imm = TEST_NETMASK,
	},
	[11] = {
		/* convert the netmask to big-endian to match dst_addr */
		.code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
		.dst_reg = EBPF_REG_3,
		.imm = sizeof(uint32_t) * CHAR_BIT,
	},
	[12] = {
		.code = (BPF_ALU | BPF_AND | BPF_X),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_3,
	},
	[13] = {
		.code = (BPF_ALU | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_3,
		.imm = TEST_SUBNET,
	},
	[14] = {
		/* convert the subnet to big-endian as well */
		.code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
		.dst_reg = EBPF_REG_3,
		.imm = sizeof(uint32_t) * CHAR_BIT,
	},
	[15] = {
		/* masked dst in subnet? keep R0 == 0 */
		.code = (BPF_JMP | BPF_JEQ | BPF_X),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_3,
		.off = 1,
	},
	[16] = {
		/* no match: return -1 */
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = -1,
	},
	[17] = {
		.code = (BPF_JMP | EBPF_EXIT),
	},
};
989 
/*
 * Build a VLAN-tagged IPv4 packet in the argument buffer
 * (ether + vlan + ipv4 headers, no payload) for test_jump2_prog.
 */
static void
test_jump2_prepare(void *arg)
{
	struct dummy_net *dn;

	dn = arg;
	memset(dn, 0, sizeof(*dn));

	/*
	 * Initialize ether header.
	 */
	rte_ether_addr_copy((struct rte_ether_addr *)dst_mac,
			    &dn->eth_hdr.d_addr);
	rte_ether_addr_copy((struct rte_ether_addr *)src_mac,
			    &dn->eth_hdr.s_addr);
	dn->eth_hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);

	/*
	 * Initialize vlan header.
	 */
	dn->vlan_hdr.eth_proto =  rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
	dn->vlan_hdr.vlan_tci = 32;

	/*
	 * Initialize IP header.
	 */
	dn->ip_hdr.version_ihl   = 0x45;    /*IP_VERSION | IP_HDRLEN*/
	dn->ip_hdr.time_to_live   = 64;   /* IP_DEFTTL */
	dn->ip_hdr.next_proto_id = IPPROTO_TCP;
	dn->ip_hdr.packet_id = rte_cpu_to_be_16(0x463c);
	dn->ip_hdr.total_length   = rte_cpu_to_be_16(60);
	dn->ip_hdr.src_addr = rte_cpu_to_be_32(ip_src_addr);
	dn->ip_hdr.dst_addr = rte_cpu_to_be_32(ip_dst_addr);
}
1025 
1026 static int
1027 test_jump2_check(uint64_t rc, const void *arg)
1028 {
1029 	const struct rte_ether_hdr *eth_hdr = arg;
1030 	const struct rte_ipv4_hdr *ipv4_hdr;
1031 	const void *next = eth_hdr;
1032 	uint16_t eth_type;
1033 	uint64_t v = -1;
1034 
1035 	if (eth_hdr->ether_type == htons(0x8100)) {
1036 		const struct rte_vlan_hdr *vlan_hdr =
1037 			(const void *)(eth_hdr + 1);
1038 		eth_type = vlan_hdr->eth_proto;
1039 		next = vlan_hdr + 1;
1040 	} else {
1041 		eth_type = eth_hdr->ether_type;
1042 		next = eth_hdr + 1;
1043 	}
1044 
1045 	if (eth_type == htons(0x0800)) {
1046 		ipv4_hdr = next;
1047 		if ((ipv4_hdr->dst_addr & rte_cpu_to_be_32(TEST_NETMASK)) ==
1048 		    rte_cpu_to_be_32(TEST_SUBNET)) {
1049 			v = 0;
1050 		}
1051 	}
1052 
1053 	return cmp_res(__func__, v, rc, arg, arg, sizeof(arg));
1054 }
1055 
1056 /* alu (add, sub, and, or, xor, neg)  test-cases */
/*
 * ALU micro-program: exercises AND/OR/XOR/ADD/SUB in 32- and 64-bit
 * widths with immediate and register operands, plus 32/64-bit NEG.
 * Intermediate results go into out[0..7].u64; returns -R2 + (-R3)
 * (with R2 negated as 32-bit).
 */
static const struct ebpf_insn test_alu1_prog[] = {

	{
		/* R2 = in[0].u32 */
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u32),
	},
	{
		/* R3 = in[0].u64 */
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u64),
	},
	{
		/* R4 = in[1].u32 */
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[1].u32),
	},
	{
		/* R5 = in[1].u64 */
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_5,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[1].u64),
	},
	/* immediate-operand round */
	{
		.code = (BPF_ALU | BPF_AND | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = TEST_FILL_1,
	},
	{
		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
		.dst_reg = EBPF_REG_3,
		.imm = TEST_FILL_1,
	},
	{
		.code = (BPF_ALU | BPF_XOR | BPF_K),
		.dst_reg = EBPF_REG_4,
		.imm = TEST_FILL_1,
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_K),
		.dst_reg = EBPF_REG_5,
		.imm = TEST_FILL_1,
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_vect8, out[0].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_3,
		.off = offsetof(struct dummy_vect8, out[1].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_4,
		.off = offsetof(struct dummy_vect8, out[2].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_5,
		.off = offsetof(struct dummy_vect8, out[3].u64),
	},
	/* register-operand round */
	{
		.code = (BPF_ALU | BPF_OR | BPF_X),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_3,
	},
	{
		.code = (EBPF_ALU64 | BPF_XOR | BPF_X),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_4,
	},
	{
		.code = (BPF_ALU | BPF_SUB | BPF_X),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_5,
	},
	{
		.code = (EBPF_ALU64 | BPF_AND | BPF_X),
		.dst_reg = EBPF_REG_5,
		.src_reg = EBPF_REG_2,
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_vect8, out[4].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_3,
		.off = offsetof(struct dummy_vect8, out[5].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_4,
		.off = offsetof(struct dummy_vect8, out[6].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_5,
		.off = offsetof(struct dummy_vect8, out[7].u64),
	},
	/* return (-r2 + (-r3)) */
	{
		/* R2 = (uint32_t)-R2 */
		.code = (BPF_ALU | BPF_NEG),
		.dst_reg = EBPF_REG_2,
	},
	{
		/* R3 = -R3 (64-bit) */
		.code = (EBPF_ALU64 | BPF_NEG),
		.dst_reg = EBPF_REG_3,
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_3,
	},
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_2,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};
1194 
/*
 * Reference model for test_alu1_prog: repeat the same 32/64-bit ALU
 * sequence (32-bit ops truncate, immediates sign-extend for 64-bit
 * ops) and compare against out[0..7].u64 plus the computed rc.
 */
static int
test_alu1_check(uint64_t rc, const void *arg)
{
	uint64_t r2, r3, r4, r5, rv;
	const struct dummy_vect8 *dvt;
	struct dummy_vect8 dve;

	dvt = arg;
	memset(&dve, 0, sizeof(dve));

	r2 = dvt->in[0].u32;
	r3 = dvt->in[0].u64;
	r4 = dvt->in[1].u32;
	r5 = dvt->in[1].u64;

	/* immediate-operand round */
	r2 = (uint32_t)r2 & TEST_FILL_1;
	r3 |= (int32_t) TEST_FILL_1;
	r4 = (uint32_t)r4 ^ TEST_FILL_1;
	r5 += (int32_t)TEST_FILL_1;

	dve.out[0].u64 = r2;
	dve.out[1].u64 = r3;
	dve.out[2].u64 = r4;
	dve.out[3].u64 = r5;

	/* register-operand round */
	r2 = (uint32_t)r2 | (uint32_t)r3;
	r3 ^= r4;
	r4 = (uint32_t)r4 - (uint32_t)r5;
	r5 &= r2;

	dve.out[4].u64 = r2;
	dve.out[5].u64 = r3;
	dve.out[6].u64 = r4;
	dve.out[7].u64 = r5;

	/* 32-bit NEG of r2, 64-bit NEG of r3, then sum */
	r2 = -(int32_t)r2;
	rv = (uint32_t)r2;
	r3 = -r3;
	rv += r3;

	return cmp_res(__func__, rv, rc, dve.out, dvt->out, sizeof(dve.out));
}
1237 
1238 /* endianness conversions (BE->LE/LE->BE)  test-cases */
/*
 * Endianness-conversion micro-program: load the 16/32/64-bit input
 * fields, convert each TO_BE and store into out[0..2].u64, then reload
 * and convert TO_LE into out[3..5].u64.  Returns 1.
 */
static const struct ebpf_insn test_bele1_prog[] = {

	{
		.code = (BPF_LDX | BPF_MEM | BPF_H),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u16),
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u32),
	},
	{
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u64),
	},
	/* host-to-big-endian conversions; imm selects the width in bits */
	{
		.code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
		.dst_reg = EBPF_REG_2,
		.imm = sizeof(uint16_t) * CHAR_BIT,
	},
	{
		.code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
		.dst_reg = EBPF_REG_3,
		.imm = sizeof(uint32_t) * CHAR_BIT,
	},
	{
		.code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
		.dst_reg = EBPF_REG_4,
		.imm = sizeof(uint64_t) * CHAR_BIT,
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_vect8, out[0].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_3,
		.off = offsetof(struct dummy_vect8, out[1].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_4,
		.off = offsetof(struct dummy_vect8, out[2].u64),
	},
	/* reload the inputs for the little-endian round */
	{
		.code = (BPF_LDX | BPF_MEM | BPF_H),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u16),
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u32),
	},
	{
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u64),
	},
	/* host-to-little-endian conversions */
	{
		.code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
		.dst_reg = EBPF_REG_2,
		.imm = sizeof(uint16_t) * CHAR_BIT,
	},
	{
		.code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
		.dst_reg = EBPF_REG_3,
		.imm = sizeof(uint32_t) * CHAR_BIT,
	},
	{
		.code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
		.dst_reg = EBPF_REG_4,
		.imm = sizeof(uint64_t) * CHAR_BIT,
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_vect8, out[3].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_3,
		.off = offsetof(struct dummy_vect8, out[4].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_4,
		.off = offsetof(struct dummy_vect8, out[5].u64),
	},
	/* return 1 */
	{
		.code = (BPF_ALU | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 1,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};
1353 
1354 static void
1355 test_bele1_prepare(void *arg)
1356 {
1357 	struct dummy_vect8 *dv;
1358 
1359 	dv = arg;
1360 
1361 	memset(dv, 0, sizeof(*dv));
1362 	dv->in[0].u64 = rte_rand();
1363 	dv->in[0].u32 = dv->in[0].u64;
1364 	dv->in[0].u16 = dv->in[0].u64;
1365 }
1366 
1367 static int
1368 test_bele1_check(uint64_t rc, const void *arg)
1369 {
1370 	uint64_t r2, r3, r4;
1371 	const struct dummy_vect8 *dvt;
1372 	struct dummy_vect8 dve;
1373 
1374 	dvt = arg;
1375 	memset(&dve, 0, sizeof(dve));
1376 
1377 	r2 = dvt->in[0].u16;
1378 	r3 = dvt->in[0].u32;
1379 	r4 = dvt->in[0].u64;
1380 
1381 	r2 =  rte_cpu_to_be_16(r2);
1382 	r3 =  rte_cpu_to_be_32(r3);
1383 	r4 =  rte_cpu_to_be_64(r4);
1384 
1385 	dve.out[0].u64 = r2;
1386 	dve.out[1].u64 = r3;
1387 	dve.out[2].u64 = r4;
1388 
1389 	r2 = dvt->in[0].u16;
1390 	r3 = dvt->in[0].u32;
1391 	r4 = dvt->in[0].u64;
1392 
1393 	r2 =  rte_cpu_to_le_16(r2);
1394 	r3 =  rte_cpu_to_le_32(r3);
1395 	r4 =  rte_cpu_to_le_64(r4);
1396 
1397 	dve.out[3].u64 = r2;
1398 	dve.out[4].u64 = r3;
1399 	dve.out[5].u64 = r4;
1400 
1401 	return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
1402 }
1403 
1404 /* atomic add test-cases */
static const struct ebpf_insn test_xadd1_prog[] = {

	/* atomically add 1 into both u32 and u64 fields */
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = 1,
	},
	{
		.code = (BPF_STX | EBPF_XADD | BPF_W),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_offset, u32),
	},
	{
		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_offset, u64),
	},
	/* atomically add -1 */
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_3,
		.imm = -1,
	},
	{
		.code = (BPF_STX | EBPF_XADD | BPF_W),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_3,
		.off = offsetof(struct dummy_offset, u32),
	},
	{
		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_3,
		.off = offsetof(struct dummy_offset, u64),
	},
	/* atomically add TEST_FILL_1 */
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_4,
		.imm = TEST_FILL_1,
	},
	{
		.code = (BPF_STX | EBPF_XADD | BPF_W),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_4,
		.off = offsetof(struct dummy_offset, u32),
	},
	{
		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_4,
		.off = offsetof(struct dummy_offset, u64),
	},
	/* atomically add TEST_MUL_1 */
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_5,
		.imm = TEST_MUL_1,
	},
	{
		.code = (BPF_STX | EBPF_XADD | BPF_W),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_5,
		.off = offsetof(struct dummy_offset, u32),
	},
	{
		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_5,
		.off = offsetof(struct dummy_offset, u64),
	},
	/* atomically add TEST_MUL_2 (negative) */
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_6,
		.imm = TEST_MUL_2,
	},
	{
		.code = (BPF_STX | EBPF_XADD | BPF_W),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_6,
		.off = offsetof(struct dummy_offset, u32),
	},
	{
		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_6,
		.off = offsetof(struct dummy_offset, u64),
	},
	/* atomically add TEST_JCC_2 (negative) */
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_7,
		.imm = TEST_JCC_2,
	},
	{
		.code = (BPF_STX | EBPF_XADD | BPF_W),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_7,
		.off = offsetof(struct dummy_offset, u32),
	},
	{
		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_7,
		.off = offsetof(struct dummy_offset, u64),
	},
	/* atomically add TEST_JCC_3 */
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_8,
		.imm = TEST_JCC_3,
	},
	{
		.code = (BPF_STX | EBPF_XADD | BPF_W),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_8,
		.off = offsetof(struct dummy_offset, u32),
	},
	{
		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_8,
		.off = offsetof(struct dummy_offset, u64),
	},
	/* return 1 */
	{
		.code = (BPF_ALU | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 1,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};
1536 
1537 static int
1538 test_xadd1_check(uint64_t rc, const void *arg)
1539 {
1540 	uint64_t rv;
1541 	const struct dummy_offset *dft;
1542 	struct dummy_offset dfe;
1543 
1544 	dft = arg;
1545 	memset(&dfe, 0, sizeof(dfe));
1546 
1547 	rv = 1;
1548 	rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1549 	rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1550 
1551 	rv = -1;
1552 	rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1553 	rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1554 
1555 	rv = (int32_t)TEST_FILL_1;
1556 	rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1557 	rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1558 
1559 	rv = TEST_MUL_1;
1560 	rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1561 	rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1562 
1563 	rv = TEST_MUL_2;
1564 	rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1565 	rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1566 
1567 	rv = TEST_JCC_2;
1568 	rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1569 	rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1570 
1571 	rv = TEST_JCC_3;
1572 	rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1573 	rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1574 
1575 	return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe));
1576 }
1577 
1578 /* alu div test-cases */
static const struct ebpf_insn test_div1_prog[] = {

	/* load in[0].u32, in[1].u64, in[2].u32 into r2, r3, r4 */
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u32),
	},
	{
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[1].u64),
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[2].u32),
	},
	/* 32-bit divide and 64-bit modulo by immediate */
	{
		.code = (BPF_ALU | BPF_DIV | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = TEST_MUL_1,
	},
	{
		.code = (EBPF_ALU64 | BPF_MOD | BPF_K),
		.dst_reg = EBPF_REG_3,
		.imm = TEST_MUL_2,
	},
	/* force r2 and r3 odd, so the register divides below are non-zero */
	{
		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = 1,
	},
	{
		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
		.dst_reg = EBPF_REG_3,
		.imm = 1,
	},
	/* 32-bit modulo and 64-bit divide by register */
	{
		.code = (BPF_ALU | BPF_MOD | BPF_X),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_2,
	},
	{
		.code = (EBPF_ALU64 | BPF_DIV | BPF_X),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_3,
	},
	/* store r2, r3, r4 into out[0..2].u64 */
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_vect8, out[0].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_3,
		.off = offsetof(struct dummy_vect8, out[1].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_4,
		.off = offsetof(struct dummy_vect8, out[2].u64),
	},
	/* check that we can handle division by zero gracefully. */
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[3].u32),
	},
	{
		.code = (BPF_ALU | BPF_DIV | BPF_X),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_2,
	},
	/* return 1 */
	{
		.code = (BPF_ALU | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 1,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};
1669 
1670 static int
1671 test_div1_check(uint64_t rc, const void *arg)
1672 {
1673 	uint64_t r2, r3, r4;
1674 	const struct dummy_vect8 *dvt;
1675 	struct dummy_vect8 dve;
1676 
1677 	dvt = arg;
1678 	memset(&dve, 0, sizeof(dve));
1679 
1680 	r2 = dvt->in[0].u32;
1681 	r3 = dvt->in[1].u64;
1682 	r4 = dvt->in[2].u32;
1683 
1684 	r2 = (uint32_t)r2 / TEST_MUL_1;
1685 	r3 %= TEST_MUL_2;
1686 	r2 |= 1;
1687 	r3 |= 1;
1688 	r4 = (uint32_t)(r4 % r2);
1689 	r4 /= r3;
1690 
1691 	dve.out[0].u64 = r2;
1692 	dve.out[1].u64 = r3;
1693 	dve.out[2].u64 = r4;
1694 
1695 	/*
1696 	 * in the test prog we attempted to divide by zero.
1697 	 * so return value should return 0.
1698 	 */
1699 	return cmp_res(__func__, 0, rc, dve.out, dvt->out, sizeof(dve.out));
1700 }
1701 
1702 /* call test-cases */
static const struct ebpf_insn test_call1_prog[] = {

	/* r2 = input->u32, r3 = input->u64 */
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_offset, u32),
	},
	{
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_offset, u64),
	},
	/* spill both values onto the stack at [r10-4] and [r10-16] */
	{
		.code = (BPF_STX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_10,
		.src_reg = EBPF_REG_2,
		.off = -4,
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_10,
		.src_reg = EBPF_REG_3,
		.off = -16,
	},
	/* r2 = r10 - 4, r3 = r10 - 16: pointers to the spilled values */
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_10,
	},
	{
		.code = (EBPF_ALU64 | BPF_SUB | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = 4,
	},
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_10,
	},
	{
		.code = (EBPF_ALU64 | BPF_SUB | BPF_K),
		.dst_reg = EBPF_REG_3,
		.imm = 16,
	},
	/* call external symbol #0 (dummy_func1 via test_call1_xsym) */
	{
		.code = (BPF_JMP | EBPF_CALL),
		.imm = 0,
	},
	/* reload the (updated) values from the stack */
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_10,
		.off = -4,
	},
	{
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_10,
		.off = -16
	},
	/* return their sum */
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_2,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};
1774 
1775 static void
1776 dummy_func1(const void *p, uint32_t *v32, uint64_t *v64)
1777 {
1778 	const struct dummy_offset *dv;
1779 
1780 	dv = p;
1781 
1782 	v32[0] += dv->u16;
1783 	v64[0] += dv->u8;
1784 }
1785 
1786 static int
1787 test_call1_check(uint64_t rc, const void *arg)
1788 {
1789 	uint32_t v32;
1790 	uint64_t v64;
1791 	const struct dummy_offset *dv;
1792 
1793 	dv = arg;
1794 
1795 	v32 = dv->u32;
1796 	v64 = dv->u64;
1797 	dummy_func1(arg, &v32, &v64);
1798 	v64 += v32;
1799 
1800 	if (v64 != rc) {
1801 		printf("%s@%d: invalid return value "
1802 			"expected=0x%" PRIx64 ", actual=0x%" PRIx64 "\n",
1803 			__func__, __LINE__, v64, rc);
1804 		return -1;
1805 	}
1806 	return 0;
1807 	return cmp_res(__func__, v64, rc, dv, dv, sizeof(*dv));
1808 }
1809 
/* external symbol table for test_call1: dummy_func1 with 3 pointer args */
static const struct rte_bpf_xsym test_call1_xsym[] = {
	{
		.name = RTE_STR(dummy_func1),
		.type = RTE_BPF_XTYPE_FUNC,
		.func = {
			.val = (void *)dummy_func1,
			.nb_args = 3,
			.args = {
				[0] = {
					.type = RTE_BPF_ARG_PTR,
					.size = sizeof(struct dummy_offset),
				},
				[1] = {
					.type = RTE_BPF_ARG_PTR,
					.size = sizeof(uint32_t),
				},
				[2] = {
					.type = RTE_BPF_ARG_PTR,
					.size = sizeof(uint64_t),
				},
			},
		},
	},
};
1834 
static const struct ebpf_insn test_call2_prog[] = {

	/* r1 and r2 point at two dummy_offset slots on the stack */
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_10,
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_K),
		.dst_reg = EBPF_REG_1,
		.imm = -(int32_t)sizeof(struct dummy_offset),
	},
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_10,
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = -2 * (int32_t)sizeof(struct dummy_offset),
	},
	/* call external symbol #0 (dummy_func2 via test_call2_xsym) */
	{
		.code = (BPF_JMP | EBPF_CALL),
		.imm = 0,
	},
	/* sum a.u64 + a.u32 + b.u16 + b.u8 from the filled stack slots */
	{
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_10,
		.off = -(int32_t)(sizeof(struct dummy_offset) -
			offsetof(struct dummy_offset, u64)),
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_10,
		.off = -(int32_t)(sizeof(struct dummy_offset) -
			offsetof(struct dummy_offset, u32)),
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_1,
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_H),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_10,
		.off = -(int32_t)(2 * sizeof(struct dummy_offset) -
			offsetof(struct dummy_offset, u16)),
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_1,
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_B),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_10,
		.off = -(int32_t)(2 * sizeof(struct dummy_offset) -
			offsetof(struct dummy_offset, u8)),
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_1,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},

};
1909 
1910 static void
1911 dummy_func2(struct dummy_offset *a, struct dummy_offset *b)
1912 {
1913 	uint64_t v;
1914 
1915 	v = 0;
1916 	a->u64 = v++;
1917 	a->u32 = v++;
1918 	a->u16 = v++;
1919 	a->u8 = v++;
1920 	b->u64 = v++;
1921 	b->u32 = v++;
1922 	b->u16 = v++;
1923 	b->u8 = v++;
1924 }
1925 
1926 static int
1927 test_call2_check(uint64_t rc, const void *arg)
1928 {
1929 	uint64_t v;
1930 	struct dummy_offset a, b;
1931 
1932 	RTE_SET_USED(arg);
1933 
1934 	dummy_func2(&a, &b);
1935 	v = a.u64 + a.u32 + b.u16 + b.u8;
1936 
1937 	if (v != rc) {
1938 		printf("%s@%d: invalid return value "
1939 			"expected=0x%" PRIx64 ", actual=0x%" PRIx64 "\n",
1940 			__func__, __LINE__, v, rc);
1941 		return -1;
1942 	}
1943 	return 0;
1944 }
1945 
/* external symbol table for test_call2: dummy_func2 with 2 pointer args */
static const struct rte_bpf_xsym test_call2_xsym[] = {
	{
		.name = RTE_STR(dummy_func2),
		.type = RTE_BPF_XTYPE_FUNC,
		.func = {
			.val = (void *)dummy_func2,
			.nb_args = 2,
			.args = {
				[0] = {
					.type = RTE_BPF_ARG_PTR,
					.size = sizeof(struct dummy_offset),
				},
				[1] = {
					.type = RTE_BPF_ARG_PTR,
					.size = sizeof(struct dummy_offset),
				},
			},
		},
	},
};
1966 
static const struct ebpf_insn test_call3_prog[] = {

	/* call external symbol #0 (dummy_func3), pointer result in r0 */
	{
		.code = (BPF_JMP | EBPF_CALL),
		.imm = 0,
	},
	/* load u8/u16/u32/u64 fields through the returned pointer */
	{
		.code = (BPF_LDX | BPF_MEM | BPF_B),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_0,
		.off = offsetof(struct dummy_offset, u8),
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_H),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_0,
		.off = offsetof(struct dummy_offset, u16),
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_0,
		.off = offsetof(struct dummy_offset, u32),
	},
	{
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_0,
		.off = offsetof(struct dummy_offset, u64),
	},
	/* return sum */
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_4,
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_3,
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_2,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};
2017 
2018 static const struct dummy_offset *
2019 dummy_func3(const struct dummy_vect8 *p)
2020 {
2021 	return &p->in[RTE_DIM(p->in) - 1];
2022 }
2023 
2024 static void
2025 test_call3_prepare(void *arg)
2026 {
2027 	struct dummy_vect8 *pv;
2028 	struct dummy_offset *df;
2029 
2030 	pv = arg;
2031 	df = (struct dummy_offset *)(uintptr_t)dummy_func3(pv);
2032 
2033 	memset(pv, 0, sizeof(*pv));
2034 	df->u64 = (int32_t)TEST_FILL_1;
2035 	df->u32 = df->u64;
2036 	df->u16 = df->u64;
2037 	df->u8 = df->u64;
2038 }
2039 
2040 static int
2041 test_call3_check(uint64_t rc, const void *arg)
2042 {
2043 	uint64_t v;
2044 	const struct dummy_vect8 *pv;
2045 	const struct dummy_offset *dft;
2046 
2047 	pv = arg;
2048 	dft = dummy_func3(pv);
2049 
2050 	v = dft->u64;
2051 	v += dft->u32;
2052 	v += dft->u16;
2053 	v += dft->u8;
2054 
2055 	return cmp_res(__func__, v, rc, pv, pv, sizeof(*pv));
2056 }
2057 
/* external symbol table for test_call3: dummy_func3, pointer in/pointer out */
static const struct rte_bpf_xsym test_call3_xsym[] = {
	{
		.name = RTE_STR(dummy_func3),
		.type = RTE_BPF_XTYPE_FUNC,
		.func = {
			.val = (void *)dummy_func3,
			.nb_args = 1,
			.args = {
				[0] = {
					.type = RTE_BPF_ARG_PTR,
					.size = sizeof(struct dummy_vect8),
				},
			},
			.ret = {
				.type = RTE_BPF_ARG_PTR,
				.size = sizeof(struct dummy_offset),
			},
		},
	},
};
2078 
2079 /* Test for stack corruption in multiple function calls */
/* Test for stack corruption in multiple function calls */
static const struct ebpf_insn test_call4_prog[] = {
	/* store bytes 1,2,3,4 at stack offsets [-4..-1] */
	{
		.code = (BPF_ST | BPF_MEM | BPF_B),
		.dst_reg = EBPF_REG_10,
		.off = -4,
		.imm = 1,
	},
	{
		.code = (BPF_ST | BPF_MEM | BPF_B),
		.dst_reg = EBPF_REG_10,
		.off = -3,
		.imm = 2,
	},
	{
		.code = (BPF_ST | BPF_MEM | BPF_B),
		.dst_reg = EBPF_REG_10,
		.off = -2,
		.imm = 3,
	},
	{
		.code = (BPF_ST | BPF_MEM | BPF_B),
		.dst_reg = EBPF_REG_10,
		.off = -1,
		.imm = 4,
	},
	/* r1 = r10 - 4, r2 = 4: arguments for dummy_func4_0 (the frob) */
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_10,
	},
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = 4,
	},
	{
		.code = (EBPF_ALU64 | BPF_SUB | BPF_X),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
	},
	/* call external symbol #0 (dummy_func4_0) */
	{
		.code = (BPF_JMP | EBPF_CALL),
		.imm = 0,
	},
	/* reload the frobbed bytes into r1..r4 */
	{
		.code = (BPF_LDX | BPF_MEM | BPF_B),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_10,
		.off = -4,
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_B),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_10,
		.off = -3,
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_B),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_10,
		.off = -2,
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_B),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_10,
		.off = -1,
	},
	/* call external symbol #1 (dummy_func4_1) to gather the bytes */
	{
		.code = (BPF_JMP | EBPF_CALL),
		.imm = 1,
	},
	/* undo the frob: r0 ^= TEST_MEMFROB */
	{
		.code = (EBPF_ALU64 | BPF_XOR | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = TEST_MEMFROB,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};
2161 
/* Gather four bytes into one 32-bit big-endian-ordered word. */
static uint32_t
dummy_func4_1(uint8_t a, uint8_t b, uint8_t c, uint8_t d)
{
	/*
	 * Cast to uint32_t before shifting: integer promotion makes 'a' a
	 * (signed) int, and left-shifting a value >= 0x80 by 24 would shift
	 * into the sign bit - undefined behaviour (C11 6.5.7p4).
	 */
	return ((uint32_t)a << 24) | ((uint32_t)b << 16) |
		((uint32_t)c << 8) | (uint32_t)d;
}
2168 
/* Implementation of memfrob: XOR the first n bytes of *s with 42,
 * then return the (now frobbed) word *s points at.
 */
static uint32_t
dummy_func4_0(uint32_t *s, uint8_t n)
{
	char *bytes = (char *)s;
	uint8_t i;

	for (i = 0; i < n; i++)
		bytes[i] ^= 42;
	return *s;
}
2178 
2179 
2180 static int
2181 test_call4_check(uint64_t rc, const void *arg)
2182 {
2183 	uint8_t a[4] = {1, 2, 3, 4};
2184 	uint32_t s, v = 0;
2185 
2186 	RTE_SET_USED(arg);
2187 
2188 	s = dummy_func4_0((uint32_t *)a, 4);
2189 
2190 	s = dummy_func4_1(a[0], a[1], a[2], a[3]);
2191 
2192 	v = s ^ TEST_MEMFROB;
2193 
2194 	return cmp_res(__func__, v, rc, &v, &rc, sizeof(v));
2195 }
2196 
/* external symbol table for test_call4: dummy_func4_0 and dummy_func4_1 */
static const struct rte_bpf_xsym test_call4_xsym[] = {
	[0] = {
		.name = RTE_STR(dummy_func4_0),
		.type = RTE_BPF_XTYPE_FUNC,
		.func = {
			.val = (void *)dummy_func4_0,
			.nb_args = 2,
			.args = {
				[0] = {
					.type = RTE_BPF_ARG_PTR,
					.size = 4 * sizeof(uint8_t),
				},
				[1] = {
					.type = RTE_BPF_ARG_RAW,
					.size = sizeof(uint8_t),
				},
			},
			.ret = {
				.type = RTE_BPF_ARG_RAW,
				.size = sizeof(uint32_t),
			},
		},
	},
	[1] = {
		.name = RTE_STR(dummy_func4_1),
		.type = RTE_BPF_XTYPE_FUNC,
		.func = {
			.val = (void *)dummy_func4_1,
			.nb_args = 4,
			.args = {
				[0] = {
					.type = RTE_BPF_ARG_RAW,
					.size = sizeof(uint8_t),
				},
				[1] = {
					.type = RTE_BPF_ARG_RAW,
					.size = sizeof(uint8_t),
				},
				[2] = {
					.type = RTE_BPF_ARG_RAW,
					.size = sizeof(uint8_t),
				},
				[3] = {
					.type = RTE_BPF_ARG_RAW,
					.size = sizeof(uint8_t),
				},
			},
			.ret = {
				.type = RTE_BPF_ARG_RAW,
				.size = sizeof(uint32_t),
			},
		},
	},
};
2251 
2252 /* string compare test case */
static const struct ebpf_insn test_call5_prog[] = {

	/* spell "geek\0" on the stack at [r10-8] */
	[0] = {
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_1,
		.imm = STRING_GEEK,
	},
	[1] = {
		.code = (BPF_STX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_10,
		.src_reg = EBPF_REG_1,
		.off = -8,
	},
	/* r6 = 0, used for NUL terminators and the final comparison */
	[2] = {
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_6,
		.imm = 0,
	},
	[3] = {
		.code = (BPF_STX | BPF_MEM | BPF_B),
		.dst_reg = EBPF_REG_10,
		.src_reg = EBPF_REG_6,
		.off = -4,
	},
	[4] = {
		.code = (BPF_STX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_10,
		.src_reg = EBPF_REG_6,
		.off = -12,
	},
	/* spell "week\0" on the stack at [r10-16] */
	[5] = {
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_1,
		.imm = STRING_WEEK,
	},
	[6] = {
		.code = (BPF_STX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_10,
		.src_reg = EBPF_REG_1,
		.off = -16,
	},
	/* r1 = r2 = &stack[-8]: compare the string with itself */
	[7] = {
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_10,
	},
	[8] = {
		.code = (EBPF_ALU64 | BPF_ADD | BPF_K),
		.dst_reg = EBPF_REG_1,
		.imm = -8,
	},
	[9] = {
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_1,
	},
	[10] = {
		.code = (BPF_JMP | EBPF_CALL),
		.imm = 0,
	},
	/* keep only the low 32 bit of the result; return -1 if non-zero */
	[11] = {
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_0,
	},
	[12] = {
		.code = (BPF_ALU | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = -1,
	},
	[13] = {
		.code = (EBPF_ALU64 | BPF_LSH | BPF_K),
		.dst_reg = EBPF_REG_1,
		.imm = 0x20,
	},
	[14] = {
		.code = (EBPF_ALU64 | BPF_RSH | BPF_K),
		.dst_reg = EBPF_REG_1,
		.imm = 0x20,
	},
	/* non-zero result for identical strings: jump to exit with r0 = -1 */
	[15] = {
		.code = (BPF_JMP | EBPF_JNE | BPF_K),
		.dst_reg = EBPF_REG_1,
		.off = 11,
		.imm = 0,
	},
	/* r1 = &stack[-8] ("geek"), r2 = &stack[-16] ("week") */
	[16] = {
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_10,
	},
	[17] = {
		.code = (EBPF_ALU64 | BPF_ADD | BPF_K),
		.dst_reg = EBPF_REG_1,
		.imm = -8,
	},
	[18] = {
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_10,
	},
	[19] = {
		.code = (EBPF_ALU64 | BPF_ADD | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = -16,
	},
	[20] = {
		.code = (BPF_JMP | EBPF_CALL),
		.imm = 0,
	},
	/* truncate the result of the second comparison to 32 bit */
	[21] = {
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_0,
	},
	[22] = {
		.code = (EBPF_ALU64 | BPF_LSH | BPF_K),
		.dst_reg = EBPF_REG_1,
		.imm = 0x20,
	},
	[23] = {
		.code = (EBPF_ALU64 | BPF_RSH | BPF_K),
		.dst_reg = EBPF_REG_1,
		.imm = 0x20,
	},
	[24] = {
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_1,
	},
	/* if the strings differ (r1 != 0), set r0 = 0 before exiting */
	[25] = {
		.code = (BPF_JMP | BPF_JEQ | BPF_X),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_6,
		.off = 1,
	},
	[26] = {
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 0,
	},
	[27] = {
		.code = (BPF_JMP | EBPF_EXIT),
	},
};
2398 
/* String comparison implementation, return 0 if equal else difference */
static uint32_t
dummy_func5(const char *s1, const char *s2)
{
	/* advance both cursors while the bytes match and s1 is not done */
	for (; *s1 != '\0' && *s1 == *s2; s1++, s2++)
		;
	return *(const unsigned char *)s1 - *(const unsigned char *)s2;
}
2409 
/*
 * Verify test_call5_prog on the host: an identical-string compare must
 * yield 0, a different-string compare must not; the final v mirrors the
 * value the BPF program is expected to return.
 */
static int
test_call5_check(uint64_t rc, const void *arg)
{
	char s1[] = "geek";
	char s2[] = "week";
	uint32_t v;

	RTE_SET_USED(arg);

	/* identical strings: must compare equal */
	v = dummy_func5(s1, s1);
	if (v != 0) {
		v = -1;
		goto fail;
	}

	/* different strings: must not compare equal */
	v = dummy_func5(s1, s2);
	if (v == 0)
		goto fail;

	v = 0;

fail:

	return cmp_res(__func__, v, rc, &v, &rc, sizeof(v));
}
2435 
/* external symbol table for test_call5: dummy_func5 (string compare) */
static const struct rte_bpf_xsym test_call5_xsym[] = {
	[0] = {
		.name = RTE_STR(dummy_func5),
		.type = RTE_BPF_XTYPE_FUNC,
		.func = {
			.val = (void *)dummy_func5,
			.nb_args = 2,
			.args = {
				[0] = {
					.type = RTE_BPF_ARG_PTR,
					.size = sizeof(char),
				},
				[1] = {
					.type = RTE_BPF_ARG_PTR,
					.size = sizeof(char),
				},
			},
			.ret = {
				.type = RTE_BPF_ARG_RAW,
				.size = sizeof(uint32_t),
			},
		},
	},
};
2460 
2461 static const struct bpf_test tests[] = {
2462 	{
2463 		.name = "test_store1",
2464 		.arg_sz = sizeof(struct dummy_offset),
2465 		.prm = {
2466 			.ins = test_store1_prog,
2467 			.nb_ins = RTE_DIM(test_store1_prog),
2468 			.prog_arg = {
2469 				.type = RTE_BPF_ARG_PTR,
2470 				.size = sizeof(struct dummy_offset),
2471 			},
2472 		},
2473 		.prepare = test_store1_prepare,
2474 		.check_result = test_store1_check,
2475 	},
2476 	{
2477 		.name = "test_store2",
2478 		.arg_sz = sizeof(struct dummy_offset),
2479 		.prm = {
2480 			.ins = test_store2_prog,
2481 			.nb_ins = RTE_DIM(test_store2_prog),
2482 			.prog_arg = {
2483 				.type = RTE_BPF_ARG_PTR,
2484 				.size = sizeof(struct dummy_offset),
2485 			},
2486 		},
2487 		.prepare = test_store1_prepare,
2488 		.check_result = test_store1_check,
2489 	},
2490 	{
2491 		.name = "test_load1",
2492 		.arg_sz = sizeof(struct dummy_offset),
2493 		.prm = {
2494 			.ins = test_load1_prog,
2495 			.nb_ins = RTE_DIM(test_load1_prog),
2496 			.prog_arg = {
2497 				.type = RTE_BPF_ARG_PTR,
2498 				.size = sizeof(struct dummy_offset),
2499 			},
2500 		},
2501 		.prepare = test_load1_prepare,
2502 		.check_result = test_load1_check,
2503 	},
2504 	{
2505 		.name = "test_ldimm1",
2506 		.arg_sz = sizeof(struct dummy_offset),
2507 		.prm = {
2508 			.ins = test_ldimm1_prog,
2509 			.nb_ins = RTE_DIM(test_ldimm1_prog),
2510 			.prog_arg = {
2511 				.type = RTE_BPF_ARG_PTR,
2512 				.size = sizeof(struct dummy_offset),
2513 			},
2514 		},
2515 		.prepare = test_store1_prepare,
2516 		.check_result = test_ldimm1_check,
2517 	},
2518 	{
2519 		.name = "test_mul1",
2520 		.arg_sz = sizeof(struct dummy_vect8),
2521 		.prm = {
2522 			.ins = test_mul1_prog,
2523 			.nb_ins = RTE_DIM(test_mul1_prog),
2524 			.prog_arg = {
2525 				.type = RTE_BPF_ARG_PTR,
2526 				.size = sizeof(struct dummy_vect8),
2527 			},
2528 		},
2529 		.prepare = test_mul1_prepare,
2530 		.check_result = test_mul1_check,
2531 	},
2532 	{
2533 		.name = "test_shift1",
2534 		.arg_sz = sizeof(struct dummy_vect8),
2535 		.prm = {
2536 			.ins = test_shift1_prog,
2537 			.nb_ins = RTE_DIM(test_shift1_prog),
2538 			.prog_arg = {
2539 				.type = RTE_BPF_ARG_PTR,
2540 				.size = sizeof(struct dummy_vect8),
2541 			},
2542 		},
2543 		.prepare = test_shift1_prepare,
2544 		.check_result = test_shift1_check,
2545 	},
2546 	{
2547 		.name = "test_jump1",
2548 		.arg_sz = sizeof(struct dummy_vect8),
2549 		.prm = {
2550 			.ins = test_jump1_prog,
2551 			.nb_ins = RTE_DIM(test_jump1_prog),
2552 			.prog_arg = {
2553 				.type = RTE_BPF_ARG_PTR,
2554 				.size = sizeof(struct dummy_vect8),
2555 			},
2556 		},
2557 		.prepare = test_jump1_prepare,
2558 		.check_result = test_jump1_check,
2559 	},
2560 	{
2561 		.name = "test_jump2",
2562 		.arg_sz = sizeof(struct dummy_net),
2563 		.prm = {
2564 			.ins = test_jump2_prog,
2565 			.nb_ins = RTE_DIM(test_jump2_prog),
2566 			.prog_arg = {
2567 				.type = RTE_BPF_ARG_PTR,
2568 				.size = sizeof(struct dummy_net),
2569 			},
2570 		},
2571 		.prepare = test_jump2_prepare,
2572 		.check_result = test_jump2_check,
2573 	},
2574 	{
2575 		.name = "test_alu1",
2576 		.arg_sz = sizeof(struct dummy_vect8),
2577 		.prm = {
2578 			.ins = test_alu1_prog,
2579 			.nb_ins = RTE_DIM(test_alu1_prog),
2580 			.prog_arg = {
2581 				.type = RTE_BPF_ARG_PTR,
2582 				.size = sizeof(struct dummy_vect8),
2583 			},
2584 		},
2585 		.prepare = test_jump1_prepare,
2586 		.check_result = test_alu1_check,
2587 	},
2588 	{
2589 		.name = "test_bele1",
2590 		.arg_sz = sizeof(struct dummy_vect8),
2591 		.prm = {
2592 			.ins = test_bele1_prog,
2593 			.nb_ins = RTE_DIM(test_bele1_prog),
2594 			.prog_arg = {
2595 				.type = RTE_BPF_ARG_PTR,
2596 				.size = sizeof(struct dummy_vect8),
2597 			},
2598 		},
2599 		.prepare = test_bele1_prepare,
2600 		.check_result = test_bele1_check,
2601 	},
2602 	{
2603 		.name = "test_xadd1",
2604 		.arg_sz = sizeof(struct dummy_offset),
2605 		.prm = {
2606 			.ins = test_xadd1_prog,
2607 			.nb_ins = RTE_DIM(test_xadd1_prog),
2608 			.prog_arg = {
2609 				.type = RTE_BPF_ARG_PTR,
2610 				.size = sizeof(struct dummy_offset),
2611 			},
2612 		},
2613 		.prepare = test_store1_prepare,
2614 		.check_result = test_xadd1_check,
2615 	},
2616 	{
2617 		.name = "test_div1",
2618 		.arg_sz = sizeof(struct dummy_vect8),
2619 		.prm = {
2620 			.ins = test_div1_prog,
2621 			.nb_ins = RTE_DIM(test_div1_prog),
2622 			.prog_arg = {
2623 				.type = RTE_BPF_ARG_PTR,
2624 				.size = sizeof(struct dummy_vect8),
2625 			},
2626 		},
2627 		.prepare = test_mul1_prepare,
2628 		.check_result = test_div1_check,
2629 	},
2630 	{
2631 		.name = "test_call1",
2632 		.arg_sz = sizeof(struct dummy_offset),
2633 		.prm = {
2634 			.ins = test_call1_prog,
2635 			.nb_ins = RTE_DIM(test_call1_prog),
2636 			.prog_arg = {
2637 				.type = RTE_BPF_ARG_PTR,
2638 				.size = sizeof(struct dummy_offset),
2639 			},
2640 			.xsym = test_call1_xsym,
2641 			.nb_xsym = RTE_DIM(test_call1_xsym),
2642 		},
2643 		.prepare = test_load1_prepare,
2644 		.check_result = test_call1_check,
2645 		/* for now don't support function calls on 32 bit platform */
2646 		.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
2647 	},
2648 	{
2649 		.name = "test_call2",
2650 		.arg_sz = sizeof(struct dummy_offset),
2651 		.prm = {
2652 			.ins = test_call2_prog,
2653 			.nb_ins = RTE_DIM(test_call2_prog),
2654 			.prog_arg = {
2655 				.type = RTE_BPF_ARG_PTR,
2656 				.size = sizeof(struct dummy_offset),
2657 			},
2658 			.xsym = test_call2_xsym,
2659 			.nb_xsym = RTE_DIM(test_call2_xsym),
2660 		},
2661 		.prepare = test_store1_prepare,
2662 		.check_result = test_call2_check,
2663 		/* for now don't support function calls on 32 bit platform */
2664 		.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
2665 	},
2666 	{
2667 		.name = "test_call3",
2668 		.arg_sz = sizeof(struct dummy_vect8),
2669 		.prm = {
2670 			.ins = test_call3_prog,
2671 			.nb_ins = RTE_DIM(test_call3_prog),
2672 			.prog_arg = {
2673 				.type = RTE_BPF_ARG_PTR,
2674 				.size = sizeof(struct dummy_vect8),
2675 			},
2676 			.xsym = test_call3_xsym,
2677 			.nb_xsym = RTE_DIM(test_call3_xsym),
2678 		},
2679 		.prepare = test_call3_prepare,
2680 		.check_result = test_call3_check,
2681 		/* for now don't support function calls on 32 bit platform */
2682 		.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
2683 	},
2684 	{
2685 		.name = "test_call4",
2686 		.arg_sz = sizeof(struct dummy_offset),
2687 		.prm = {
2688 			.ins = test_call4_prog,
2689 			.nb_ins = RTE_DIM(test_call4_prog),
2690 			.prog_arg = {
2691 				.type = RTE_BPF_ARG_PTR,
2692 				.size = 2 * sizeof(struct dummy_offset),
2693 			},
2694 			.xsym = test_call4_xsym,
2695 			.nb_xsym = RTE_DIM(test_call4_xsym),
2696 		},
2697 		.prepare = test_store1_prepare,
2698 		.check_result = test_call4_check,
2699 		/* for now don't support function calls on 32 bit platform */
2700 		.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
2701 	},
2702 	{
2703 		.name = "test_call5",
2704 		.arg_sz = sizeof(struct dummy_offset),
2705 		.prm = {
2706 			.ins = test_call5_prog,
2707 			.nb_ins = RTE_DIM(test_call5_prog),
2708 			.prog_arg = {
2709 				.type = RTE_BPF_ARG_PTR,
2710 				.size = sizeof(struct dummy_offset),
2711 			},
2712 			.xsym = test_call5_xsym,
2713 			.nb_xsym = RTE_DIM(test_call5_xsym),
2714 		},
2715 		.prepare = test_store1_prepare,
2716 		.check_result = test_call5_check,
2717 		/* for now don't support function calls on 32 bit platform */
2718 		.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
2719 	},
2720 };
2721 
2722 static int
2723 run_test(const struct bpf_test *tst)
2724 {
2725 	int32_t ret, rv;
2726 	int64_t rc;
2727 	struct rte_bpf *bpf;
2728 	struct rte_bpf_jit jit;
2729 	uint8_t tbuf[tst->arg_sz];
2730 
2731 	printf("%s(%s) start\n", __func__, tst->name);
2732 
2733 	bpf = rte_bpf_load(&tst->prm);
2734 	if (bpf == NULL) {
2735 		printf("%s@%d: failed to load bpf code, error=%d(%s);\n",
2736 			__func__, __LINE__, rte_errno, strerror(rte_errno));
2737 		return -1;
2738 	}
2739 
2740 	tst->prepare(tbuf);
2741 
2742 	rc = rte_bpf_exec(bpf, tbuf);
2743 	ret = tst->check_result(rc, tbuf);
2744 	if (ret != 0) {
2745 		printf("%s@%d: check_result(%s) failed, error: %d(%s);\n",
2746 			__func__, __LINE__, tst->name, ret, strerror(ret));
2747 	}
2748 
2749 	rte_bpf_get_jit(bpf, &jit);
2750 	if (jit.func == NULL)
2751 		return 0;
2752 
2753 	tst->prepare(tbuf);
2754 	rc = jit.func(tbuf);
2755 	rv = tst->check_result(rc, tbuf);
2756 	ret |= rv;
2757 	if (rv != 0) {
2758 		printf("%s@%d: check_result(%s) failed, error: %d(%s);\n",
2759 			__func__, __LINE__, tst->name, rv, strerror(ret));
2760 	}
2761 
2762 	rte_bpf_destroy(bpf);
2763 	return ret;
2764 
2765 }
2766 
2767 static int
2768 test_bpf(void)
2769 {
2770 	int32_t rc, rv;
2771 	uint32_t i;
2772 
2773 	rc = 0;
2774 	for (i = 0; i != RTE_DIM(tests); i++) {
2775 		rv = run_test(tests + i);
2776 		if (tests[i].allow_fail == 0)
2777 			rc |= rv;
2778 	}
2779 
2780 	return rc;
2781 }
2782 
2783 REGISTER_TEST_COMMAND(bpf_autotest, test_bpf);
2784