xref: /dpdk/app/test/test_bpf.c (revision ff4e52ef)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4 
5 #include <stdio.h>
6 #include <string.h>
7 #include <stdint.h>
8 #include <inttypes.h>
9 
10 #include <rte_memory.h>
11 #include <rte_debug.h>
12 #include <rte_hexdump.h>
13 #include <rte_random.h>
14 #include <rte_byteorder.h>
15 #include <rte_errno.h>
16 #include <rte_bpf.h>
17 #include <rte_ether.h>
18 #include <rte_ip.h>
19 
20 #include "test.h"
21 
22 /*
23  * Basic functional tests for librte_bpf.
24  * The main procedure: load an eBPF program, execute it and
25  * compare results with expected values.
26  */
27 
28 struct dummy_offset {
29 	uint64_t u64;
30 	uint32_t u32;
31 	uint16_t u16;
32 	uint8_t  u8;
33 };
34 
35 struct dummy_vect8 {
36 	struct dummy_offset in[8];
37 	struct dummy_offset out[8];
38 };
39 
40 struct dummy_net {
41 	struct rte_ether_hdr eth_hdr;
42 	struct rte_vlan_hdr vlan_hdr;
43 	struct rte_ipv4_hdr ip_hdr;
44 };
45 
46 #define	DUMMY_MBUF_NUM	2
47 
48 /* first mbuf in the packet, should always be at offset 0 */
49 struct dummy_mbuf {
50 	struct rte_mbuf mb[DUMMY_MBUF_NUM];
51 	uint8_t buf[DUMMY_MBUF_NUM][RTE_MBUF_DEFAULT_BUF_SIZE];
52 };
53 
54 #define	TEST_FILL_1	0xDEADBEEF
55 
56 #define	TEST_MUL_1	21
57 #define TEST_MUL_2	-100
58 
59 #define TEST_SHIFT_1	15
60 #define TEST_SHIFT_2	33
61 
62 #define TEST_SHIFT32_MASK	(CHAR_BIT * sizeof(uint32_t) - 1)
63 #define TEST_SHIFT64_MASK	(CHAR_BIT * sizeof(uint64_t) - 1)
64 
65 #define TEST_JCC_1	0
66 #define TEST_JCC_2	-123
67 #define TEST_JCC_3	5678
68 #define TEST_JCC_4	TEST_FILL_1
69 
70 #define TEST_IMM_1	UINT64_MAX
71 #define TEST_IMM_2	((uint64_t)INT64_MIN)
72 #define TEST_IMM_3	((uint64_t)INT64_MAX + INT32_MAX)
73 #define TEST_IMM_4	((uint64_t)UINT32_MAX)
74 #define TEST_IMM_5	((uint64_t)UINT32_MAX + 1)
75 
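/* 42 (0x2a) replicated in every byte - the XOR pattern used by memfrob(3) */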
76 #define TEST_MEMFROB	0x2a2a2a2a
77 
78 #define STRING_GEEK	0x6B656567
79 #define STRING_WEEK	0x6B656577
80 
81 #define TEST_NETMASK 0xffffff00
82 #define TEST_SUBNET  0xaca80200
83 
84 uint8_t src_mac[] = { 0x00, 0xFF, 0xAA, 0xFF, 0xAA, 0xFF };
85 uint8_t dst_mac[] = { 0x00, 0xAA, 0xFF, 0xAA, 0xFF, 0xAA };
86 
87 uint32_t ip_src_addr = (172U << 24) | (168U << 16) | (2 << 8) | 1;
88 uint32_t ip_dst_addr = (172U << 24) | (168U << 16) | (2 << 8) | 2;
89 
90 struct bpf_test {
91 	const char *name;
92 	size_t arg_sz;
93 	struct rte_bpf_prm prm;
94 	void (*prepare)(void *);
95 	int (*check_result)(uint64_t, const void *);
96 	uint32_t allow_fail;
97 };
98 
99 /*
100  * Compare return value and result data with expected ones.
101  * Report a failure if they don't match.
102  */
103 static int
104 cmp_res(const char *func, uint64_t exp_rc, uint64_t ret_rc,
105 	const void *exp_res, const void *ret_res, size_t res_sz)
106 {
107 	int32_t ret;
108 
109 	ret = 0;
110 	if (exp_rc != ret_rc) {
111 		printf("%s@%d: invalid return value, expected: 0x%" PRIx64
112 			", result: 0x%" PRIx64 "\n",
113 			func, __LINE__, exp_rc, ret_rc);
114 		ret |= -1;
115 	}
116 
117 	if (memcmp(exp_res, ret_res, res_sz) != 0) {
118 		printf("%s: invalid value\n", func);
119 		rte_memdump(stdout, "expected", exp_res, res_sz);
120 		rte_memdump(stdout, "result", ret_res, res_sz);
121 		ret |= -1;
122 	}
123 
124 	return ret;
125 }
126 
127 /* store immediate test-cases */
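/*
 * Rough C equivalent of the program below - a sketch, assuming (as
 * the harness arranges) that R1 holds the struct dummy_offset pointer:
 *
 *	struct dummy_offset *p = (void *)r1;
 *	p->u8  = (uint8_t)TEST_FILL_1;
 *	p->u16 = (uint16_t)TEST_FILL_1;
 *	p->u32 = (uint32_t)TEST_FILL_1;
 *	p->u64 = (int32_t)TEST_FILL_1;	(the 32-bit imm is sign-extended)
 *	return 1;
 */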
128 static const struct ebpf_insn test_store1_prog[] = {
129 	{
130 		.code = (BPF_ST | BPF_MEM | BPF_B),
131 		.dst_reg = EBPF_REG_1,
132 		.off = offsetof(struct dummy_offset, u8),
133 		.imm = TEST_FILL_1,
134 	},
135 	{
136 		.code = (BPF_ST | BPF_MEM | BPF_H),
137 		.dst_reg = EBPF_REG_1,
138 		.off = offsetof(struct dummy_offset, u16),
139 		.imm = TEST_FILL_1,
140 	},
141 	{
142 		.code = (BPF_ST | BPF_MEM | BPF_W),
143 		.dst_reg = EBPF_REG_1,
144 		.off = offsetof(struct dummy_offset, u32),
145 		.imm = TEST_FILL_1,
146 	},
147 	{
148 		.code = (BPF_ST | BPF_MEM | EBPF_DW),
149 		.dst_reg = EBPF_REG_1,
150 		.off = offsetof(struct dummy_offset, u64),
151 		.imm = TEST_FILL_1,
152 	},
153 	/* return 1 */
154 	{
155 		.code = (BPF_ALU | EBPF_MOV | BPF_K),
156 		.dst_reg = EBPF_REG_0,
157 		.imm = 1,
158 	},
159 	{
160 		.code = (BPF_JMP | EBPF_EXIT),
161 	},
162 };
163 
164 static void
165 test_store1_prepare(void *arg)
166 {
167 	struct dummy_offset *df;
168 
169 	df = arg;
170 	memset(df, 0, sizeof(*df));
171 }
172 
173 static int
174 test_store1_check(uint64_t rc, const void *arg)
175 {
176 	const struct dummy_offset *dft;
177 	struct dummy_offset dfe;
178 
179 	dft = arg;
180 
181 	memset(&dfe, 0, sizeof(dfe));
182 	dfe.u64 = (int32_t)TEST_FILL_1;
183 	dfe.u32 = dfe.u64;
184 	dfe.u16 = dfe.u64;
185 	dfe.u8 = dfe.u64;
186 
187 	return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe));
188 }
189 
190 /* store register test-cases */
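/*
 * Same stores as above, but through a register: EBPF_ALU64 | EBPF_MOV
 * with BPF_K sign-extends the 32-bit immediate into R2, and each
 * BPF_STX writes the low 8/16/32/64 bits of R2 at the matching offset.
 */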
191 static const struct ebpf_insn test_store2_prog[] = {
192 
193 	{
194 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
195 		.dst_reg = EBPF_REG_2,
196 		.imm = TEST_FILL_1,
197 	},
198 	{
199 		.code = (BPF_STX | BPF_MEM | BPF_B),
200 		.dst_reg = EBPF_REG_1,
201 		.src_reg = EBPF_REG_2,
202 		.off = offsetof(struct dummy_offset, u8),
203 	},
204 	{
205 		.code = (BPF_STX | BPF_MEM | BPF_H),
206 		.dst_reg = EBPF_REG_1,
207 		.src_reg = EBPF_REG_2,
208 		.off = offsetof(struct dummy_offset, u16),
209 	},
210 	{
211 		.code = (BPF_STX | BPF_MEM | BPF_W),
212 		.dst_reg = EBPF_REG_1,
213 		.src_reg = EBPF_REG_2,
214 		.off = offsetof(struct dummy_offset, u32),
215 	},
216 	{
217 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
218 		.dst_reg = EBPF_REG_1,
219 		.src_reg = EBPF_REG_2,
220 		.off = offsetof(struct dummy_offset, u64),
221 	},
222 	/* return 1 */
223 	{
224 		.code = (BPF_ALU | EBPF_MOV | BPF_K),
225 		.dst_reg = EBPF_REG_0,
226 		.imm = 1,
227 	},
228 	{
229 		.code = (BPF_JMP | EBPF_EXIT),
230 	},
231 };
232 
233 /* load test-cases */
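/*
 * Loads each field into its own register and returns the sum.
 * Sub-64-bit BPF_LDX | BPF_MEM loads zero-extend into the destination
 * register, which is why test_load1_check() can sum the raw fields.
 */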
234 static const struct ebpf_insn test_load1_prog[] = {
235 
236 	{
237 		.code = (BPF_LDX | BPF_MEM | BPF_B),
238 		.dst_reg = EBPF_REG_2,
239 		.src_reg = EBPF_REG_1,
240 		.off = offsetof(struct dummy_offset, u8),
241 	},
242 	{
243 		.code = (BPF_LDX | BPF_MEM | BPF_H),
244 		.dst_reg = EBPF_REG_3,
245 		.src_reg = EBPF_REG_1,
246 		.off = offsetof(struct dummy_offset, u16),
247 	},
248 	{
249 		.code = (BPF_LDX | BPF_MEM | BPF_W),
250 		.dst_reg = EBPF_REG_4,
251 		.src_reg = EBPF_REG_1,
252 		.off = offsetof(struct dummy_offset, u32),
253 	},
254 	{
255 		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
256 		.dst_reg = EBPF_REG_0,
257 		.src_reg = EBPF_REG_1,
258 		.off = offsetof(struct dummy_offset, u64),
259 	},
260 	/* return sum */
261 	{
262 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
263 		.dst_reg = EBPF_REG_0,
264 		.src_reg = EBPF_REG_4,
265 	},
266 	{
267 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
268 		.dst_reg = EBPF_REG_0,
269 		.src_reg = EBPF_REG_3,
270 	},
271 	{
272 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
273 		.dst_reg = EBPF_REG_0,
274 		.src_reg = EBPF_REG_2,
275 	},
276 	{
277 		.code = (BPF_JMP | EBPF_EXIT),
278 	},
279 };
280 
281 static void
282 test_load1_prepare(void *arg)
283 {
284 	struct dummy_offset *df;
285 
286 	df = arg;
287 
288 	memset(df, 0, sizeof(*df));
289 	df->u64 = (int32_t)TEST_FILL_1;
290 	df->u32 = df->u64;
291 	df->u16 = df->u64;
292 	df->u8 = df->u64;
293 }
294 
295 static int
296 test_load1_check(uint64_t rc, const void *arg)
297 {
298 	uint64_t v;
299 	const struct dummy_offset *dft;
300 
301 	dft = arg;
302 	v = dft->u64;
303 	v += dft->u32;
304 	v += dft->u16;
305 	v += dft->u8;
306 
307 	return cmp_res(__func__, v, rc, dft, dft, sizeof(*dft));
308 }
309 
310 /* load immediate test-cases */
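/*
 * BPF_LD | BPF_IMM | EBPF_DW occupies two instruction slots: the
 * first imm holds bits 31..0 of the 64-bit constant and the
 * otherwise-empty pseudo-instruction that follows holds bits 63..32.
 */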
311 static const struct ebpf_insn test_ldimm1_prog[] = {
312 
313 	{
314 		.code = (BPF_LD | BPF_IMM | EBPF_DW),
315 		.dst_reg = EBPF_REG_0,
316 		.imm = (uint32_t)TEST_IMM_1,
317 	},
318 	{
319 		.imm = TEST_IMM_1 >> 32,
320 	},
321 	{
322 		.code = (BPF_LD | BPF_IMM | EBPF_DW),
323 		.dst_reg = EBPF_REG_3,
324 		.imm = (uint32_t)TEST_IMM_2,
325 	},
326 	{
327 		.imm = TEST_IMM_2 >> 32,
328 	},
329 	{
330 		.code = (BPF_LD | BPF_IMM | EBPF_DW),
331 		.dst_reg = EBPF_REG_5,
332 		.imm = (uint32_t)TEST_IMM_3,
333 	},
334 	{
335 		.imm = TEST_IMM_3 >> 32,
336 	},
337 	{
338 		.code = (BPF_LD | BPF_IMM | EBPF_DW),
339 		.dst_reg = EBPF_REG_7,
340 		.imm = (uint32_t)TEST_IMM_4,
341 	},
342 	{
343 		.imm = TEST_IMM_4 >> 32,
344 	},
345 	{
346 		.code = (BPF_LD | BPF_IMM | EBPF_DW),
347 		.dst_reg = EBPF_REG_9,
348 		.imm = (uint32_t)TEST_IMM_5,
349 	},
350 	{
351 		.imm = TEST_IMM_5 >> 32,
352 	},
353 	/* return sum */
354 	{
355 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
356 		.dst_reg = EBPF_REG_0,
357 		.src_reg = EBPF_REG_3,
358 	},
359 	{
360 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
361 		.dst_reg = EBPF_REG_0,
362 		.src_reg = EBPF_REG_5,
363 	},
364 	{
365 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
366 		.dst_reg = EBPF_REG_0,
367 		.src_reg = EBPF_REG_7,
368 	},
369 	{
370 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
371 		.dst_reg = EBPF_REG_0,
372 		.src_reg = EBPF_REG_9,
373 	},
374 	{
375 		.code = (BPF_JMP | EBPF_EXIT),
376 	},
377 };
378 
379 static int
380 test_ldimm1_check(uint64_t rc, const void *arg)
381 {
382 	uint64_t v1, v2;
383 
384 	v1 = TEST_IMM_1;
385 	v2 = TEST_IMM_2;
386 	v1 += v2;
387 	v2 = TEST_IMM_3;
388 	v1 += v2;
389 	v2 = TEST_IMM_4;
390 	v1 += v2;
391 	v2 = TEST_IMM_5;
392 	v1 += v2;
393 
394 	return cmp_res(__func__, v1, rc, arg, arg, 0);
395 }
396 
397 
398 /* alu mul test-cases */
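/*
 * Mirrors the arithmetic in test_mul1_check(): BPF_ALU multiplies
 * operate on the low 32 bits and zero-extend the result, while
 * EBPF_ALU64 multiplies use the full 64-bit registers.
 */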
399 static const struct ebpf_insn test_mul1_prog[] = {
400 
401 	{
402 		.code = (BPF_LDX | BPF_MEM | BPF_W),
403 		.dst_reg = EBPF_REG_2,
404 		.src_reg = EBPF_REG_1,
405 		.off = offsetof(struct dummy_vect8, in[0].u32),
406 	},
407 	{
408 		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
409 		.dst_reg = EBPF_REG_3,
410 		.src_reg = EBPF_REG_1,
411 		.off = offsetof(struct dummy_vect8, in[1].u64),
412 	},
413 	{
414 		.code = (BPF_LDX | BPF_MEM | BPF_W),
415 		.dst_reg = EBPF_REG_4,
416 		.src_reg = EBPF_REG_1,
417 		.off = offsetof(struct dummy_vect8, in[2].u32),
418 	},
419 	{
420 		.code = (BPF_ALU | BPF_MUL | BPF_K),
421 		.dst_reg = EBPF_REG_2,
422 		.imm = TEST_MUL_1,
423 	},
424 	{
425 		.code = (EBPF_ALU64 | BPF_MUL | BPF_K),
426 		.dst_reg = EBPF_REG_3,
427 		.imm = TEST_MUL_2,
428 	},
429 	{
430 		.code = (BPF_ALU | BPF_MUL | BPF_X),
431 		.dst_reg = EBPF_REG_4,
432 		.src_reg = EBPF_REG_2,
433 	},
434 	{
435 		.code = (EBPF_ALU64 | BPF_MUL | BPF_X),
436 		.dst_reg = EBPF_REG_4,
437 		.src_reg = EBPF_REG_3,
438 	},
439 	{
440 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
441 		.dst_reg = EBPF_REG_1,
442 		.src_reg = EBPF_REG_2,
443 		.off = offsetof(struct dummy_vect8, out[0].u64),
444 	},
445 	{
446 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
447 		.dst_reg = EBPF_REG_1,
448 		.src_reg = EBPF_REG_3,
449 		.off = offsetof(struct dummy_vect8, out[1].u64),
450 	},
451 	{
452 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
453 		.dst_reg = EBPF_REG_1,
454 		.src_reg = EBPF_REG_4,
455 		.off = offsetof(struct dummy_vect8, out[2].u64),
456 	},
457 	/* return 1 */
458 	{
459 		.code = (BPF_ALU | EBPF_MOV | BPF_K),
460 		.dst_reg = EBPF_REG_0,
461 		.imm = 1,
462 	},
463 	{
464 		.code = (BPF_JMP | EBPF_EXIT),
465 	},
466 };
467 
468 static void
469 test_mul1_prepare(void *arg)
470 {
471 	struct dummy_vect8 *dv;
472 	uint64_t v;
473 
474 	dv = arg;
475 
476 	v = rte_rand();
477 
478 	memset(dv, 0, sizeof(*dv));
479 	dv->in[0].u32 = v;
480 	dv->in[1].u64 = v << 12 | v >> 6;
481 	dv->in[2].u32 = -v;
482 }
483 
484 static int
485 test_mul1_check(uint64_t rc, const void *arg)
486 {
487 	uint64_t r2, r3, r4;
488 	const struct dummy_vect8 *dvt;
489 	struct dummy_vect8 dve;
490 
491 	dvt = arg;
492 	memset(&dve, 0, sizeof(dve));
493 
494 	r2 = dvt->in[0].u32;
495 	r3 = dvt->in[1].u64;
496 	r4 = dvt->in[2].u32;
497 
498 	r2 = (uint32_t)r2 * TEST_MUL_1;
499 	r3 *= TEST_MUL_2;
500 	r4 = (uint32_t)(r4 * r2);
501 	r4 *= r3;
502 
503 	dve.out[0].u64 = r2;
504 	dve.out[1].u64 = r3;
505 	dve.out[2].u64 = r4;
506 
507 	return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
508 }
509 
510 /* alu shift test-cases */
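/*
 * The immediate shifts use TEST_SHIFT_1/TEST_SHIFT_2 directly; for
 * the register forms the count is first masked with TEST_SHIFT32_MASK
 * (31) or TEST_SHIFT64_MASK (63), as a shift count must stay below
 * the operand width. EBPF_ARSH is the arithmetic (sign-propagating)
 * right shift.
 */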
511 static const struct ebpf_insn test_shift1_prog[] = {
512 
513 	{
514 		.code = (BPF_LDX | BPF_MEM | BPF_W),
515 		.dst_reg = EBPF_REG_2,
516 		.src_reg = EBPF_REG_1,
517 		.off = offsetof(struct dummy_vect8, in[0].u32),
518 	},
519 	{
520 		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
521 		.dst_reg = EBPF_REG_3,
522 		.src_reg = EBPF_REG_1,
523 		.off = offsetof(struct dummy_vect8, in[1].u64),
524 	},
525 	{
526 		.code = (BPF_LDX | BPF_MEM | BPF_W),
527 		.dst_reg = EBPF_REG_4,
528 		.src_reg = EBPF_REG_1,
529 		.off = offsetof(struct dummy_vect8, in[2].u32),
530 	},
531 	{
532 		.code = (BPF_ALU | BPF_LSH | BPF_K),
533 		.dst_reg = EBPF_REG_2,
534 		.imm = TEST_SHIFT_1,
535 	},
536 	{
537 		.code = (EBPF_ALU64 | EBPF_ARSH | BPF_K),
538 		.dst_reg = EBPF_REG_3,
539 		.imm = TEST_SHIFT_2,
540 	},
541 	{
542 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
543 		.dst_reg = EBPF_REG_1,
544 		.src_reg = EBPF_REG_2,
545 		.off = offsetof(struct dummy_vect8, out[0].u64),
546 	},
547 	{
548 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
549 		.dst_reg = EBPF_REG_1,
550 		.src_reg = EBPF_REG_3,
551 		.off = offsetof(struct dummy_vect8, out[1].u64),
552 	},
553 	{
554 		.code = (BPF_ALU | BPF_AND | BPF_K),
555 		.dst_reg = EBPF_REG_4,
556 		.imm = TEST_SHIFT64_MASK,
557 	},
558 	{
559 		.code = (EBPF_ALU64 | BPF_LSH | BPF_X),
560 		.dst_reg = EBPF_REG_3,
561 		.src_reg = EBPF_REG_4,
562 	},
563 	{
564 		.code = (BPF_ALU | BPF_AND | BPF_K),
565 		.dst_reg = EBPF_REG_4,
566 		.imm = TEST_SHIFT32_MASK,
567 	},
568 	{
569 		.code = (BPF_ALU | BPF_RSH | BPF_X),
570 		.dst_reg = EBPF_REG_2,
571 		.src_reg = EBPF_REG_4,
572 	},
573 	{
574 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
575 		.dst_reg = EBPF_REG_1,
576 		.src_reg = EBPF_REG_2,
577 		.off = offsetof(struct dummy_vect8, out[2].u64),
578 	},
579 	{
580 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
581 		.dst_reg = EBPF_REG_1,
582 		.src_reg = EBPF_REG_3,
583 		.off = offsetof(struct dummy_vect8, out[3].u64),
584 	},
585 	{
586 		.code = (BPF_LDX | BPF_MEM | BPF_W),
587 		.dst_reg = EBPF_REG_2,
588 		.src_reg = EBPF_REG_1,
589 		.off = offsetof(struct dummy_vect8, in[0].u32),
590 	},
591 	{
592 		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
593 		.dst_reg = EBPF_REG_3,
594 		.src_reg = EBPF_REG_1,
595 		.off = offsetof(struct dummy_vect8, in[1].u64),
596 	},
597 	{
598 		.code = (BPF_LDX | BPF_MEM | BPF_W),
599 		.dst_reg = EBPF_REG_4,
600 		.src_reg = EBPF_REG_1,
601 		.off = offsetof(struct dummy_vect8, in[2].u32),
602 	},
603 	{
604 		.code = (BPF_ALU | BPF_AND | BPF_K),
605 		.dst_reg = EBPF_REG_2,
606 		.imm = TEST_SHIFT64_MASK,
607 	},
608 	{
609 		.code = (EBPF_ALU64 | EBPF_ARSH | BPF_X),
610 		.dst_reg = EBPF_REG_3,
611 		.src_reg = EBPF_REG_2,
612 	},
613 	{
614 		.code = (BPF_ALU | BPF_AND | BPF_K),
615 		.dst_reg = EBPF_REG_2,
616 		.imm = TEST_SHIFT32_MASK,
617 	},
618 	{
619 		.code = (BPF_ALU | BPF_LSH | BPF_X),
620 		.dst_reg = EBPF_REG_4,
621 		.src_reg = EBPF_REG_2,
622 	},
623 	{
624 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
625 		.dst_reg = EBPF_REG_1,
626 		.src_reg = EBPF_REG_4,
627 		.off = offsetof(struct dummy_vect8, out[4].u64),
628 	},
629 	{
630 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
631 		.dst_reg = EBPF_REG_1,
632 		.src_reg = EBPF_REG_3,
633 		.off = offsetof(struct dummy_vect8, out[5].u64),
634 	},
635 	/* return 1 */
636 	{
637 		.code = (BPF_ALU | EBPF_MOV | BPF_K),
638 		.dst_reg = EBPF_REG_0,
639 		.imm = 1,
640 	},
641 	{
642 		.code = (BPF_JMP | EBPF_EXIT),
643 	},
644 };
645 
646 static void
647 test_shift1_prepare(void *arg)
648 {
649 	struct dummy_vect8 *dv;
650 	uint64_t v;
651 
652 	dv = arg;
653 
654 	v = rte_rand();
655 
656 	memset(dv, 0, sizeof(*dv));
657 	dv->in[0].u32 = v;
658 	dv->in[1].u64 = v << 12 | v >> 6;
659 	dv->in[2].u32 = (-v ^ 5);
660 }
661 
662 static int
663 test_shift1_check(uint64_t rc, const void *arg)
664 {
665 	uint64_t r2, r3, r4;
666 	const struct dummy_vect8 *dvt;
667 	struct dummy_vect8 dve;
668 
669 	dvt = arg;
670 	memset(&dve, 0, sizeof(dve));
671 
672 	r2 = dvt->in[0].u32;
673 	r3 = dvt->in[1].u64;
674 	r4 = dvt->in[2].u32;
675 
676 	r2 = (uint32_t)r2 << TEST_SHIFT_1;
677 	r3 = (int64_t)r3 >> TEST_SHIFT_2;
678 
679 	dve.out[0].u64 = r2;
680 	dve.out[1].u64 = r3;
681 
682 	r4 &= TEST_SHIFT64_MASK;
683 	r3 <<= r4;
684 	r4 &= TEST_SHIFT32_MASK;
685 	r2 = (uint32_t)r2 >> r4;
686 
687 	dve.out[2].u64 = r2;
688 	dve.out[3].u64 = r3;
689 
690 	r2 = dvt->in[0].u32;
691 	r3 = dvt->in[1].u64;
692 	r4 = dvt->in[2].u32;
693 
694 	r2 &= TEST_SHIFT64_MASK;
695 	r3 = (int64_t)r3 >> r2;
696 	r2 &= TEST_SHIFT32_MASK;
697 	r4 = (uint32_t)r4 << r2;
698 
699 	dve.out[4].u64 = r4;
700 	dve.out[5].u64 = r3;
701 
702 	return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
703 }
704 
705 /* jmp test-cases */
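/*
 * Instructions [5]-[12] below are the conditional jumps under test.
 * Each taken branch lands on one of the "OR a flag into R0" pairs at
 * [14]-[29] and jumps back (BPF_JA, negative offset) to the next
 * test, so on exit R0 is a bitmask of the conditions that fired -
 * exactly what test_jump1_check() recomputes in C.
 */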
706 static const struct ebpf_insn test_jump1_prog[] = {
707 
708 	[0] = {
709 		.code = (BPF_ALU | EBPF_MOV | BPF_K),
710 		.dst_reg = EBPF_REG_0,
711 		.imm = 0,
712 	},
713 	[1] = {
714 		.code = (BPF_LDX | BPF_MEM | BPF_W),
715 		.dst_reg = EBPF_REG_2,
716 		.src_reg = EBPF_REG_1,
717 		.off = offsetof(struct dummy_vect8, in[0].u32),
718 	},
719 	[2] = {
720 		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
721 		.dst_reg = EBPF_REG_3,
722 		.src_reg = EBPF_REG_1,
723 		.off = offsetof(struct dummy_vect8, in[0].u64),
724 	},
725 	[3] = {
726 		.code = (BPF_LDX | BPF_MEM | BPF_W),
727 		.dst_reg = EBPF_REG_4,
728 		.src_reg = EBPF_REG_1,
729 		.off = offsetof(struct dummy_vect8, in[1].u32),
730 	},
731 	[4] = {
732 		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
733 		.dst_reg = EBPF_REG_5,
734 		.src_reg = EBPF_REG_1,
735 		.off = offsetof(struct dummy_vect8, in[1].u64),
736 	},
737 	[5] = {
738 		.code = (BPF_JMP | BPF_JEQ | BPF_K),
739 		.dst_reg = EBPF_REG_2,
740 		.imm = TEST_JCC_1,
741 		.off = 8,
742 	},
743 	[6] = {
744 		.code = (BPF_JMP | EBPF_JSLE | BPF_K),
745 		.dst_reg = EBPF_REG_3,
746 		.imm = TEST_JCC_2,
747 		.off = 9,
748 	},
749 	[7] = {
750 		.code = (BPF_JMP | BPF_JGT | BPF_K),
751 		.dst_reg = EBPF_REG_4,
752 		.imm = TEST_JCC_3,
753 		.off = 10,
754 	},
755 	[8] = {
756 		.code = (BPF_JMP | BPF_JSET | BPF_K),
757 		.dst_reg = EBPF_REG_5,
758 		.imm = TEST_JCC_4,
759 		.off = 11,
760 	},
761 	[9] = {
762 		.code = (BPF_JMP | EBPF_JNE | BPF_X),
763 		.dst_reg = EBPF_REG_2,
764 		.src_reg = EBPF_REG_3,
765 		.off = 12,
766 	},
767 	[10] = {
768 		.code = (BPF_JMP | EBPF_JSGT | BPF_X),
769 		.dst_reg = EBPF_REG_2,
770 		.src_reg = EBPF_REG_4,
771 		.off = 13,
772 	},
773 	[11] = {
774 		.code = (BPF_JMP | EBPF_JLE | BPF_X),
775 		.dst_reg = EBPF_REG_2,
776 		.src_reg = EBPF_REG_5,
777 		.off = 14,
778 	},
779 	[12] = {
780 		.code = (BPF_JMP | BPF_JSET | BPF_X),
781 		.dst_reg = EBPF_REG_3,
782 		.src_reg = EBPF_REG_5,
783 		.off = 15,
784 	},
785 	[13] = {
786 		.code = (BPF_JMP | EBPF_EXIT),
787 	},
788 	[14] = {
789 		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
790 		.dst_reg = EBPF_REG_0,
791 		.imm = 0x1,
792 	},
793 	[15] = {
794 		.code = (BPF_JMP | BPF_JA),
795 		.off = -10,
796 	},
797 	[16] = {
798 		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
799 		.dst_reg = EBPF_REG_0,
800 		.imm = 0x2,
801 	},
802 	[17] = {
803 		.code = (BPF_JMP | BPF_JA),
804 		.off = -11,
805 	},
806 	[18] = {
807 		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
808 		.dst_reg = EBPF_REG_0,
809 		.imm = 0x4,
810 	},
811 	[19] = {
812 		.code = (BPF_JMP | BPF_JA),
813 		.off = -12,
814 	},
815 	[20] = {
816 		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
817 		.dst_reg = EBPF_REG_0,
818 		.imm = 0x8,
819 	},
820 	[21] = {
821 		.code = (BPF_JMP | BPF_JA),
822 		.off = -13,
823 	},
824 	[22] = {
825 		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
826 		.dst_reg = EBPF_REG_0,
827 		.imm = 0x10,
828 	},
829 	[23] = {
830 		.code = (BPF_JMP | BPF_JA),
831 		.off = -14,
832 	},
833 	[24] = {
834 		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
835 		.dst_reg = EBPF_REG_0,
836 		.imm = 0x20,
837 	},
838 	[25] = {
839 		.code = (BPF_JMP | BPF_JA),
840 		.off = -15,
841 	},
842 	[26] = {
843 		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
844 		.dst_reg = EBPF_REG_0,
845 		.imm = 0x40,
846 	},
847 	[27] = {
848 		.code = (BPF_JMP | BPF_JA),
849 		.off = -16,
850 	},
851 	[28] = {
852 		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
853 		.dst_reg = EBPF_REG_0,
854 		.imm = 0x80,
855 	},
856 	[29] = {
857 		.code = (BPF_JMP | BPF_JA),
858 		.off = -17,
859 	},
860 };
861 
862 static void
863 test_jump1_prepare(void *arg)
864 {
865 	struct dummy_vect8 *dv;
866 	uint64_t v1, v2;
867 
868 	dv = arg;
869 
870 	v1 = rte_rand();
871 	v2 = rte_rand();
872 
873 	memset(dv, 0, sizeof(*dv));
874 	dv->in[0].u64 = v1;
875 	dv->in[1].u64 = v2;
876 	dv->in[0].u32 = (v1 << 12) + (v2 >> 6);
877 	dv->in[1].u32 = (v2 << 12) - (v1 >> 6);
878 }
879 
880 static int
881 test_jump1_check(uint64_t rc, const void *arg)
882 {
883 	uint64_t r2, r3, r4, r5, rv;
884 	const struct dummy_vect8 *dvt;
885 
886 	dvt = arg;
887 
888 	rv = 0;
889 	r2 = dvt->in[0].u32;
890 	r3 = dvt->in[0].u64;
891 	r4 = dvt->in[1].u32;
892 	r5 = dvt->in[1].u64;
893 
894 	if (r2 == TEST_JCC_1)
895 		rv |= 0x1;
896 	if ((int64_t)r3 <= TEST_JCC_2)
897 		rv |= 0x2;
898 	if (r4 > TEST_JCC_3)
899 		rv |= 0x4;
900 	if (r5 & TEST_JCC_4)
901 		rv |= 0x8;
902 	if (r2 != r3)
903 		rv |= 0x10;
904 	if ((int64_t)r2 > (int64_t)r4)
905 		rv |= 0x20;
906 	if (r2 <= r5)
907 		rv |= 0x40;
908 	if (r3 & r5)
909 		rv |= 0x80;
910 
911 	return cmp_res(__func__, rv, rc, &rv, &rc, sizeof(rv));
912 }
913 
914 /* Jump test case - check that ip4_dest is in a particular subnet */
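/*
 * The program walks the Ethernet header (plus an optional 802.1Q
 * tag) and tests whether the IPv4 destination address falls into
 * TEST_SUBNET/TEST_NETMASK, returning 0 on a match and -1 otherwise.
 * The 16-bit ether_type loads are in host byte order, hence the
 * comparisons against 0x81 and 0x8 instead of 0x8100 and 0x0800
 * (this assumes a little-endian host).
 */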
915 static const struct ebpf_insn test_jump2_prog[] = {
916 
917 	[0] = {
918 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
919 		.dst_reg = EBPF_REG_2,
920 		.imm = 0xe,
921 	},
922 	[1] = {
923 		.code = (BPF_LDX | BPF_MEM | BPF_H),
924 		.dst_reg = EBPF_REG_3,
925 		.src_reg = EBPF_REG_1,
926 		.off = 12,
927 	},
928 	[2] = {
929 		.code = (BPF_JMP | EBPF_JNE | BPF_K),
930 		.dst_reg = EBPF_REG_3,
931 		.off = 2,
932 		.imm = 0x81,
933 	},
934 	[3] = {
935 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
936 		.dst_reg = EBPF_REG_2,
937 		.imm = 0x12,
938 	},
939 	[4] = {
940 		.code = (BPF_LDX | BPF_MEM | BPF_H),
941 		.dst_reg = EBPF_REG_3,
942 		.src_reg = EBPF_REG_1,
943 		.off = 16,
944 	},
945 	[5] = {
946 		.code = (EBPF_ALU64 | BPF_AND | BPF_K),
947 		.dst_reg = EBPF_REG_3,
948 		.imm = 0xffff,
949 	},
950 	[6] = {
951 		.code = (BPF_JMP | EBPF_JNE | BPF_K),
952 		.dst_reg = EBPF_REG_3,
953 		.off = 9,
954 		.imm = 0x8,
955 	},
956 	[7] = {
957 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
958 		.dst_reg = EBPF_REG_1,
959 		.src_reg = EBPF_REG_2,
960 	},
961 	[8] = {
962 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
963 		.dst_reg = EBPF_REG_0,
964 		.imm = 0,
965 	},
966 	[9] = {
967 		.code = (BPF_LDX | BPF_MEM | BPF_W),
968 		.dst_reg = EBPF_REG_1,
969 		.src_reg = EBPF_REG_1,
970 		.off = 16,
971 	},
972 	[10] = {
973 		.code = (BPF_ALU | EBPF_MOV | BPF_K),
974 		.dst_reg = EBPF_REG_3,
975 		.imm = TEST_NETMASK,
976 	},
977 	[11] = {
978 		.code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
979 		.dst_reg = EBPF_REG_3,
980 		.imm = sizeof(uint32_t) * CHAR_BIT,
981 	},
982 	[12] = {
983 		.code = (BPF_ALU | BPF_AND | BPF_X),
984 		.dst_reg = EBPF_REG_1,
985 		.src_reg = EBPF_REG_3,
986 	},
987 	[13] = {
988 		.code = (BPF_ALU | EBPF_MOV | BPF_K),
989 		.dst_reg = EBPF_REG_3,
990 		.imm = TEST_SUBNET,
991 	},
992 	[14] = {
993 		.code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
994 		.dst_reg = EBPF_REG_3,
995 		.imm = sizeof(uint32_t) * CHAR_BIT,
996 	},
997 	[15] = {
998 		.code = (BPF_JMP | BPF_JEQ | BPF_X),
999 		.dst_reg = EBPF_REG_1,
1000 		.src_reg = EBPF_REG_3,
1001 		.off = 1,
1002 	},
1003 	[16] = {
1004 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1005 		.dst_reg = EBPF_REG_0,
1006 		.imm = -1,
1007 	},
1008 	[17] = {
1009 		.code = (BPF_JMP | EBPF_EXIT),
1010 	},
1011 };
1012 
1013 /* Prepare a VLAN packet */
1014 static void
1015 test_jump2_prepare(void *arg)
1016 {
1017 	struct dummy_net *dn;
1018 
1019 	dn = arg;
1020 	memset(dn, 0, sizeof(*dn));
1021 
1022 	/*
1023 	 * Initialize ether header.
1024 	 */
1025 	rte_ether_addr_copy((struct rte_ether_addr *)dst_mac,
1026 			    &dn->eth_hdr.dst_addr);
1027 	rte_ether_addr_copy((struct rte_ether_addr *)src_mac,
1028 			    &dn->eth_hdr.src_addr);
1029 	dn->eth_hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
1030 
1031 	/*
1032 	 * Initialize vlan header.
1033 	 */
1034 	dn->vlan_hdr.eth_proto =  rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
1035 	dn->vlan_hdr.vlan_tci = 32;
1036 
1037 	/*
1038 	 * Initialize IP header.
1039 	 */
1040 	dn->ip_hdr.version_ihl = 0x45;	/* IP_VERSION | IP_HDRLEN */
1041 	dn->ip_hdr.time_to_live = 64;	/* IP_DEFTTL */
1042 	dn->ip_hdr.next_proto_id = IPPROTO_TCP;
1043 	dn->ip_hdr.packet_id = rte_cpu_to_be_16(0x463c);
1044 	dn->ip_hdr.total_length   = rte_cpu_to_be_16(60);
1045 	dn->ip_hdr.src_addr = rte_cpu_to_be_32(ip_src_addr);
1046 	dn->ip_hdr.dst_addr = rte_cpu_to_be_32(ip_dst_addr);
1047 }
1048 
1049 static int
1050 test_jump2_check(uint64_t rc, const void *arg)
1051 {
1052 	const struct rte_ether_hdr *eth_hdr = arg;
1053 	const struct rte_ipv4_hdr *ipv4_hdr;
1054 	const void *next = eth_hdr;
1055 	uint16_t eth_type;
1056 	uint64_t v = -1;
1057 
1058 	if (eth_hdr->ether_type == htons(0x8100)) {
1059 		const struct rte_vlan_hdr *vlan_hdr =
1060 			(const void *)(eth_hdr + 1);
1061 		eth_type = vlan_hdr->eth_proto;
1062 		next = vlan_hdr + 1;
1063 	} else {
1064 		eth_type = eth_hdr->ether_type;
1065 		next = eth_hdr + 1;
1066 	}
1067 
1068 	if (eth_type == htons(0x0800)) {
1069 		ipv4_hdr = next;
1070 		if ((ipv4_hdr->dst_addr & rte_cpu_to_be_32(TEST_NETMASK)) ==
1071 		    rte_cpu_to_be_32(TEST_SUBNET)) {
1072 			v = 0;
1073 		}
1074 	}
1075 
1076 	return cmp_res(__func__, v, rc, arg, arg, sizeof(arg));
1077 }
1078 
1079 /* alu (add, sub, and, or, xor, neg) test-cases */
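/*
 * First pass: and/or/xor/add each register with the TEST_FILL_1
 * immediate (sign-extended for the 64-bit forms). Second pass:
 * combine the registers with each other. Finally return
 * (-r2) + (-r3) to exercise 32- and 64-bit BPF_NEG as well.
 */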
1080 static const struct ebpf_insn test_alu1_prog[] = {
1081 
1082 	{
1083 		.code = (BPF_LDX | BPF_MEM | BPF_W),
1084 		.dst_reg = EBPF_REG_2,
1085 		.src_reg = EBPF_REG_1,
1086 		.off = offsetof(struct dummy_vect8, in[0].u32),
1087 	},
1088 	{
1089 		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
1090 		.dst_reg = EBPF_REG_3,
1091 		.src_reg = EBPF_REG_1,
1092 		.off = offsetof(struct dummy_vect8, in[0].u64),
1093 	},
1094 	{
1095 		.code = (BPF_LDX | BPF_MEM | BPF_W),
1096 		.dst_reg = EBPF_REG_4,
1097 		.src_reg = EBPF_REG_1,
1098 		.off = offsetof(struct dummy_vect8, in[1].u32),
1099 	},
1100 	{
1101 		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
1102 		.dst_reg = EBPF_REG_5,
1103 		.src_reg = EBPF_REG_1,
1104 		.off = offsetof(struct dummy_vect8, in[1].u64),
1105 	},
1106 	{
1107 		.code = (BPF_ALU | BPF_AND | BPF_K),
1108 		.dst_reg = EBPF_REG_2,
1109 		.imm = TEST_FILL_1,
1110 	},
1111 	{
1112 		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
1113 		.dst_reg = EBPF_REG_3,
1114 		.imm = TEST_FILL_1,
1115 	},
1116 	{
1117 		.code = (BPF_ALU | BPF_XOR | BPF_K),
1118 		.dst_reg = EBPF_REG_4,
1119 		.imm = TEST_FILL_1,
1120 	},
1121 	{
1122 		.code = (EBPF_ALU64 | BPF_ADD | BPF_K),
1123 		.dst_reg = EBPF_REG_5,
1124 		.imm = TEST_FILL_1,
1125 	},
1126 	{
1127 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
1128 		.dst_reg = EBPF_REG_1,
1129 		.src_reg = EBPF_REG_2,
1130 		.off = offsetof(struct dummy_vect8, out[0].u64),
1131 	},
1132 	{
1133 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
1134 		.dst_reg = EBPF_REG_1,
1135 		.src_reg = EBPF_REG_3,
1136 		.off = offsetof(struct dummy_vect8, out[1].u64),
1137 	},
1138 	{
1139 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
1140 		.dst_reg = EBPF_REG_1,
1141 		.src_reg = EBPF_REG_4,
1142 		.off = offsetof(struct dummy_vect8, out[2].u64),
1143 	},
1144 	{
1145 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
1146 		.dst_reg = EBPF_REG_1,
1147 		.src_reg = EBPF_REG_5,
1148 		.off = offsetof(struct dummy_vect8, out[3].u64),
1149 	},
1150 	{
1151 		.code = (BPF_ALU | BPF_OR | BPF_X),
1152 		.dst_reg = EBPF_REG_2,
1153 		.src_reg = EBPF_REG_3,
1154 	},
1155 	{
1156 		.code = (EBPF_ALU64 | BPF_XOR | BPF_X),
1157 		.dst_reg = EBPF_REG_3,
1158 		.src_reg = EBPF_REG_4,
1159 	},
1160 	{
1161 		.code = (BPF_ALU | BPF_SUB | BPF_X),
1162 		.dst_reg = EBPF_REG_4,
1163 		.src_reg = EBPF_REG_5,
1164 	},
1165 	{
1166 		.code = (EBPF_ALU64 | BPF_AND | BPF_X),
1167 		.dst_reg = EBPF_REG_5,
1168 		.src_reg = EBPF_REG_2,
1169 	},
1170 	{
1171 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
1172 		.dst_reg = EBPF_REG_1,
1173 		.src_reg = EBPF_REG_2,
1174 		.off = offsetof(struct dummy_vect8, out[4].u64),
1175 	},
1176 	{
1177 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
1178 		.dst_reg = EBPF_REG_1,
1179 		.src_reg = EBPF_REG_3,
1180 		.off = offsetof(struct dummy_vect8, out[5].u64),
1181 	},
1182 	{
1183 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
1184 		.dst_reg = EBPF_REG_1,
1185 		.src_reg = EBPF_REG_4,
1186 		.off = offsetof(struct dummy_vect8, out[6].u64),
1187 	},
1188 	{
1189 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
1190 		.dst_reg = EBPF_REG_1,
1191 		.src_reg = EBPF_REG_5,
1192 		.off = offsetof(struct dummy_vect8, out[7].u64),
1193 	},
1194 	/* return (-r2 + (-r3)) */
1195 	{
1196 		.code = (BPF_ALU | BPF_NEG),
1197 		.dst_reg = EBPF_REG_2,
1198 	},
1199 	{
1200 		.code = (EBPF_ALU64 | BPF_NEG),
1201 		.dst_reg = EBPF_REG_3,
1202 	},
1203 	{
1204 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1205 		.dst_reg = EBPF_REG_2,
1206 		.src_reg = EBPF_REG_3,
1207 	},
1208 	{
1209 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1210 		.dst_reg = EBPF_REG_0,
1211 		.src_reg = EBPF_REG_2,
1212 	},
1213 	{
1214 		.code = (BPF_JMP | EBPF_EXIT),
1215 	},
1216 };
1217 
1218 static int
1219 test_alu1_check(uint64_t rc, const void *arg)
1220 {
1221 	uint64_t r2, r3, r4, r5, rv;
1222 	const struct dummy_vect8 *dvt;
1223 	struct dummy_vect8 dve;
1224 
1225 	dvt = arg;
1226 	memset(&dve, 0, sizeof(dve));
1227 
1228 	r2 = dvt->in[0].u32;
1229 	r3 = dvt->in[0].u64;
1230 	r4 = dvt->in[1].u32;
1231 	r5 = dvt->in[1].u64;
1232 
1233 	r2 = (uint32_t)r2 & TEST_FILL_1;
1234 	r3 |= (int32_t) TEST_FILL_1;
1235 	r4 = (uint32_t)r4 ^ TEST_FILL_1;
1236 	r5 += (int32_t)TEST_FILL_1;
1237 
1238 	dve.out[0].u64 = r2;
1239 	dve.out[1].u64 = r3;
1240 	dve.out[2].u64 = r4;
1241 	dve.out[3].u64 = r5;
1242 
1243 	r2 = (uint32_t)r2 | (uint32_t)r3;
1244 	r3 ^= r4;
1245 	r4 = (uint32_t)r4 - (uint32_t)r5;
1246 	r5 &= r2;
1247 
1248 	dve.out[4].u64 = r2;
1249 	dve.out[5].u64 = r3;
1250 	dve.out[6].u64 = r4;
1251 	dve.out[7].u64 = r5;
1252 
1253 	r2 = -(int32_t)r2;
1254 	rv = (uint32_t)r2;
1255 	r3 = -r3;
1256 	rv += r3;
1257 
1258 	return cmp_res(__func__, rv, rc, dve.out, dvt->out, sizeof(dve.out));
1259 }
1260 
1261 /* endianness conversion (BE->LE/LE->BE) test-cases */
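/*
 * BPF_ALU | EBPF_END converts the low imm bits of a register (16, 32
 * or 64 here) to big- or little-endian - the in-program counterpart
 * of the rte_cpu_to_be_*()/rte_cpu_to_le_*() calls that
 * test_bele1_check() uses to build the expected values.
 */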
1262 static const struct ebpf_insn test_bele1_prog[] = {
1263 
1264 	{
1265 		.code = (BPF_LDX | BPF_MEM | BPF_H),
1266 		.dst_reg = EBPF_REG_2,
1267 		.src_reg = EBPF_REG_1,
1268 		.off = offsetof(struct dummy_vect8, in[0].u16),
1269 	},
1270 	{
1271 		.code = (BPF_LDX | BPF_MEM | BPF_W),
1272 		.dst_reg = EBPF_REG_3,
1273 		.src_reg = EBPF_REG_1,
1274 		.off = offsetof(struct dummy_vect8, in[0].u32),
1275 	},
1276 	{
1277 		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
1278 		.dst_reg = EBPF_REG_4,
1279 		.src_reg = EBPF_REG_1,
1280 		.off = offsetof(struct dummy_vect8, in[0].u64),
1281 	},
1282 	{
1283 		.code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
1284 		.dst_reg = EBPF_REG_2,
1285 		.imm = sizeof(uint16_t) * CHAR_BIT,
1286 	},
1287 	{
1288 		.code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
1289 		.dst_reg = EBPF_REG_3,
1290 		.imm = sizeof(uint32_t) * CHAR_BIT,
1291 	},
1292 	{
1293 		.code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
1294 		.dst_reg = EBPF_REG_4,
1295 		.imm = sizeof(uint64_t) * CHAR_BIT,
1296 	},
1297 	{
1298 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
1299 		.dst_reg = EBPF_REG_1,
1300 		.src_reg = EBPF_REG_2,
1301 		.off = offsetof(struct dummy_vect8, out[0].u64),
1302 	},
1303 	{
1304 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
1305 		.dst_reg = EBPF_REG_1,
1306 		.src_reg = EBPF_REG_3,
1307 		.off = offsetof(struct dummy_vect8, out[1].u64),
1308 	},
1309 	{
1310 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
1311 		.dst_reg = EBPF_REG_1,
1312 		.src_reg = EBPF_REG_4,
1313 		.off = offsetof(struct dummy_vect8, out[2].u64),
1314 	},
1315 	{
1316 		.code = (BPF_LDX | BPF_MEM | BPF_H),
1317 		.dst_reg = EBPF_REG_2,
1318 		.src_reg = EBPF_REG_1,
1319 		.off = offsetof(struct dummy_vect8, in[0].u16),
1320 	},
1321 	{
1322 		.code = (BPF_LDX | BPF_MEM | BPF_W),
1323 		.dst_reg = EBPF_REG_3,
1324 		.src_reg = EBPF_REG_1,
1325 		.off = offsetof(struct dummy_vect8, in[0].u32),
1326 	},
1327 	{
1328 		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
1329 		.dst_reg = EBPF_REG_4,
1330 		.src_reg = EBPF_REG_1,
1331 		.off = offsetof(struct dummy_vect8, in[0].u64),
1332 	},
1333 	{
1334 		.code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
1335 		.dst_reg = EBPF_REG_2,
1336 		.imm = sizeof(uint16_t) * CHAR_BIT,
1337 	},
1338 	{
1339 		.code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
1340 		.dst_reg = EBPF_REG_3,
1341 		.imm = sizeof(uint32_t) * CHAR_BIT,
1342 	},
1343 	{
1344 		.code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
1345 		.dst_reg = EBPF_REG_4,
1346 		.imm = sizeof(uint64_t) * CHAR_BIT,
1347 	},
1348 	{
1349 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
1350 		.dst_reg = EBPF_REG_1,
1351 		.src_reg = EBPF_REG_2,
1352 		.off = offsetof(struct dummy_vect8, out[3].u64),
1353 	},
1354 	{
1355 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
1356 		.dst_reg = EBPF_REG_1,
1357 		.src_reg = EBPF_REG_3,
1358 		.off = offsetof(struct dummy_vect8, out[4].u64),
1359 	},
1360 	{
1361 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
1362 		.dst_reg = EBPF_REG_1,
1363 		.src_reg = EBPF_REG_4,
1364 		.off = offsetof(struct dummy_vect8, out[5].u64),
1365 	},
1366 	/* return 1 */
1367 	{
1368 		.code = (BPF_ALU | EBPF_MOV | BPF_K),
1369 		.dst_reg = EBPF_REG_0,
1370 		.imm = 1,
1371 	},
1372 	{
1373 		.code = (BPF_JMP | EBPF_EXIT),
1374 	},
1375 };
1376 
1377 static void
1378 test_bele1_prepare(void *arg)
1379 {
1380 	struct dummy_vect8 *dv;
1381 
1382 	dv = arg;
1383 
1384 	memset(dv, 0, sizeof(*dv));
1385 	dv->in[0].u64 = rte_rand();
1386 	dv->in[0].u32 = dv->in[0].u64;
1387 	dv->in[0].u16 = dv->in[0].u64;
1388 }
1389 
1390 static int
1391 test_bele1_check(uint64_t rc, const void *arg)
1392 {
1393 	uint64_t r2, r3, r4;
1394 	const struct dummy_vect8 *dvt;
1395 	struct dummy_vect8 dve;
1396 
1397 	dvt = arg;
1398 	memset(&dve, 0, sizeof(dve));
1399 
1400 	r2 = dvt->in[0].u16;
1401 	r3 = dvt->in[0].u32;
1402 	r4 = dvt->in[0].u64;
1403 
1404 	r2 =  rte_cpu_to_be_16(r2);
1405 	r3 =  rte_cpu_to_be_32(r3);
1406 	r4 =  rte_cpu_to_be_64(r4);
1407 
1408 	dve.out[0].u64 = r2;
1409 	dve.out[1].u64 = r3;
1410 	dve.out[2].u64 = r4;
1411 
1412 	r2 = dvt->in[0].u16;
1413 	r3 = dvt->in[0].u32;
1414 	r4 = dvt->in[0].u64;
1415 
1416 	r2 =  rte_cpu_to_le_16(r2);
1417 	r3 =  rte_cpu_to_le_32(r3);
1418 	r4 =  rte_cpu_to_le_64(r4);
1419 
1420 	dve.out[3].u64 = r2;
1421 	dve.out[4].u64 = r3;
1422 	dve.out[5].u64 = r4;
1423 
1424 	return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
1425 }
1426 
1427 /* atomic add test-cases */
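/*
 * EBPF_XADD performs an atomic in-memory add:
 * *(uint32_t/uint64_t *)(dst + off) += src, no value returned.
 * The series of positive and negative increments below is replayed
 * by test_xadd1_check() with rte_atomic32/64_add().
 */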
1428 static const struct ebpf_insn test_xadd1_prog[] = {
1429 
1430 	{
1431 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1432 		.dst_reg = EBPF_REG_2,
1433 		.imm = 1,
1434 	},
1435 	{
1436 		.code = (BPF_STX | EBPF_XADD | BPF_W),
1437 		.dst_reg = EBPF_REG_1,
1438 		.src_reg = EBPF_REG_2,
1439 		.off = offsetof(struct dummy_offset, u32),
1440 	},
1441 	{
1442 		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
1443 		.dst_reg = EBPF_REG_1,
1444 		.src_reg = EBPF_REG_2,
1445 		.off = offsetof(struct dummy_offset, u64),
1446 	},
1447 	{
1448 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1449 		.dst_reg = EBPF_REG_3,
1450 		.imm = -1,
1451 	},
1452 	{
1453 		.code = (BPF_STX | EBPF_XADD | BPF_W),
1454 		.dst_reg = EBPF_REG_1,
1455 		.src_reg = EBPF_REG_3,
1456 		.off = offsetof(struct dummy_offset, u32),
1457 	},
1458 	{
1459 		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
1460 		.dst_reg = EBPF_REG_1,
1461 		.src_reg = EBPF_REG_3,
1462 		.off = offsetof(struct dummy_offset, u64),
1463 	},
1464 	{
1465 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1466 		.dst_reg = EBPF_REG_4,
1467 		.imm = TEST_FILL_1,
1468 	},
1469 	{
1470 		.code = (BPF_STX | EBPF_XADD | BPF_W),
1471 		.dst_reg = EBPF_REG_1,
1472 		.src_reg = EBPF_REG_4,
1473 		.off = offsetof(struct dummy_offset, u32),
1474 	},
1475 	{
1476 		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
1477 		.dst_reg = EBPF_REG_1,
1478 		.src_reg = EBPF_REG_4,
1479 		.off = offsetof(struct dummy_offset, u64),
1480 	},
1481 	{
1482 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1483 		.dst_reg = EBPF_REG_5,
1484 		.imm = TEST_MUL_1,
1485 	},
1486 	{
1487 		.code = (BPF_STX | EBPF_XADD | BPF_W),
1488 		.dst_reg = EBPF_REG_1,
1489 		.src_reg = EBPF_REG_5,
1490 		.off = offsetof(struct dummy_offset, u32),
1491 	},
1492 	{
1493 		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
1494 		.dst_reg = EBPF_REG_1,
1495 		.src_reg = EBPF_REG_5,
1496 		.off = offsetof(struct dummy_offset, u64),
1497 	},
1498 	{
1499 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1500 		.dst_reg = EBPF_REG_6,
1501 		.imm = TEST_MUL_2,
1502 	},
1503 	{
1504 		.code = (BPF_STX | EBPF_XADD | BPF_W),
1505 		.dst_reg = EBPF_REG_1,
1506 		.src_reg = EBPF_REG_6,
1507 		.off = offsetof(struct dummy_offset, u32),
1508 	},
1509 	{
1510 		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
1511 		.dst_reg = EBPF_REG_1,
1512 		.src_reg = EBPF_REG_6,
1513 		.off = offsetof(struct dummy_offset, u64),
1514 	},
1515 	{
1516 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1517 		.dst_reg = EBPF_REG_7,
1518 		.imm = TEST_JCC_2,
1519 	},
1520 	{
1521 		.code = (BPF_STX | EBPF_XADD | BPF_W),
1522 		.dst_reg = EBPF_REG_1,
1523 		.src_reg = EBPF_REG_7,
1524 		.off = offsetof(struct dummy_offset, u32),
1525 	},
1526 	{
1527 		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
1528 		.dst_reg = EBPF_REG_1,
1529 		.src_reg = EBPF_REG_7,
1530 		.off = offsetof(struct dummy_offset, u64),
1531 	},
1532 	{
1533 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1534 		.dst_reg = EBPF_REG_8,
1535 		.imm = TEST_JCC_3,
1536 	},
1537 	{
1538 		.code = (BPF_STX | EBPF_XADD | BPF_W),
1539 		.dst_reg = EBPF_REG_1,
1540 		.src_reg = EBPF_REG_8,
1541 		.off = offsetof(struct dummy_offset, u32),
1542 	},
1543 	{
1544 		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
1545 		.dst_reg = EBPF_REG_1,
1546 		.src_reg = EBPF_REG_8,
1547 		.off = offsetof(struct dummy_offset, u64),
1548 	},
1549 	/* return 1 */
1550 	{
1551 		.code = (BPF_ALU | EBPF_MOV | BPF_K),
1552 		.dst_reg = EBPF_REG_0,
1553 		.imm = 1,
1554 	},
1555 	{
1556 		.code = (BPF_JMP | EBPF_EXIT),
1557 	},
1558 };
1559 
1560 static int
1561 test_xadd1_check(uint64_t rc, const void *arg)
1562 {
1563 	uint64_t rv;
1564 	const struct dummy_offset *dft;
1565 	struct dummy_offset dfe;
1566 
1567 	dft = arg;
1568 	memset(&dfe, 0, sizeof(dfe));
1569 
1570 	rv = 1;
1571 	rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1572 	rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1573 
1574 	rv = -1;
1575 	rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1576 	rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1577 
1578 	rv = (int32_t)TEST_FILL_1;
1579 	rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1580 	rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1581 
1582 	rv = TEST_MUL_1;
1583 	rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1584 	rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1585 
1586 	rv = TEST_MUL_2;
1587 	rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1588 	rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1589 
1590 	rv = TEST_JCC_2;
1591 	rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1592 	rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1593 
1594 	rv = TEST_JCC_3;
1595 	rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1596 	rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1597 
1598 	return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe));
1599 }
1600 
1601 /* alu div test-cases */
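/*
 * As with multiply, BPF_ALU div/mod act on the low 32 bits and
 * EBPF_ALU64 on all 64; the ORs with 1 guarantee non-zero register
 * divisors before they are used. The last division deliberately uses
 * a zero divisor, so the run is expected to end with a return value
 * of 0 (see test_div1_check()).
 */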
1602 static const struct ebpf_insn test_div1_prog[] = {
1603 
1604 	{
1605 		.code = (BPF_LDX | BPF_MEM | BPF_W),
1606 		.dst_reg = EBPF_REG_2,
1607 		.src_reg = EBPF_REG_1,
1608 		.off = offsetof(struct dummy_vect8, in[0].u32),
1609 	},
1610 	{
1611 		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
1612 		.dst_reg = EBPF_REG_3,
1613 		.src_reg = EBPF_REG_1,
1614 		.off = offsetof(struct dummy_vect8, in[1].u64),
1615 	},
1616 	{
1617 		.code = (BPF_LDX | BPF_MEM | BPF_W),
1618 		.dst_reg = EBPF_REG_4,
1619 		.src_reg = EBPF_REG_1,
1620 		.off = offsetof(struct dummy_vect8, in[2].u32),
1621 	},
1622 	{
1623 		.code = (BPF_ALU | BPF_DIV | BPF_K),
1624 		.dst_reg = EBPF_REG_2,
1625 		.imm = TEST_MUL_1,
1626 	},
1627 	{
1628 		.code = (EBPF_ALU64 | BPF_MOD | BPF_K),
1629 		.dst_reg = EBPF_REG_3,
1630 		.imm = TEST_MUL_2,
1631 	},
1632 	{
1633 		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
1634 		.dst_reg = EBPF_REG_2,
1635 		.imm = 1,
1636 	},
1637 	{
1638 		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
1639 		.dst_reg = EBPF_REG_3,
1640 		.imm = 1,
1641 	},
1642 	{
1643 		.code = (BPF_ALU | BPF_MOD | BPF_X),
1644 		.dst_reg = EBPF_REG_4,
1645 		.src_reg = EBPF_REG_2,
1646 	},
1647 	{
1648 		.code = (EBPF_ALU64 | BPF_DIV | BPF_X),
1649 		.dst_reg = EBPF_REG_4,
1650 		.src_reg = EBPF_REG_3,
1651 	},
1652 	{
1653 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
1654 		.dst_reg = EBPF_REG_1,
1655 		.src_reg = EBPF_REG_2,
1656 		.off = offsetof(struct dummy_vect8, out[0].u64),
1657 	},
1658 	{
1659 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
1660 		.dst_reg = EBPF_REG_1,
1661 		.src_reg = EBPF_REG_3,
1662 		.off = offsetof(struct dummy_vect8, out[1].u64),
1663 	},
1664 	{
1665 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
1666 		.dst_reg = EBPF_REG_1,
1667 		.src_reg = EBPF_REG_4,
1668 		.off = offsetof(struct dummy_vect8, out[2].u64),
1669 	},
1670 	/* check that we can handle division by zero gracefully. */
1671 	{
1672 		.code = (BPF_LDX | BPF_MEM | BPF_W),
1673 		.dst_reg = EBPF_REG_2,
1674 		.src_reg = EBPF_REG_1,
1675 		.off = offsetof(struct dummy_vect8, in[3].u32),
1676 	},
1677 	{
1678 		.code = (BPF_ALU | BPF_DIV | BPF_X),
1679 		.dst_reg = EBPF_REG_4,
1680 		.src_reg = EBPF_REG_2,
1681 	},
1682 	/* return 1 */
1683 	{
1684 		.code = (BPF_ALU | EBPF_MOV | BPF_K),
1685 		.dst_reg = EBPF_REG_0,
1686 		.imm = 1,
1687 	},
1688 	{
1689 		.code = (BPF_JMP | EBPF_EXIT),
1690 	},
1691 };
1692 
1693 static int
1694 test_div1_check(uint64_t rc, const void *arg)
1695 {
1696 	uint64_t r2, r3, r4;
1697 	const struct dummy_vect8 *dvt;
1698 	struct dummy_vect8 dve;
1699 
1700 	dvt = arg;
1701 	memset(&dve, 0, sizeof(dve));
1702 
1703 	r2 = dvt->in[0].u32;
1704 	r3 = dvt->in[1].u64;
1705 	r4 = dvt->in[2].u32;
1706 
1707 	r2 = (uint32_t)r2 / TEST_MUL_1;
1708 	r3 %= TEST_MUL_2;
1709 	r2 |= 1;
1710 	r3 |= 1;
1711 	r4 = (uint32_t)(r4 % r2);
1712 	r4 /= r3;
1713 
1714 	dve.out[0].u64 = r2;
1715 	dve.out[1].u64 = r3;
1716 	dve.out[2].u64 = r4;
1717 
1718 	/*
1719 	 * In the test program we attempted to divide by zero,
1720 	 * so the return value should be 0.
1721 	 */
1722 	return cmp_res(__func__, 0, rc, dve.out, dvt->out, sizeof(dve.out));
1723 }
1724 
1725 /* call test-cases */
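/*
 * For EBPF_CALL the imm field selects the external symbol - in these
 * tests it is the index into the rte_bpf_xsym array supplied through
 * struct rte_bpf_prm (imm = 0 resolves to dummy_func1 below).
 * R1-R5 carry arguments, R0 the return value, and R10 is the
 * read-only frame pointer used here to carve out stack room for the
 * two output parameters.
 */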
1726 static const struct ebpf_insn test_call1_prog[] = {
1727 
1728 	{
1729 		.code = (BPF_LDX | BPF_MEM | BPF_W),
1730 		.dst_reg = EBPF_REG_2,
1731 		.src_reg = EBPF_REG_1,
1732 		.off = offsetof(struct dummy_offset, u32),
1733 	},
1734 	{
1735 		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
1736 		.dst_reg = EBPF_REG_3,
1737 		.src_reg = EBPF_REG_1,
1738 		.off = offsetof(struct dummy_offset, u64),
1739 	},
1740 	{
1741 		.code = (BPF_STX | BPF_MEM | BPF_W),
1742 		.dst_reg = EBPF_REG_10,
1743 		.src_reg = EBPF_REG_2,
1744 		.off = -4,
1745 	},
1746 	{
1747 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
1748 		.dst_reg = EBPF_REG_10,
1749 		.src_reg = EBPF_REG_3,
1750 		.off = -16,
1751 	},
1752 	{
1753 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1754 		.dst_reg = EBPF_REG_2,
1755 		.src_reg = EBPF_REG_10,
1756 	},
1757 	{
1758 		.code = (EBPF_ALU64 | BPF_SUB | BPF_K),
1759 		.dst_reg = EBPF_REG_2,
1760 		.imm = 4,
1761 	},
1762 	{
1763 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1764 		.dst_reg = EBPF_REG_3,
1765 		.src_reg = EBPF_REG_10,
1766 	},
1767 	{
1768 		.code = (EBPF_ALU64 | BPF_SUB | BPF_K),
1769 		.dst_reg = EBPF_REG_3,
1770 		.imm = 16,
1771 	},
1772 	{
1773 		.code = (BPF_JMP | EBPF_CALL),
1774 		.imm = 0,
1775 	},
1776 	{
1777 		.code = (BPF_LDX | BPF_MEM | BPF_W),
1778 		.dst_reg = EBPF_REG_2,
1779 		.src_reg = EBPF_REG_10,
1780 		.off = -4,
1781 	},
1782 	{
1783 		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
1784 		.dst_reg = EBPF_REG_0,
1785 		.src_reg = EBPF_REG_10,
1786 		.off = -16
1787 	},
1788 	{
1789 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1790 		.dst_reg = EBPF_REG_0,
1791 		.src_reg = EBPF_REG_2,
1792 	},
1793 	{
1794 		.code = (BPF_JMP | EBPF_EXIT),
1795 	},
1796 };
1797 
1798 static void
1799 dummy_func1(const void *p, uint32_t *v32, uint64_t *v64)
1800 {
1801 	const struct dummy_offset *dv;
1802 
1803 	dv = p;
1804 
1805 	v32[0] += dv->u16;
1806 	v64[0] += dv->u8;
1807 }
1808 
1809 static int
1810 test_call1_check(uint64_t rc, const void *arg)
1811 {
1812 	uint32_t v32;
1813 	uint64_t v64;
1814 	const struct dummy_offset *dv;
1815 
1816 	dv = arg;
1817 
1818 	v32 = dv->u32;
1819 	v64 = dv->u64;
1820 	dummy_func1(arg, &v32, &v64);
1821 	v64 += v32;
1822 
1823 	return cmp_res(__func__, v64, rc, dv, dv, sizeof(*dv));
1824 }
1825 
1826 static const struct rte_bpf_xsym test_call1_xsym[] = {
1827 	{
1828 		.name = RTE_STR(dummy_func1),
1829 		.type = RTE_BPF_XTYPE_FUNC,
1830 		.func = {
1831 			.val = (void *)dummy_func1,
1832 			.nb_args = 3,
1833 			.args = {
1834 				[0] = {
1835 					.type = RTE_BPF_ARG_PTR,
1836 					.size = sizeof(struct dummy_offset),
1837 				},
1838 				[1] = {
1839 					.type = RTE_BPF_ARG_PTR,
1840 					.size = sizeof(uint32_t),
1841 				},
1842 				[2] = {
1843 					.type = RTE_BPF_ARG_PTR,
1844 					.size = sizeof(uint64_t),
1845 				},
1846 			},
1847 		},
1848 	},
1849 };
1850 
1851 static const struct ebpf_insn test_call2_prog[] = {
1852 
1853 	{
1854 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1855 		.dst_reg = EBPF_REG_1,
1856 		.src_reg = EBPF_REG_10,
1857 	},
1858 	{
1859 		.code = (EBPF_ALU64 | BPF_ADD | BPF_K),
1860 		.dst_reg = EBPF_REG_1,
1861 		.imm = -(int32_t)sizeof(struct dummy_offset),
1862 	},
1863 	{
1864 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1865 		.dst_reg = EBPF_REG_2,
1866 		.src_reg = EBPF_REG_10,
1867 	},
1868 	{
1869 		.code = (EBPF_ALU64 | BPF_ADD | BPF_K),
1870 		.dst_reg = EBPF_REG_2,
1871 		.imm = -2 * (int32_t)sizeof(struct dummy_offset),
1872 	},
1873 	{
1874 		.code = (BPF_JMP | EBPF_CALL),
1875 		.imm = 0,
1876 	},
1877 	{
1878 		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
1879 		.dst_reg = EBPF_REG_1,
1880 		.src_reg = EBPF_REG_10,
1881 		.off = -(int32_t)(sizeof(struct dummy_offset) -
1882 			offsetof(struct dummy_offset, u64)),
1883 	},
1884 	{
1885 		.code = (BPF_LDX | BPF_MEM | BPF_W),
1886 		.dst_reg = EBPF_REG_0,
1887 		.src_reg = EBPF_REG_10,
1888 		.off = -(int32_t)(sizeof(struct dummy_offset) -
1889 			offsetof(struct dummy_offset, u32)),
1890 	},
1891 	{
1892 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1893 		.dst_reg = EBPF_REG_0,
1894 		.src_reg = EBPF_REG_1,
1895 	},
1896 	{
1897 		.code = (BPF_LDX | BPF_MEM | BPF_H),
1898 		.dst_reg = EBPF_REG_1,
1899 		.src_reg = EBPF_REG_10,
1900 		.off = -(int32_t)(2 * sizeof(struct dummy_offset) -
1901 			offsetof(struct dummy_offset, u16)),
1902 	},
1903 	{
1904 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1905 		.dst_reg = EBPF_REG_0,
1906 		.src_reg = EBPF_REG_1,
1907 	},
1908 	{
1909 		.code = (BPF_LDX | BPF_MEM | BPF_B),
1910 		.dst_reg = EBPF_REG_1,
1911 		.src_reg = EBPF_REG_10,
1912 		.off = -(int32_t)(2 * sizeof(struct dummy_offset) -
1913 			offsetof(struct dummy_offset, u8)),
1914 	},
1915 	{
1916 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1917 		.dst_reg = EBPF_REG_0,
1918 		.src_reg = EBPF_REG_1,
1919 	},
1920 	{
1921 		.code = (BPF_JMP | EBPF_EXIT),
1922 	},
1923 
1924 };
1925 
1926 static void
1927 dummy_func2(struct dummy_offset *a, struct dummy_offset *b)
1928 {
1929 	uint64_t v;
1930 
1931 	v = 0;
1932 	a->u64 = v++;
1933 	a->u32 = v++;
1934 	a->u16 = v++;
1935 	a->u8 = v++;
1936 	b->u64 = v++;
1937 	b->u32 = v++;
1938 	b->u16 = v++;
1939 	b->u8 = v++;
1940 }
1941 
1942 static int
1943 test_call2_check(uint64_t rc, const void *arg)
1944 {
1945 	uint64_t v;
1946 	struct dummy_offset a, b;
1947 
1948 	RTE_SET_USED(arg);
1949 
1950 	dummy_func2(&a, &b);
1951 	v = a.u64 + a.u32 + b.u16 + b.u8;
1952 
1953 	return cmp_res(__func__, v, rc, arg, arg, 0);
1954 }
1955 
1956 static const struct rte_bpf_xsym test_call2_xsym[] = {
1957 	{
1958 		.name = RTE_STR(dummy_func2),
1959 		.type = RTE_BPF_XTYPE_FUNC,
1960 		.func = {
1961 			.val = (void *)dummy_func2,
1962 			.nb_args = 2,
1963 			.args = {
1964 				[0] = {
1965 					.type = RTE_BPF_ARG_PTR,
1966 					.size = sizeof(struct dummy_offset),
1967 				},
1968 				[1] = {
1969 					.type = RTE_BPF_ARG_PTR,
1970 					.size = sizeof(struct dummy_offset),
1971 				},
1972 			},
1973 		},
1974 	},
1975 };
1976 
1977 static const struct ebpf_insn test_call3_prog[] = {
1978 
1979 	{
1980 		.code = (BPF_JMP | EBPF_CALL),
1981 		.imm = 0,
1982 	},
1983 	{
1984 		.code = (BPF_LDX | BPF_MEM | BPF_B),
1985 		.dst_reg = EBPF_REG_2,
1986 		.src_reg = EBPF_REG_0,
1987 		.off = offsetof(struct dummy_offset, u8),
1988 	},
1989 	{
1990 		.code = (BPF_LDX | BPF_MEM | BPF_H),
1991 		.dst_reg = EBPF_REG_3,
1992 		.src_reg = EBPF_REG_0,
1993 		.off = offsetof(struct dummy_offset, u16),
1994 	},
1995 	{
1996 		.code = (BPF_LDX | BPF_MEM | BPF_W),
1997 		.dst_reg = EBPF_REG_4,
1998 		.src_reg = EBPF_REG_0,
1999 		.off = offsetof(struct dummy_offset, u32),
2000 	},
2001 	{
2002 		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
2003 		.dst_reg = EBPF_REG_0,
2004 		.src_reg = EBPF_REG_0,
2005 		.off = offsetof(struct dummy_offset, u64),
2006 	},
2007 	/* return sum */
2008 	{
2009 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2010 		.dst_reg = EBPF_REG_0,
2011 		.src_reg = EBPF_REG_4,
2012 	},
2013 	{
2014 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2015 		.dst_reg = EBPF_REG_0,
2016 		.src_reg = EBPF_REG_3,
2017 	},
2018 	{
2019 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2020 		.dst_reg = EBPF_REG_0,
2021 		.src_reg = EBPF_REG_2,
2022 	},
2023 	{
2024 		.code = (BPF_JMP | EBPF_EXIT),
2025 	},
2026 };
2027 
2028 static const struct dummy_offset *
2029 dummy_func3(const struct dummy_vect8 *p)
2030 {
2031 	return &p->in[RTE_DIM(p->in) - 1];
2032 }
2033 
2034 static void
2035 test_call3_prepare(void *arg)
2036 {
2037 	struct dummy_vect8 *pv;
2038 	struct dummy_offset *df;
2039 
2040 	pv = arg;
2041 	df = (struct dummy_offset *)(uintptr_t)dummy_func3(pv);
2042 
2043 	memset(pv, 0, sizeof(*pv));
2044 	df->u64 = (int32_t)TEST_FILL_1;
2045 	df->u32 = df->u64;
2046 	df->u16 = df->u64;
2047 	df->u8 = df->u64;
2048 }
2049 
2050 static int
2051 test_call3_check(uint64_t rc, const void *arg)
2052 {
2053 	uint64_t v;
2054 	const struct dummy_vect8 *pv;
2055 	const struct dummy_offset *dft;
2056 
2057 	pv = arg;
2058 	dft = dummy_func3(pv);
2059 
2060 	v = dft->u64;
2061 	v += dft->u32;
2062 	v += dft->u16;
2063 	v += dft->u8;
2064 
2065 	return cmp_res(__func__, v, rc, pv, pv, sizeof(*pv));
2066 }
2067 
2068 static const struct rte_bpf_xsym test_call3_xsym[] = {
2069 	{
2070 		.name = RTE_STR(dummy_func3),
2071 		.type = RTE_BPF_XTYPE_FUNC,
2072 		.func = {
2073 			.val = (void *)dummy_func3,
2074 			.nb_args = 1,
2075 			.args = {
2076 				[0] = {
2077 					.type = RTE_BPF_ARG_PTR,
2078 					.size = sizeof(struct dummy_vect8),
2079 				},
2080 			},
2081 			.ret = {
2082 				.type = RTE_BPF_ARG_PTR,
2083 				.size = sizeof(struct dummy_offset),
2084 			},
2085 		},
2086 	},
2087 };
2088 
2089 /* Test for stack corruption in multiple function calls */
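/*
 * Stores the bytes 1,2,3,4 just below the frame pointer, calls
 * dummy_func4_0() (imm = 0) to memfrob them in place, reloads them,
 * packs them back into a word with dummy_func4_1() (imm = 1) and
 * XORs the result with TEST_MEMFROB, which undoes the frob.
 */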
2090 static const struct ebpf_insn test_call4_prog[] = {
2091 	{
2092 		.code = (BPF_ST | BPF_MEM | BPF_B),
2093 		.dst_reg = EBPF_REG_10,
2094 		.off = -4,
2095 		.imm = 1,
2096 	},
2097 	{
2098 		.code = (BPF_ST | BPF_MEM | BPF_B),
2099 		.dst_reg = EBPF_REG_10,
2100 		.off = -3,
2101 		.imm = 2,
2102 	},
2103 	{
2104 		.code = (BPF_ST | BPF_MEM | BPF_B),
2105 		.dst_reg = EBPF_REG_10,
2106 		.off = -2,
2107 		.imm = 3,
2108 	},
2109 	{
2110 		.code = (BPF_ST | BPF_MEM | BPF_B),
2111 		.dst_reg = EBPF_REG_10,
2112 		.off = -1,
2113 		.imm = 4,
2114 	},
2115 	{
2116 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2117 		.dst_reg = EBPF_REG_1,
2118 		.src_reg = EBPF_REG_10,
2119 	},
2120 	{
2121 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
2122 		.dst_reg = EBPF_REG_2,
2123 		.imm = 4,
2124 	},
2125 	{
2126 		.code = (EBPF_ALU64 | BPF_SUB | BPF_X),
2127 		.dst_reg = EBPF_REG_1,
2128 		.src_reg = EBPF_REG_2,
2129 	},
2130 	{
2131 		.code = (BPF_JMP | EBPF_CALL),
2132 		.imm = 0,
2133 	},
2134 	{
2135 		.code = (BPF_LDX | BPF_MEM | BPF_B),
2136 		.dst_reg = EBPF_REG_1,
2137 		.src_reg = EBPF_REG_10,
2138 		.off = -4,
2139 	},
2140 	{
2141 		.code = (BPF_LDX | BPF_MEM | BPF_B),
2142 		.dst_reg = EBPF_REG_2,
2143 		.src_reg = EBPF_REG_10,
2144 		.off = -3,
2145 	},
2146 	{
2147 		.code = (BPF_LDX | BPF_MEM | BPF_B),
2148 		.dst_reg = EBPF_REG_3,
2149 		.src_reg = EBPF_REG_10,
2150 		.off = -2,
2151 	},
2152 	{
2153 		.code = (BPF_LDX | BPF_MEM | BPF_B),
2154 		.dst_reg = EBPF_REG_4,
2155 		.src_reg = EBPF_REG_10,
2156 		.off = -1,
2157 	},
2158 	{
2159 		.code = (BPF_JMP | EBPF_CALL),
2160 		.imm = 1,
2161 	},
2162 	{
2163 		.code = (EBPF_ALU64 | BPF_XOR | BPF_K),
2164 		.dst_reg = EBPF_REG_0,
2165 		.imm = TEST_MEMFROB,
2166 	},
2167 	{
2168 		.code = (BPF_JMP | EBPF_EXIT),
2169 	},
2170 };
2171 
2172 /* Gather the bytes into a single 32-bit word */
2173 static uint32_t
2174 dummy_func4_1(uint8_t a, uint8_t b, uint8_t c, uint8_t d)
2175 {
2176 	return (a << 24) | (b << 16) | (c << 8) | (d << 0);
2177 }
2178 
2179 /* Implementation of memfrob */
2180 static uint32_t
2181 dummy_func4_0(uint32_t *s, uint8_t n)
2182 {
2183 	char *p = (char *) s;
2184 	while (n-- > 0)
2185 		*p++ ^= 42;
2186 	return *s;
2187 }
2188 
2189 
2190 static int
2191 test_call4_check(uint64_t rc, const void *arg)
2192 {
2193 	uint8_t a[4] = {1, 2, 3, 4};
2194 	uint32_t s, v = 0;
2195 
2196 	RTE_SET_USED(arg);
2197 
2198 	s = dummy_func4_0((uint32_t *)a, 4);
2199 
2200 	s = dummy_func4_1(a[0], a[1], a[2], a[3]);
2201 
2202 	v = s ^ TEST_MEMFROB;
2203 
2204 	return cmp_res(__func__, v, rc, &v, &rc, sizeof(v));
2205 }
2206 
2207 static const struct rte_bpf_xsym test_call4_xsym[] = {
2208 	[0] = {
2209 		.name = RTE_STR(dummy_func4_0),
2210 		.type = RTE_BPF_XTYPE_FUNC,
2211 		.func = {
2212 			.val = (void *)dummy_func4_0,
2213 			.nb_args = 2,
2214 			.args = {
2215 				[0] = {
2216 					.type = RTE_BPF_ARG_PTR,
2217 					.size = 4 * sizeof(uint8_t),
2218 				},
2219 				[1] = {
2220 					.type = RTE_BPF_ARG_RAW,
2221 					.size = sizeof(uint8_t),
2222 				},
2223 			},
2224 			.ret = {
2225 				.type = RTE_BPF_ARG_RAW,
2226 				.size = sizeof(uint32_t),
2227 			},
2228 		},
2229 	},
2230 	[1] = {
2231 		.name = RTE_STR(dummy_func4_1),
2232 		.type = RTE_BPF_XTYPE_FUNC,
2233 		.func = {
2234 			.val = (void *)dummy_func4_1,
2235 			.nb_args = 4,
2236 			.args = {
2237 				[0] = {
2238 					.type = RTE_BPF_ARG_RAW,
2239 					.size = sizeof(uint8_t),
2240 				},
2241 				[1] = {
2242 					.type = RTE_BPF_ARG_RAW,
2243 					.size = sizeof(uint8_t),
2244 				},
2245 				[2] = {
2246 					.type = RTE_BPF_ARG_RAW,
2247 					.size = sizeof(uint8_t),
2248 				},
2249 				[3] = {
2250 					.type = RTE_BPF_ARG_RAW,
2251 					.size = sizeof(uint8_t),
2252 				},
2253 			},
2254 			.ret = {
2255 				.type = RTE_BPF_ARG_RAW,
2256 				.size = sizeof(uint32_t),
2257 			},
2258 		},
2259 	},
2260 };
2261 
2262 /* string compare test case */
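/*
 * STRING_GEEK and STRING_WEEK are "geek" and "week" packed into
 * 32-bit words (assuming a little-endian host). The program stores
 * them NUL-terminated on the stack and calls dummy_func5() twice:
 * once for "geek" vs itself and once for "geek" vs "week",
 * mirroring test_call5_check().
 */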
2263 static const struct ebpf_insn test_call5_prog[] = {
2264 
2265 	[0] = {
2266 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
2267 		.dst_reg = EBPF_REG_1,
2268 		.imm = STRING_GEEK,
2269 	},
2270 	[1] = {
2271 		.code = (BPF_STX | BPF_MEM | BPF_W),
2272 		.dst_reg = EBPF_REG_10,
2273 		.src_reg = EBPF_REG_1,
2274 		.off = -8,
2275 	},
2276 	[2] = {
2277 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
2278 		.dst_reg = EBPF_REG_6,
2279 		.imm = 0,
2280 	},
2281 	[3] = {
2282 		.code = (BPF_STX | BPF_MEM | BPF_B),
2283 		.dst_reg = EBPF_REG_10,
2284 		.src_reg = EBPF_REG_6,
2285 		.off = -4,
2286 	},
2287 	[4] = {
2288 		.code = (BPF_STX | BPF_MEM | BPF_W),
2289 		.dst_reg = EBPF_REG_10,
2290 		.src_reg = EBPF_REG_6,
2291 		.off = -12,
2292 	},
2293 	[5] = {
2294 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
2295 		.dst_reg = EBPF_REG_1,
2296 		.imm = STRING_WEEK,
2297 	},
2298 	[6] = {
2299 		.code = (BPF_STX | BPF_MEM | BPF_W),
2300 		.dst_reg = EBPF_REG_10,
2301 		.src_reg = EBPF_REG_1,
2302 		.off = -16,
2303 	},
2304 	[7] = {
2305 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2306 		.dst_reg = EBPF_REG_1,
2307 		.src_reg = EBPF_REG_10,
2308 	},
2309 	[8] = {
2310 		.code = (EBPF_ALU64 | BPF_ADD | BPF_K),
2311 		.dst_reg = EBPF_REG_1,
2312 		.imm = -8,
2313 	},
2314 	[9] = {
2315 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2316 		.dst_reg = EBPF_REG_2,
2317 		.src_reg = EBPF_REG_1,
2318 	},
2319 	[10] = {
2320 		.code = (BPF_JMP | EBPF_CALL),
2321 		.imm = 0,
2322 	},
2323 	[11] = {
2324 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2325 		.dst_reg = EBPF_REG_1,
2326 		.src_reg = EBPF_REG_0,
2327 	},
2328 	[12] = {
2329 		.code = (BPF_ALU | EBPF_MOV | BPF_K),
2330 		.dst_reg = EBPF_REG_0,
2331 		.imm = -1,
2332 	},
2333 	[13] = {
2334 		.code = (EBPF_ALU64 | BPF_LSH | BPF_K),
2335 		.dst_reg = EBPF_REG_1,
2336 		.imm = 0x20,
2337 	},
2338 	[14] = {
2339 		.code = (EBPF_ALU64 | BPF_RSH | BPF_K),
2340 		.dst_reg = EBPF_REG_1,
2341 		.imm = 0x20,
2342 	},
2343 	[15] = {
2344 		.code = (BPF_JMP | EBPF_JNE | BPF_K),
2345 		.dst_reg = EBPF_REG_1,
2346 		.off = 11,
2347 		.imm = 0,
2348 	},
2349 	[16] = {
2350 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2351 		.dst_reg = EBPF_REG_1,
2352 		.src_reg = EBPF_REG_10,
2353 	},
2354 	[17] = {
2355 		.code = (EBPF_ALU64 | BPF_ADD | BPF_K),
2356 		.dst_reg = EBPF_REG_1,
2357 		.imm = -8,
2358 	},
2359 	[18] = {
2360 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2361 		.dst_reg = EBPF_REG_2,
2362 		.src_reg = EBPF_REG_10,
2363 	},
2364 	[19] = {
2365 		.code = (EBPF_ALU64 | BPF_ADD | BPF_K),
2366 		.dst_reg = EBPF_REG_2,
2367 		.imm = -16,
2368 	},
2369 	[20] = {
2370 		.code = (BPF_JMP | EBPF_CALL),
2371 		.imm = 0,
2372 	},
2373 	[21] = {
2374 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2375 		.dst_reg = EBPF_REG_1,
2376 		.src_reg = EBPF_REG_0,
2377 	},
2378 	[22] = {
2379 		.code = (EBPF_ALU64 | BPF_LSH | BPF_K),
2380 		.dst_reg = EBPF_REG_1,
2381 		.imm = 0x20,
2382 	},
2383 	[23] = {
2384 		.code = (EBPF_ALU64 | BPF_RSH | BPF_K),
2385 		.dst_reg = EBPF_REG_1,
2386 		.imm = 0x20,
2387 	},
2388 	[24] = {
2389 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2390 		.dst_reg = EBPF_REG_0,
2391 		.src_reg = EBPF_REG_1,
2392 	},
2393 	[25] = {
2394 		.code = (BPF_JMP | BPF_JEQ | BPF_X),
2395 		.dst_reg = EBPF_REG_1,
2396 		.src_reg = EBPF_REG_6,
2397 		.off = 1,
2398 	},
2399 	[26] = {
2400 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
2401 		.dst_reg = EBPF_REG_0,
2402 		.imm = 0,
2403 	},
2404 	[27] = {
2405 		.code = (BPF_JMP | EBPF_EXIT),
2406 	},
2407 };
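/*
 * Stack layout built by the program above (on a little-endian host):
 * "geek\0" at R10 - 8 (the STRING_GEEK word plus the NUL byte stored at
 * R10 - 4) and "week\0" at R10 - 16 (NUL provided by the zero word at
 * R10 - 12).  The LSH/RSH-by-32 pairs zero-extend the helper's 32-bit
 * return value in the 64-bit register before it is tested.
 */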
2408 
2409 /* String comparison implementation (strcmp-like): returns 0 if equal, else the difference */
2410 static uint32_t
2411 dummy_func5(const char *s1, const char *s2)
2412 {
2413 	while (*s1 && (*s1 == *s2)) {
2414 		s1++;
2415 		s2++;
2416 	}
2417 	return *(const unsigned char *)s1 - *(const unsigned char *)s2;
2418 }
2419 
2420 static int
2421 test_call5_check(uint64_t rc, const void *arg)
2422 {
2423 	char a[] = "geek";
2424 	char b[] = "week";
2425 	uint32_t v;
2426 
2427 	RTE_SET_USED(arg);
2428 
2429 	v = dummy_func5(a, a);
2430 	if (v != 0) {
2431 		v = -1;
2432 		goto fail;
2433 	}
2434 
2435 	v = dummy_func5(a, b);
2436 	if (v == 0)
2437 		goto fail;
2438 
2439 	v = 0;
2440 
2441 fail:
2442 	return cmp_res(__func__, v, rc, &v, &rc, sizeof(v));
2443 }
2444 
2445 static const struct rte_bpf_xsym test_call5_xsym[] = {
2446 	[0] = {
2447 		.name = RTE_STR(dummy_func5),
2448 		.type = RTE_BPF_XTYPE_FUNC,
2449 		.func = {
2450 			.val = (void *)dummy_func5,
2451 			.nb_args = 2,
2452 			.args = {
2453 				[0] = {
2454 					.type = RTE_BPF_ARG_PTR,
2455 					.size = sizeof(char),
2456 				},
2457 				[1] = {
2458 					.type = RTE_BPF_ARG_PTR,
2459 					.size = sizeof(char),
2460 				},
2461 			},
2462 			.ret = {
2463 				.type = RTE_BPF_ARG_RAW,
2464 				.size = sizeof(uint32_t),
2465 			},
2466 		},
2467 	},
2468 };
2469 
2470 /* load mbuf (BPF_ABS/BPF_IND) test-cases */
2471 static const struct ebpf_insn test_ld_mbuf1_prog[] = {
2472 
2473 	/* BPF_ABS/BPF_IND implicitly expect mbuf ptr in R6 */
2474 	{
2475 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2476 		.dst_reg = EBPF_REG_6,
2477 		.src_reg = EBPF_REG_1,
2478 	},
2479 	/* load IPv4 version and IHL */
2480 	{
2481 		.code = (BPF_LD | BPF_ABS | BPF_B),
2482 		.imm = offsetof(struct rte_ipv4_hdr, version_ihl),
2483 	},
2484 	/* check IP version */
2485 	{
2486 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2487 		.dst_reg = EBPF_REG_2,
2488 		.src_reg = EBPF_REG_0,
2489 	},
2490 	{
2491 		.code = (BPF_ALU | BPF_AND | BPF_K),
2492 		.dst_reg = EBPF_REG_2,
2493 		.imm = 0xf0,
2494 	},
2495 	{
2496 		.code = (BPF_JMP | BPF_JEQ | BPF_K),
2497 		.dst_reg = EBPF_REG_2,
2498 		.imm = IPVERSION << 4,
2499 		.off = 2,
2500 	},
2501 	/* invalid IP version, return 0 */
2502 	{
2503 		.code = (EBPF_ALU64 | BPF_XOR | BPF_X),
2504 		.dst_reg = EBPF_REG_0,
2505 		.src_reg = EBPF_REG_0,
2506 	},
2507 	{
2508 		.code = (BPF_JMP | EBPF_EXIT),
2509 	},
2510 	/* load 3rd byte of IP data */
2511 	{
2512 		.code = (BPF_ALU | BPF_AND | BPF_K),
2513 		.dst_reg = EBPF_REG_0,
2514 		.imm = RTE_IPV4_HDR_IHL_MASK,
2515 	},
2516 	{
2517 		.code = (BPF_ALU | BPF_LSH | BPF_K),
2518 		.dst_reg = EBPF_REG_0,
2519 		.imm = 2,
2520 	},
2521 	{
2522 		.code = (BPF_LD | BPF_IND | BPF_B),
2523 		.src_reg = EBPF_REG_0,
2524 		.imm = 3,
2525 	},
2526 	{
2527 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2528 		.dst_reg = EBPF_REG_7,
2529 		.src_reg = EBPF_REG_0,
2530 	},
2531 	/* load IPv4 src addr */
2532 	{
2533 		.code = (BPF_LD | BPF_ABS | BPF_W),
2534 		.imm = offsetof(struct rte_ipv4_hdr, src_addr),
2535 	},
2536 	{
2537 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2538 		.dst_reg = EBPF_REG_7,
2539 		.src_reg = EBPF_REG_0,
2540 	},
2541 	/* load IPv4 total length */
2542 	{
2543 		.code = (BPF_LD | BPF_ABS | BPF_H),
2544 		.imm = offsetof(struct rte_ipv4_hdr, total_length),
2545 	},
2546 	{
2547 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2548 		.dst_reg = EBPF_REG_8,
2549 		.src_reg = EBPF_REG_0,
2550 	},
2551 	/* load last 4 bytes of IP data */
2552 	{
2553 		.code = (BPF_LD | BPF_IND | BPF_W),
2554 		.src_reg = EBPF_REG_8,
2555 		.imm = -(int32_t)sizeof(uint32_t),
2556 	},
2557 	{
2558 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2559 		.dst_reg = EBPF_REG_7,
2560 		.src_reg = EBPF_REG_0,
2561 	},
2562 	/* load 2 bytes from the middle of IP data */
2563 	{
2564 		.code = (EBPF_ALU64 | BPF_RSH | BPF_K),
2565 		.dst_reg = EBPF_REG_8,
2566 		.imm = 1,
2567 	},
2568 	{
2569 		.code = (BPF_LD | BPF_IND | BPF_H),
2570 		.src_reg = EBPF_REG_8,
2571 	},
2572 	{
2573 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2574 		.dst_reg = EBPF_REG_0,
2575 		.src_reg = EBPF_REG_7,
2576 	},
2577 	{
2578 		.code = (BPF_JMP | EBPF_EXIT),
2579 	},
2580 };
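/*
 * test_ld_mbuf1() below is the C reference implementation of this
 * program.  Following classic BPF semantics, a BPF_ABS/BPF_IND load that
 * falls outside the packet terminates execution with a return value of
 * 0; the test_ld_mbuf2 case relies on exactly that.
 */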
2581 
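/* attach an external buffer to the mbuf and fill it with a 0,1,2,... pattern */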
2582 static void
2583 dummy_mbuf_prep(struct rte_mbuf *mb, uint8_t buf[], uint32_t buf_len,
2584 	uint32_t data_len)
2585 {
2586 	uint32_t i;
2587 	uint8_t *db;
2588 
2589 	mb->buf_addr = buf;
2590 	mb->buf_iova = (uintptr_t)buf;
2591 	mb->buf_len = buf_len;
2592 	rte_mbuf_refcnt_set(mb, 1);
2593 
2594 	/* set pool pointer to dummy value, test doesn't use it */
2595 	mb->pool = (void *)buf;
2596 
2597 	rte_pktmbuf_reset(mb);
2598 	db = (uint8_t *)rte_pktmbuf_append(mb, data_len);
2599 
2600 	for (i = 0; i != data_len; i++)
2601 		db[i] = i;
2602 }
2603 
2604 static void
2605 test_ld_mbuf1_prepare(void *arg)
2606 {
2607 	struct dummy_mbuf *dm;
2608 	struct rte_ipv4_hdr *ph;
2609 
2610 	const uint32_t plen = 400;
2611 	const struct rte_ipv4_hdr iph = {
2612 		.version_ihl = RTE_IPV4_VHL_DEF,
2613 		.total_length = rte_cpu_to_be_16(plen),
2614 		.time_to_live = IPDEFTTL,
2615 		.next_proto_id = IPPROTO_RAW,
2616 		.src_addr = rte_cpu_to_be_32(RTE_IPV4_LOOPBACK),
2617 		.dst_addr = rte_cpu_to_be_32(RTE_IPV4_BROADCAST),
2618 	};
2619 
2620 	dm = arg;
2621 	memset(dm, 0, sizeof(*dm));
2622 
2623 	dummy_mbuf_prep(&dm->mb[0], dm->buf[0], sizeof(dm->buf[0]),
2624 		plen / 2 + 1);
2625 	dummy_mbuf_prep(&dm->mb[1], dm->buf[1], sizeof(dm->buf[1]),
2626 		plen / 2 - 1);
2627 
2628 	rte_pktmbuf_chain(&dm->mb[0], &dm->mb[1]);
2629 
2630 	ph = rte_pktmbuf_mtod(dm->mb, typeof(ph));
2631 	memcpy(ph, &iph, sizeof(iph));
2632 }
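/*
 * Note: the packet is deliberately split across two chained mbufs
 * (plen / 2 + 1 and plen / 2 - 1 bytes of data), so loads that straddle
 * the segment boundary are exercised as well.
 */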
2633 
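/* C equivalent of test_ld_mbuf1_prog, used to compute the expected result */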
2634 static uint64_t
2635 test_ld_mbuf1(const struct rte_mbuf *pkt)
2636 {
2637 	uint64_t n, v;
2638 	const uint8_t *p8;
2639 	const uint16_t *p16;
2640 	const uint32_t *p32;
2641 	struct dummy_offset dof;
2642 
2643 	/* load IPv4 version and IHL */
2644 	p8 = rte_pktmbuf_read(pkt,
2645 		offsetof(struct rte_ipv4_hdr, version_ihl), sizeof(*p8),
2646 		&dof);
2647 	if (p8 == NULL)
2648 		return 0;
2649 
2650 	/* check IP version */
2651 	if ((p8[0] & 0xf0) != IPVERSION << 4)
2652 		return 0;
2653 
2654 	n = (p8[0] & RTE_IPV4_HDR_IHL_MASK) * RTE_IPV4_IHL_MULTIPLIER;
2655 
2656 	/* load 3rd byte of IP data */
2657 	p8 = rte_pktmbuf_read(pkt, n + 3, sizeof(*p8), &dof);
2658 	if (p8 == NULL)
2659 		return 0;
2660 
2661 	v = p8[0];
2662 
2663 	/* load IPv4 src addr */
2664 	p32 = rte_pktmbuf_read(pkt,
2665 		offsetof(struct rte_ipv4_hdr, src_addr), sizeof(*p32),
2666 		&dof);
2667 	if (p32 == NULL)
2668 		return 0;
2669 
2670 	v += rte_be_to_cpu_32(p32[0]);
2671 
2672 	/* load IPv4 total length */
2673 	p16 = rte_pktmbuf_read(pkt,
2674 		offsetof(struct rte_ipv4_hdr, total_length), sizeof(*p16),
2675 		&dof);
2676 	if (p16 == NULL)
2677 		return 0;
2678 
2679 	n = rte_be_to_cpu_16(p16[0]);
2680 
2681 	/* load last 4 bytes of IP data */
2682 	p32 = rte_pktmbuf_read(pkt, n - sizeof(*p32), sizeof(*p32), &dof);
2683 	if (p32 == NULL)
2684 		return 0;
2685 
2686 	v += rte_be_to_cpu_32(p32[0]);
2687 
2688 	/* load 2 bytes from the middle of IP data */
2689 	p16 = rte_pktmbuf_read(pkt, n / 2, sizeof(*p16), &dof);
2690 	if (p16 == NULL)
2691 		return 0;
2692 
2693 	v += rte_be_to_cpu_16(p16[0]);
2694 	return v;
2695 }
2696 
2697 static int
2698 test_ld_mbuf1_check(uint64_t rc, const void *arg)
2699 {
2700 	const struct dummy_mbuf *dm;
2701 	uint64_t v;
2702 
2703 	dm = arg;
2704 	v = test_ld_mbuf1(dm->mb);
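	/* res_sz of 0: only the return values are compared */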
2705 	return cmp_res(__func__, v, rc, arg, arg, 0);
2706 }
2707 
2708 /*
2709  * same as ld_mbuf1, but truncate the mbuf by 1B,
2710  * so the load of the last 4B fails and the program returns 0.
2711  */
2712 static void
2713 test_ld_mbuf2_prepare(void *arg)
2714 {
2715 	struct dummy_mbuf *dm;
2716 
2717 	test_ld_mbuf1_prepare(arg);
2718 	dm = arg;
2719 	rte_pktmbuf_trim(dm->mb, 1);
2720 }
2721 
2722 static int
2723 test_ld_mbuf2_check(uint64_t rc, const void *arg)
2724 {
2725 	return cmp_res(__func__, 0, rc, arg, arg, 0);
2726 }
2727 
2728 /* same as test_ld_mbuf1, but now store intermediate results on the stack */
2729 static const struct ebpf_insn test_ld_mbuf3_prog[] = {
2730 
2731 	/* BPF_ABS/BPF_IND implicitly expect mbuf ptr in R6 */
2732 	{
2733 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2734 		.dst_reg = EBPF_REG_6,
2735 		.src_reg = EBPF_REG_1,
2736 	},
2737 	/* load IPv4 version and IHL */
2738 	{
2739 		.code = (BPF_LD | BPF_ABS | BPF_B),
2740 		.imm = offsetof(struct rte_ipv4_hdr, version_ihl),
2741 	},
2742 	/* check IP version */
2743 	{
2744 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2745 		.dst_reg = EBPF_REG_2,
2746 		.src_reg = EBPF_REG_0,
2747 	},
2748 	{
2749 		.code = (BPF_ALU | BPF_AND | BPF_K),
2750 		.dst_reg = EBPF_REG_2,
2751 		.imm = 0xf0,
2752 	},
2753 	{
2754 		.code = (BPF_JMP | BPF_JEQ | BPF_K),
2755 		.dst_reg = EBPF_REG_2,
2756 		.imm = IPVERSION << 4,
2757 		.off = 2,
2758 	},
2759 	/* invalid IP version, return 0 */
2760 	{
2761 		.code = (EBPF_ALU64 | BPF_XOR | BPF_X),
2762 		.dst_reg = EBPF_REG_0,
2763 		.src_reg = EBPF_REG_0,
2764 	},
2765 	{
2766 		.code = (BPF_JMP | EBPF_EXIT),
2767 	},
2768 	/* load 3rd byte of IP data */
2769 	{
2770 		.code = (BPF_ALU | BPF_AND | BPF_K),
2771 		.dst_reg = EBPF_REG_0,
2772 		.imm = RTE_IPV4_HDR_IHL_MASK,
2773 	},
2774 	{
2775 		.code = (BPF_ALU | BPF_LSH | BPF_K),
2776 		.dst_reg = EBPF_REG_0,
2777 		.imm = 2,
2778 	},
2779 	{
2780 		.code = (BPF_LD | BPF_IND | BPF_B),
2781 		.src_reg = EBPF_REG_0,
2782 		.imm = 3,
2783 	},
2784 	{
2785 		.code = (BPF_STX | BPF_MEM | BPF_B),
2786 		.dst_reg = EBPF_REG_10,
2787 		.src_reg = EBPF_REG_0,
2788 		.off = (int16_t)(offsetof(struct dummy_offset, u8) -
2789 			sizeof(struct dummy_offset)),
2790 	},
2791 	/* load IPv4 src addr */
2792 	{
2793 		.code = (BPF_LD | BPF_ABS | BPF_W),
2794 		.imm = offsetof(struct rte_ipv4_hdr, src_addr),
2795 	},
2796 	{
2797 		.code = (BPF_STX | BPF_MEM | BPF_W),
2798 		.dst_reg = EBPF_REG_10,
2799 		.src_reg = EBPF_REG_0,
2800 		.off = (int16_t)(offsetof(struct dummy_offset, u32) -
2801 			sizeof(struct dummy_offset)),
2802 	},
2803 	/* load IPv4 total length */
2804 	{
2805 		.code = (BPF_LD | BPF_ABS | BPF_H),
2806 		.imm = offsetof(struct rte_ipv4_hdr, total_length),
2807 	},
2808 	{
2809 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2810 		.dst_reg = EBPF_REG_8,
2811 		.src_reg = EBPF_REG_0,
2812 	},
2813 	/* load last 4 bytes of IP data */
2814 	{
2815 		.code = (BPF_LD | BPF_IND | BPF_W),
2816 		.src_reg = EBPF_REG_8,
2817 		.imm = -(int32_t)sizeof(uint32_t),
2818 	},
2819 	{
2820 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
2821 		.dst_reg = EBPF_REG_10,
2822 		.src_reg = EBPF_REG_0,
2823 		.off = (int16_t)(offsetof(struct dummy_offset, u64) -
2824 			sizeof(struct dummy_offset)),
2825 	},
2826 	/* load 2 bytes from the middle of IP data */
2827 	{
2828 		.code = (EBPF_ALU64 | BPF_RSH | BPF_K),
2829 		.dst_reg = EBPF_REG_8,
2830 		.imm = 1,
2831 	},
2832 	{
2833 		.code = (BPF_LD | BPF_IND | BPF_H),
2834 		.src_reg = EBPF_REG_8,
2835 	},
2836 	{
2837 		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
2838 		.dst_reg = EBPF_REG_1,
2839 		.src_reg = EBPF_REG_10,
2840 		.off = (int16_t)(offsetof(struct dummy_offset, u64) -
2841 			sizeof(struct dummy_offset)),
2842 	},
2843 	{
2844 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2845 		.dst_reg = EBPF_REG_0,
2846 		.src_reg = EBPF_REG_1,
2847 	},
2848 	{
2849 		.code = (BPF_LDX | BPF_MEM | BPF_W),
2850 		.dst_reg = EBPF_REG_1,
2851 		.src_reg = EBPF_REG_10,
2852 		.off = (int16_t)(offsetof(struct dummy_offset, u32) -
2853 			sizeof(struct dummy_offset)),
2854 	},
2855 	{
2856 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2857 		.dst_reg = EBPF_REG_0,
2858 		.src_reg = EBPF_REG_1,
2859 	},
2860 	{
2861 		.code = (BPF_LDX | BPF_MEM | BPF_B),
2862 		.dst_reg = EBPF_REG_1,
2863 		.src_reg = EBPF_REG_10,
2864 		.off = (int16_t)(offsetof(struct dummy_offset, u8) -
2865 			sizeof(struct dummy_offset)),
2866 	},
2867 	{
2868 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2869 		.dst_reg = EBPF_REG_0,
2870 		.src_reg = EBPF_REG_1,
2871 	},
2872 	{
2873 		.code = (BPF_JMP | EBPF_EXIT),
2874 	},
2875 };
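/*
 * The (offsetof(struct dummy_offset, fld) - sizeof(struct dummy_offset))
 * offsets above place a struct dummy_offset right at the top of the BPF
 * stack frame, which grows down from R10; each intermediate value is
 * spilled to and later reloaded from its matching field.
 */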
2876 
2877 /* all BPF test cases */
2878 static const struct bpf_test tests[] = {
2879 	{
2880 		.name = "test_store1",
2881 		.arg_sz = sizeof(struct dummy_offset),
2882 		.prm = {
2883 			.ins = test_store1_prog,
2884 			.nb_ins = RTE_DIM(test_store1_prog),
2885 			.prog_arg = {
2886 				.type = RTE_BPF_ARG_PTR,
2887 				.size = sizeof(struct dummy_offset),
2888 			},
2889 		},
2890 		.prepare = test_store1_prepare,
2891 		.check_result = test_store1_check,
2892 	},
2893 	{
2894 		.name = "test_store2",
2895 		.arg_sz = sizeof(struct dummy_offset),
2896 		.prm = {
2897 			.ins = test_store2_prog,
2898 			.nb_ins = RTE_DIM(test_store2_prog),
2899 			.prog_arg = {
2900 				.type = RTE_BPF_ARG_PTR,
2901 				.size = sizeof(struct dummy_offset),
2902 			},
2903 		},
2904 		.prepare = test_store1_prepare,
2905 		.check_result = test_store1_check,
2906 	},
2907 	{
2908 		.name = "test_load1",
2909 		.arg_sz = sizeof(struct dummy_offset),
2910 		.prm = {
2911 			.ins = test_load1_prog,
2912 			.nb_ins = RTE_DIM(test_load1_prog),
2913 			.prog_arg = {
2914 				.type = RTE_BPF_ARG_PTR,
2915 				.size = sizeof(struct dummy_offset),
2916 			},
2917 		},
2918 		.prepare = test_load1_prepare,
2919 		.check_result = test_load1_check,
2920 	},
2921 	{
2922 		.name = "test_ldimm1",
2923 		.arg_sz = sizeof(struct dummy_offset),
2924 		.prm = {
2925 			.ins = test_ldimm1_prog,
2926 			.nb_ins = RTE_DIM(test_ldimm1_prog),
2927 			.prog_arg = {
2928 				.type = RTE_BPF_ARG_PTR,
2929 				.size = sizeof(struct dummy_offset),
2930 			},
2931 		},
2932 		.prepare = test_store1_prepare,
2933 		.check_result = test_ldimm1_check,
2934 	},
2935 	{
2936 		.name = "test_mul1",
2937 		.arg_sz = sizeof(struct dummy_vect8),
2938 		.prm = {
2939 			.ins = test_mul1_prog,
2940 			.nb_ins = RTE_DIM(test_mul1_prog),
2941 			.prog_arg = {
2942 				.type = RTE_BPF_ARG_PTR,
2943 				.size = sizeof(struct dummy_vect8),
2944 			},
2945 		},
2946 		.prepare = test_mul1_prepare,
2947 		.check_result = test_mul1_check,
2948 	},
2949 	{
2950 		.name = "test_shift1",
2951 		.arg_sz = sizeof(struct dummy_vect8),
2952 		.prm = {
2953 			.ins = test_shift1_prog,
2954 			.nb_ins = RTE_DIM(test_shift1_prog),
2955 			.prog_arg = {
2956 				.type = RTE_BPF_ARG_PTR,
2957 				.size = sizeof(struct dummy_vect8),
2958 			},
2959 		},
2960 		.prepare = test_shift1_prepare,
2961 		.check_result = test_shift1_check,
2962 	},
2963 	{
2964 		.name = "test_jump1",
2965 		.arg_sz = sizeof(struct dummy_vect8),
2966 		.prm = {
2967 			.ins = test_jump1_prog,
2968 			.nb_ins = RTE_DIM(test_jump1_prog),
2969 			.prog_arg = {
2970 				.type = RTE_BPF_ARG_PTR,
2971 				.size = sizeof(struct dummy_vect8),
2972 			},
2973 		},
2974 		.prepare = test_jump1_prepare,
2975 		.check_result = test_jump1_check,
2976 	},
2977 	{
2978 		.name = "test_jump2",
2979 		.arg_sz = sizeof(struct dummy_net),
2980 		.prm = {
2981 			.ins = test_jump2_prog,
2982 			.nb_ins = RTE_DIM(test_jump2_prog),
2983 			.prog_arg = {
2984 				.type = RTE_BPF_ARG_PTR,
2985 				.size = sizeof(struct dummy_net),
2986 			},
2987 		},
2988 		.prepare = test_jump2_prepare,
2989 		.check_result = test_jump2_check,
2990 	},
2991 	{
2992 		.name = "test_alu1",
2993 		.arg_sz = sizeof(struct dummy_vect8),
2994 		.prm = {
2995 			.ins = test_alu1_prog,
2996 			.nb_ins = RTE_DIM(test_alu1_prog),
2997 			.prog_arg = {
2998 				.type = RTE_BPF_ARG_PTR,
2999 				.size = sizeof(struct dummy_vect8),
3000 			},
3001 		},
3002 		.prepare = test_jump1_prepare,
3003 		.check_result = test_alu1_check,
3004 	},
3005 	{
3006 		.name = "test_bele1",
3007 		.arg_sz = sizeof(struct dummy_vect8),
3008 		.prm = {
3009 			.ins = test_bele1_prog,
3010 			.nb_ins = RTE_DIM(test_bele1_prog),
3011 			.prog_arg = {
3012 				.type = RTE_BPF_ARG_PTR,
3013 				.size = sizeof(struct dummy_vect8),
3014 			},
3015 		},
3016 		.prepare = test_bele1_prepare,
3017 		.check_result = test_bele1_check,
3018 	},
3019 	{
3020 		.name = "test_xadd1",
3021 		.arg_sz = sizeof(struct dummy_offset),
3022 		.prm = {
3023 			.ins = test_xadd1_prog,
3024 			.nb_ins = RTE_DIM(test_xadd1_prog),
3025 			.prog_arg = {
3026 				.type = RTE_BPF_ARG_PTR,
3027 				.size = sizeof(struct dummy_offset),
3028 			},
3029 		},
3030 		.prepare = test_store1_prepare,
3031 		.check_result = test_xadd1_check,
3032 	},
3033 	{
3034 		.name = "test_div1",
3035 		.arg_sz = sizeof(struct dummy_vect8),
3036 		.prm = {
3037 			.ins = test_div1_prog,
3038 			.nb_ins = RTE_DIM(test_div1_prog),
3039 			.prog_arg = {
3040 				.type = RTE_BPF_ARG_PTR,
3041 				.size = sizeof(struct dummy_vect8),
3042 			},
3043 		},
3044 		.prepare = test_mul1_prepare,
3045 		.check_result = test_div1_check,
3046 	},
3047 	{
3048 		.name = "test_call1",
3049 		.arg_sz = sizeof(struct dummy_offset),
3050 		.prm = {
3051 			.ins = test_call1_prog,
3052 			.nb_ins = RTE_DIM(test_call1_prog),
3053 			.prog_arg = {
3054 				.type = RTE_BPF_ARG_PTR,
3055 				.size = sizeof(struct dummy_offset),
3056 			},
3057 			.xsym = test_call1_xsym,
3058 			.nb_xsym = RTE_DIM(test_call1_xsym),
3059 		},
3060 		.prepare = test_load1_prepare,
3061 		.check_result = test_call1_check,
3062 		/* function calls are not yet supported on 32-bit platforms */
3063 		.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3064 	},
3065 	{
3066 		.name = "test_call2",
3067 		.arg_sz = sizeof(struct dummy_offset),
3068 		.prm = {
3069 			.ins = test_call2_prog,
3070 			.nb_ins = RTE_DIM(test_call2_prog),
3071 			.prog_arg = {
3072 				.type = RTE_BPF_ARG_PTR,
3073 				.size = sizeof(struct dummy_offset),
3074 			},
3075 			.xsym = test_call2_xsym,
3076 			.nb_xsym = RTE_DIM(test_call2_xsym),
3077 		},
3078 		.prepare = test_store1_prepare,
3079 		.check_result = test_call2_check,
3080 		/* function calls are not yet supported on 32-bit platforms */
3081 		.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3082 	},
3083 	{
3084 		.name = "test_call3",
3085 		.arg_sz = sizeof(struct dummy_vect8),
3086 		.prm = {
3087 			.ins = test_call3_prog,
3088 			.nb_ins = RTE_DIM(test_call3_prog),
3089 			.prog_arg = {
3090 				.type = RTE_BPF_ARG_PTR,
3091 				.size = sizeof(struct dummy_vect8),
3092 			},
3093 			.xsym = test_call3_xsym,
3094 			.nb_xsym = RTE_DIM(test_call3_xsym),
3095 		},
3096 		.prepare = test_call3_prepare,
3097 		.check_result = test_call3_check,
3098 		/* function calls are not yet supported on 32-bit platforms */
3099 		.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3100 	},
3101 	{
3102 		.name = "test_call4",
3103 		.arg_sz = sizeof(struct dummy_offset),
3104 		.prm = {
3105 			.ins = test_call4_prog,
3106 			.nb_ins = RTE_DIM(test_call4_prog),
3107 			.prog_arg = {
3108 				.type = RTE_BPF_ARG_PTR,
3109 				.size = 2 * sizeof(struct dummy_offset),
3110 			},
3111 			.xsym = test_call4_xsym,
3112 			.nb_xsym = RTE_DIM(test_call4_xsym),
3113 		},
3114 		.prepare = test_store1_prepare,
3115 		.check_result = test_call4_check,
3116 		/* function calls are not yet supported on 32-bit platforms */
3117 		.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3118 	},
3119 	{
3120 		.name = "test_call5",
3121 		.arg_sz = sizeof(struct dummy_offset),
3122 		.prm = {
3123 			.ins = test_call5_prog,
3124 			.nb_ins = RTE_DIM(test_call5_prog),
3125 			.prog_arg = {
3126 				.type = RTE_BPF_ARG_PTR,
3127 				.size = sizeof(struct dummy_offset),
3128 			},
3129 			.xsym = test_call5_xsym,
3130 			.nb_xsym = RTE_DIM(test_call5_xsym),
3131 		},
3132 		.prepare = test_store1_prepare,
3133 		.check_result = test_call5_check,
3134 		/* function calls are not yet supported on 32-bit platforms */
3135 		.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3136 	},
3137 	{
3138 		.name = "test_ld_mbuf1",
3139 		.arg_sz = sizeof(struct dummy_mbuf),
3140 		.prm = {
3141 			.ins = test_ld_mbuf1_prog,
3142 			.nb_ins = RTE_DIM(test_ld_mbuf1_prog),
3143 			.prog_arg = {
3144 				.type = RTE_BPF_ARG_PTR_MBUF,
3145 				.buf_size = sizeof(struct dummy_mbuf),
3146 			},
3147 		},
3148 		.prepare = test_ld_mbuf1_prepare,
3149 		.check_result = test_ld_mbuf1_check,
3150 		/* mbuf as input argument is not supported on 32-bit platforms */
3151 		.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3152 	},
3153 	{
3154 		.name = "test_ld_mbuf2",
3155 		.arg_sz = sizeof(struct dummy_mbuf),
3156 		.prm = {
3157 			.ins = test_ld_mbuf1_prog,
3158 			.nb_ins = RTE_DIM(test_ld_mbuf1_prog),
3159 			.prog_arg = {
3160 				.type = RTE_BPF_ARG_PTR_MBUF,
3161 				.buf_size = sizeof(struct dummy_mbuf),
3162 			},
3163 		},
3164 		.prepare = test_ld_mbuf2_prepare,
3165 		.check_result = test_ld_mbuf2_check,
3166 		/* mbuf as input argument is not supported on 32-bit platforms */
3167 		.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3168 	},
3169 	{
3170 		.name = "test_ld_mbuf3",
3171 		.arg_sz = sizeof(struct dummy_mbuf),
3172 		.prm = {
3173 			.ins = test_ld_mbuf3_prog,
3174 			.nb_ins = RTE_DIM(test_ld_mbuf3_prog),
3175 			.prog_arg = {
3176 				.type = RTE_BPF_ARG_PTR_MBUF,
3177 				.buf_size = sizeof(struct dummy_mbuf),
3178 			},
3179 		},
3180 		.prepare = test_ld_mbuf1_prepare,
3181 		.check_result = test_ld_mbuf1_check,
3182 		/* mbuf as input argument is not supported on 32-bit platforms */
3183 		.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3184 	},
3185 };
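/*
 * A minimal sketch of a new entry for the table above (the test_foo_*
 * names are hypothetical placeholders):
 *
 *	{
 *		.name = "test_foo",
 *		.arg_sz = sizeof(struct dummy_offset),
 *		.prm = {
 *			.ins = test_foo_prog,
 *			.nb_ins = RTE_DIM(test_foo_prog),
 *			.prog_arg = {
 *				.type = RTE_BPF_ARG_PTR,
 *				.size = sizeof(struct dummy_offset),
 *			},
 *		},
 *		.prepare = test_foo_prepare,
 *		.check_result = test_foo_check,
 *	},
 */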
3186 
3187 static int
3188 run_test(const struct bpf_test *tst)
3189 {
3190 	int32_t ret, rv;
3191 	int64_t rc;
3192 	struct rte_bpf *bpf;
3193 	struct rte_bpf_jit jit;
3194 	uint8_t tbuf[tst->arg_sz];
3195 
3196 	printf("%s(%s) start\n", __func__, tst->name);
3197 
3198 	bpf = rte_bpf_load(&tst->prm);
3199 	if (bpf == NULL) {
3200 		printf("%s@%d: failed to load bpf code, error=%d(%s);\n",
3201 			__func__, __LINE__, rte_errno, strerror(rte_errno));
3202 		return -1;
3203 	}
3204 
3205 	tst->prepare(tbuf);
3206 	rc = rte_bpf_exec(bpf, tbuf);
3207 	ret = tst->check_result(rc, tbuf);
3208 	if (ret != 0) {
3209 		printf("%s@%d: check_result(%s) failed, error: %d(%s);\n",
3210 			__func__, __LINE__, tst->name, ret, strerror(ret));
3211 	}
3212 
3213 	/* repeat the same test with the JIT, when possible */
3214 	rte_bpf_get_jit(bpf, &jit);
3215 	if (jit.func != NULL) {
3216 
3217 		tst->prepare(tbuf);
3218 		rc = jit.func(tbuf);
3219 		rv = tst->check_result(rc, tbuf);
3220 		ret |= rv;
3221 		if (rv != 0) {
3222 			printf("%s@%d: check_result(%s) failed, "
3223 				"error: %d(%s);\n",
3224 				__func__, __LINE__, tst->name,
3225 				rv, strerror(rv));
3226 		}
3227 	}
3228 
3229 	rte_bpf_destroy(bpf);
3230 	return ret;
3232 }
3233 
3234 static int
3235 test_bpf(void)
3236 {
3237 	int32_t rc, rv;
3238 	uint32_t i;
3239 
3240 	rc = 0;
3241 	for (i = 0; i != RTE_DIM(tests); i++) {
3242 		rv = run_test(tests + i);
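		/* a failure of an allow_fail case does not affect the overall result */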
3243 		if (tests[i].allow_fail == 0)
3244 			rc |= rv;
3245 	}
3246 
3247 	return rc;
3248 }
3249 
3250 REGISTER_TEST_COMMAND(bpf_autotest, test_bpf);
3251