xref: /dpdk/app/test/test_bpf.c (revision 68a03efeed657e6e05f281479b33b51102797e15)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4 
5 #include <stdio.h>
6 #include <string.h>
7 #include <stdint.h>
8 #include <inttypes.h>
9 
10 #include <rte_memory.h>
11 #include <rte_debug.h>
12 #include <rte_hexdump.h>
13 #include <rte_random.h>
14 #include <rte_byteorder.h>
15 #include <rte_errno.h>
16 #include <rte_bpf.h>
17 #include <rte_ether.h>
18 #include <rte_ip.h>
19 
20 #include "test.h"
21 
22 /*
23  * Basic functional tests for librte_bpf.
24  * The main procedure: load an eBPF program, execute it and
25  * compare results with expected values.
26  */
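/*
 * A minimal sketch of that flow, for orientation only (the real test
 * driver lives further down in this file; tst/tbuf are illustrative
 * names, while rte_bpf_load/rte_bpf_exec/rte_bpf_destroy are the
 * public librte_bpf entry points):
 *
 *	bpf = rte_bpf_load(&tst->prm);
 *	tst->prepare(tbuf);
 *	rc = rte_bpf_exec(bpf, tbuf);
 *	ret = tst->check_result(rc, tbuf);
 *	rte_bpf_destroy(bpf);
 */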
27 
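/* data buffer accessed by the load/store test programs: one field per access width */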
28 struct dummy_offset {
29 	uint64_t u64;
30 	uint32_t u32;
31 	uint16_t u16;
32 	uint8_t  u8;
33 };
34 
35 struct dummy_vect8 {
36 	struct dummy_offset in[8];
37 	struct dummy_offset out[8];
38 };
39 
40 struct dummy_net {
41 	struct rte_ether_hdr eth_hdr;
42 	struct rte_vlan_hdr vlan_hdr;
43 	struct rte_ipv4_hdr ip_hdr;
44 };
45 
46 #define	DUMMY_MBUF_NUM	2
47 
48 /* first mbuf in the packet, should always be at offset 0 */
49 struct dummy_mbuf {
50 	struct rte_mbuf mb[DUMMY_MBUF_NUM];
51 	uint8_t buf[DUMMY_MBUF_NUM][RTE_MBUF_DEFAULT_BUF_SIZE];
52 };
53 
54 #define	TEST_FILL_1	0xDEADBEEF
55 
56 #define	TEST_MUL_1	21
57 #define TEST_MUL_2	-100
58 
59 #define TEST_SHIFT_1	15
60 #define TEST_SHIFT_2	33
61 
62 #define TEST_JCC_1	0
63 #define TEST_JCC_2	-123
64 #define TEST_JCC_3	5678
65 #define TEST_JCC_4	TEST_FILL_1
66 
67 #define TEST_IMM_1	UINT64_MAX
68 #define TEST_IMM_2	((uint64_t)INT64_MIN)
69 #define TEST_IMM_3	((uint64_t)INT64_MAX + INT32_MAX)
70 #define TEST_IMM_4	((uint64_t)UINT32_MAX)
71 #define TEST_IMM_5	((uint64_t)UINT32_MAX + 1)
72 
73 #define TEST_MEMFROB	0x2a2a2a2a
74 
75 #define STRING_GEEK	0x6B656567
76 #define STRING_WEEK	0x6B656577
77 
78 #define TEST_NETMASK 0xffffff00
79 #define TEST_SUBNET  0xaca80200
80 
81 uint8_t src_mac[] = { 0x00, 0xFF, 0xAA, 0xFF, 0xAA, 0xFF };
82 uint8_t dst_mac[] = { 0x00, 0xAA, 0xFF, 0xAA, 0xFF, 0xAA };
83 
84 uint32_t ip_src_addr = (172U << 24) | (168U << 16) | (2 << 8) | 1;
85 uint32_t ip_dst_addr = (172U << 24) | (168U << 16) | (2 << 8) | 2;
86 
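/* test-case descriptor: an eBPF program plus callbacks to prepare its input and verify its output */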
87 struct bpf_test {
88 	const char *name;
89 	size_t arg_sz;
90 	struct rte_bpf_prm prm;
91 	void (*prepare)(void *);
92 	int (*check_result)(uint64_t, const void *);
93 	uint32_t allow_fail;
94 };
95 
96 /*
97  * Compare return value and result data with expected ones.
98  * Report a failure if they don't match.
99  */
100 static int
101 cmp_res(const char *func, uint64_t exp_rc, uint64_t ret_rc,
102 	const void *exp_res, const void *ret_res, size_t res_sz)
103 {
104 	int32_t ret;
105 
106 	ret = 0;
107 	if (exp_rc != ret_rc) {
108 		printf("%s@%d: invalid return value, expected: 0x%" PRIx64
109 			", result: 0x%" PRIx64 "\n",
110 			func, __LINE__, exp_rc, ret_rc);
111 		ret |= -1;
112 	}
113 
114 	if (memcmp(exp_res, ret_res, res_sz) != 0) {
115 		printf("%s: invalid value\n", func);
116 		rte_memdump(stdout, "expected", exp_res, res_sz);
117 		rte_memdump(stdout, "result", ret_res, res_sz);
118 		ret |= -1;
119 	}
120 
121 	return ret;
122 }
123 
124 /* store immediate test-cases */
125 static const struct ebpf_insn test_store1_prog[] = {
126 	{
127 		.code = (BPF_ST | BPF_MEM | BPF_B),
128 		.dst_reg = EBPF_REG_1,
129 		.off = offsetof(struct dummy_offset, u8),
130 		.imm = TEST_FILL_1,
131 	},
132 	{
133 		.code = (BPF_ST | BPF_MEM | BPF_H),
134 		.dst_reg = EBPF_REG_1,
135 		.off = offsetof(struct dummy_offset, u16),
136 		.imm = TEST_FILL_1,
137 	},
138 	{
139 		.code = (BPF_ST | BPF_MEM | BPF_W),
140 		.dst_reg = EBPF_REG_1,
141 		.off = offsetof(struct dummy_offset, u32),
142 		.imm = TEST_FILL_1,
143 	},
144 	{
145 		.code = (BPF_ST | BPF_MEM | EBPF_DW),
146 		.dst_reg = EBPF_REG_1,
147 		.off = offsetof(struct dummy_offset, u64),
148 		.imm = TEST_FILL_1,
149 	},
150 	/* return 1 */
151 	{
152 		.code = (BPF_ALU | EBPF_MOV | BPF_K),
153 		.dst_reg = EBPF_REG_0,
154 		.imm = 1,
155 	},
156 	{
157 		.code = (BPF_JMP | EBPF_EXIT),
158 	},
159 };
160 
161 static void
162 test_store1_prepare(void *arg)
163 {
164 	struct dummy_offset *df;
165 
166 	df = arg;
167 	memset(df, 0, sizeof(*df));
168 }
169 
170 static int
171 test_store1_check(uint64_t rc, const void *arg)
172 {
173 	const struct dummy_offset *dft;
174 	struct dummy_offset dfe;
175 
176 	dft = arg;
177 
178 	memset(&dfe, 0, sizeof(dfe));
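	/* the 64-bit store writes the sign-extended 32-bit immediate */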
179 	dfe.u64 = (int32_t)TEST_FILL_1;
180 	dfe.u32 = dfe.u64;
181 	dfe.u16 = dfe.u64;
182 	dfe.u8 = dfe.u64;
183 
184 	return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe));
185 }
186 
187 /* store register test-cases */
188 static const struct ebpf_insn test_store2_prog[] = {
189 
190 	{
191 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
192 		.dst_reg = EBPF_REG_2,
193 		.imm = TEST_FILL_1,
194 	},
195 	{
196 		.code = (BPF_STX | BPF_MEM | BPF_B),
197 		.dst_reg = EBPF_REG_1,
198 		.src_reg = EBPF_REG_2,
199 		.off = offsetof(struct dummy_offset, u8),
200 	},
201 	{
202 		.code = (BPF_STX | BPF_MEM | BPF_H),
203 		.dst_reg = EBPF_REG_1,
204 		.src_reg = EBPF_REG_2,
205 		.off = offsetof(struct dummy_offset, u16),
206 	},
207 	{
208 		.code = (BPF_STX | BPF_MEM | BPF_W),
209 		.dst_reg = EBPF_REG_1,
210 		.src_reg = EBPF_REG_2,
211 		.off = offsetof(struct dummy_offset, u32),
212 	},
213 	{
214 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
215 		.dst_reg = EBPF_REG_1,
216 		.src_reg = EBPF_REG_2,
217 		.off = offsetof(struct dummy_offset, u64),
218 	},
219 	/* return 1 */
220 	{
221 		.code = (BPF_ALU | EBPF_MOV | BPF_K),
222 		.dst_reg = EBPF_REG_0,
223 		.imm = 1,
224 	},
225 	{
226 		.code = (BPF_JMP | EBPF_EXIT),
227 	},
228 };
229 
230 /* load test-cases */
231 static const struct ebpf_insn test_load1_prog[] = {
232 
233 	{
234 		.code = (BPF_LDX | BPF_MEM | BPF_B),
235 		.dst_reg = EBPF_REG_2,
236 		.src_reg = EBPF_REG_1,
237 		.off = offsetof(struct dummy_offset, u8),
238 	},
239 	{
240 		.code = (BPF_LDX | BPF_MEM | BPF_H),
241 		.dst_reg = EBPF_REG_3,
242 		.src_reg = EBPF_REG_1,
243 		.off = offsetof(struct dummy_offset, u16),
244 	},
245 	{
246 		.code = (BPF_LDX | BPF_MEM | BPF_W),
247 		.dst_reg = EBPF_REG_4,
248 		.src_reg = EBPF_REG_1,
249 		.off = offsetof(struct dummy_offset, u32),
250 	},
251 	{
252 		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
253 		.dst_reg = EBPF_REG_0,
254 		.src_reg = EBPF_REG_1,
255 		.off = offsetof(struct dummy_offset, u64),
256 	},
257 	/* return sum */
258 	{
259 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
260 		.dst_reg = EBPF_REG_0,
261 		.src_reg = EBPF_REG_4,
262 	},
263 	{
264 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
265 		.dst_reg = EBPF_REG_0,
266 		.src_reg = EBPF_REG_3,
267 	},
268 	{
269 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
270 		.dst_reg = EBPF_REG_0,
271 		.src_reg = EBPF_REG_2,
272 	},
273 	{
274 		.code = (BPF_JMP | EBPF_EXIT),
275 	},
276 };
277 
278 static void
279 test_load1_prepare(void *arg)
280 {
281 	struct dummy_offset *df;
282 
283 	df = arg;
284 
285 	memset(df, 0, sizeof(*df));
286 	df->u64 = (int32_t)TEST_FILL_1;
287 	df->u32 = df->u64;
288 	df->u16 = df->u64;
289 	df->u8 = df->u64;
290 }
291 
292 static int
293 test_load1_check(uint64_t rc, const void *arg)
294 {
295 	uint64_t v;
296 	const struct dummy_offset *dft;
297 
298 	dft = arg;
299 	v = dft->u64;
300 	v += dft->u32;
301 	v += dft->u16;
302 	v += dft->u8;
303 
304 	return cmp_res(__func__, v, rc, dft, dft, sizeof(*dft));
305 }
306 
307 /* load immediate test-cases */
308 static const struct ebpf_insn test_ldimm1_prog[] = {
309 
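	/*
	 * A 64-bit load-immediate occupies two instruction slots:
	 * the low 32 bits of the value go in the first .imm,
	 * the high 32 bits in the .imm of the following slot.
	 */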
310 	{
311 		.code = (BPF_LD | BPF_IMM | EBPF_DW),
312 		.dst_reg = EBPF_REG_0,
313 		.imm = (uint32_t)TEST_IMM_1,
314 	},
315 	{
316 		.imm = TEST_IMM_1 >> 32,
317 	},
318 	{
319 		.code = (BPF_LD | BPF_IMM | EBPF_DW),
320 		.dst_reg = EBPF_REG_3,
321 		.imm = (uint32_t)TEST_IMM_2,
322 	},
323 	{
324 		.imm = TEST_IMM_2 >> 32,
325 	},
326 	{
327 		.code = (BPF_LD | BPF_IMM | EBPF_DW),
328 		.dst_reg = EBPF_REG_5,
329 		.imm = (uint32_t)TEST_IMM_3,
330 	},
331 	{
332 		.imm = TEST_IMM_3 >> 32,
333 	},
334 	{
335 		.code = (BPF_LD | BPF_IMM | EBPF_DW),
336 		.dst_reg = EBPF_REG_7,
337 		.imm = (uint32_t)TEST_IMM_4,
338 	},
339 	{
340 		.imm = TEST_IMM_4 >> 32,
341 	},
342 	{
343 		.code = (BPF_LD | BPF_IMM | EBPF_DW),
344 		.dst_reg = EBPF_REG_9,
345 		.imm = (uint32_t)TEST_IMM_5,
346 	},
347 	{
348 		.imm = TEST_IMM_5 >> 32,
349 	},
350 	/* return sum */
351 	{
352 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
353 		.dst_reg = EBPF_REG_0,
354 		.src_reg = EBPF_REG_3,
355 	},
356 	{
357 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
358 		.dst_reg = EBPF_REG_0,
359 		.src_reg = EBPF_REG_5,
360 	},
361 	{
362 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
363 		.dst_reg = EBPF_REG_0,
364 		.src_reg = EBPF_REG_7,
365 	},
366 	{
367 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
368 		.dst_reg = EBPF_REG_0,
369 		.src_reg = EBPF_REG_9,
370 	},
371 	{
372 		.code = (BPF_JMP | EBPF_EXIT),
373 	},
374 };
375 
376 static int
377 test_ldimm1_check(uint64_t rc, const void *arg)
378 {
379 	uint64_t v1, v2;
380 
381 	v1 = TEST_IMM_1;
382 	v2 = TEST_IMM_2;
383 	v1 += v2;
384 	v2 = TEST_IMM_3;
385 	v1 += v2;
386 	v2 = TEST_IMM_4;
387 	v1 += v2;
388 	v2 = TEST_IMM_5;
389 	v1 += v2;
390 
391 	return cmp_res(__func__, v1, rc, arg, arg, 0);
392 }
393 
394 
395 /* alu mul test-cases */
396 static const struct ebpf_insn test_mul1_prog[] = {
397 
398 	{
399 		.code = (BPF_LDX | BPF_MEM | BPF_W),
400 		.dst_reg = EBPF_REG_2,
401 		.src_reg = EBPF_REG_1,
402 		.off = offsetof(struct dummy_vect8, in[0].u32),
403 	},
404 	{
405 		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
406 		.dst_reg = EBPF_REG_3,
407 		.src_reg = EBPF_REG_1,
408 		.off = offsetof(struct dummy_vect8, in[1].u64),
409 	},
410 	{
411 		.code = (BPF_LDX | BPF_MEM | BPF_W),
412 		.dst_reg = EBPF_REG_4,
413 		.src_reg = EBPF_REG_1,
414 		.off = offsetof(struct dummy_vect8, in[2].u32),
415 	},
416 	{
417 		.code = (BPF_ALU | BPF_MUL | BPF_K),
418 		.dst_reg = EBPF_REG_2,
419 		.imm = TEST_MUL_1,
420 	},
421 	{
422 		.code = (EBPF_ALU64 | BPF_MUL | BPF_K),
423 		.dst_reg = EBPF_REG_3,
424 		.imm = TEST_MUL_2,
425 	},
426 	{
427 		.code = (BPF_ALU | BPF_MUL | BPF_X),
428 		.dst_reg = EBPF_REG_4,
429 		.src_reg = EBPF_REG_2,
430 	},
431 	{
432 		.code = (EBPF_ALU64 | BPF_MUL | BPF_X),
433 		.dst_reg = EBPF_REG_4,
434 		.src_reg = EBPF_REG_3,
435 	},
436 	{
437 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
438 		.dst_reg = EBPF_REG_1,
439 		.src_reg = EBPF_REG_2,
440 		.off = offsetof(struct dummy_vect8, out[0].u64),
441 	},
442 	{
443 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
444 		.dst_reg = EBPF_REG_1,
445 		.src_reg = EBPF_REG_3,
446 		.off = offsetof(struct dummy_vect8, out[1].u64),
447 	},
448 	{
449 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
450 		.dst_reg = EBPF_REG_1,
451 		.src_reg = EBPF_REG_4,
452 		.off = offsetof(struct dummy_vect8, out[2].u64),
453 	},
454 	/* return 1 */
455 	{
456 		.code = (BPF_ALU | EBPF_MOV | BPF_K),
457 		.dst_reg = EBPF_REG_0,
458 		.imm = 1,
459 	},
460 	{
461 		.code = (BPF_JMP | EBPF_EXIT),
462 	},
463 };
464 
465 static void
466 test_mul1_prepare(void *arg)
467 {
468 	struct dummy_vect8 *dv;
469 	uint64_t v;
470 
471 	dv = arg;
472 
473 	v = rte_rand();
474 
475 	memset(dv, 0, sizeof(*dv));
476 	dv->in[0].u32 = v;
477 	dv->in[1].u64 = v << 12 | v >> 6;
478 	dv->in[2].u32 = -v;
479 }
480 
481 static int
482 test_mul1_check(uint64_t rc, const void *arg)
483 {
484 	uint64_t r2, r3, r4;
485 	const struct dummy_vect8 *dvt;
486 	struct dummy_vect8 dve;
487 
488 	dvt = arg;
489 	memset(&dve, 0, sizeof(dve));
490 
491 	r2 = dvt->in[0].u32;
492 	r3 = dvt->in[1].u64;
493 	r4 = dvt->in[2].u32;
494 
495 	r2 = (uint32_t)r2 * TEST_MUL_1;
496 	r3 *= TEST_MUL_2;
497 	r4 = (uint32_t)(r4 * r2);
498 	r4 *= r3;
499 
500 	dve.out[0].u64 = r2;
501 	dve.out[1].u64 = r3;
502 	dve.out[2].u64 = r4;
503 
504 	return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
505 }
506 
507 /* alu shift test-cases */
508 static const struct ebpf_insn test_shift1_prog[] = {
509 
510 	{
511 		.code = (BPF_LDX | BPF_MEM | BPF_W),
512 		.dst_reg = EBPF_REG_2,
513 		.src_reg = EBPF_REG_1,
514 		.off = offsetof(struct dummy_vect8, in[0].u32),
515 	},
516 	{
517 		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
518 		.dst_reg = EBPF_REG_3,
519 		.src_reg = EBPF_REG_1,
520 		.off = offsetof(struct dummy_vect8, in[1].u64),
521 	},
522 	{
523 		.code = (BPF_LDX | BPF_MEM | BPF_W),
524 		.dst_reg = EBPF_REG_4,
525 		.src_reg = EBPF_REG_1,
526 		.off = offsetof(struct dummy_vect8, in[2].u32),
527 	},
528 	{
529 		.code = (BPF_ALU | BPF_LSH | BPF_K),
530 		.dst_reg = EBPF_REG_2,
531 		.imm = TEST_SHIFT_1,
532 	},
533 	{
534 		.code = (EBPF_ALU64 | EBPF_ARSH | BPF_K),
535 		.dst_reg = EBPF_REG_3,
536 		.imm = TEST_SHIFT_2,
537 	},
538 	{
539 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
540 		.dst_reg = EBPF_REG_1,
541 		.src_reg = EBPF_REG_2,
542 		.off = offsetof(struct dummy_vect8, out[0].u64),
543 	},
544 	{
545 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
546 		.dst_reg = EBPF_REG_1,
547 		.src_reg = EBPF_REG_3,
548 		.off = offsetof(struct dummy_vect8, out[1].u64),
549 	},
550 	{
551 		.code = (BPF_ALU | BPF_RSH | BPF_X),
552 		.dst_reg = EBPF_REG_2,
553 		.src_reg = EBPF_REG_4,
554 	},
555 	{
556 		.code = (EBPF_ALU64 | BPF_LSH | BPF_X),
557 		.dst_reg = EBPF_REG_3,
558 		.src_reg = EBPF_REG_4,
559 	},
560 	{
561 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
562 		.dst_reg = EBPF_REG_1,
563 		.src_reg = EBPF_REG_2,
564 		.off = offsetof(struct dummy_vect8, out[2].u64),
565 	},
566 	{
567 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
568 		.dst_reg = EBPF_REG_1,
569 		.src_reg = EBPF_REG_3,
570 		.off = offsetof(struct dummy_vect8, out[3].u64),
571 	},
572 	{
573 		.code = (BPF_LDX | BPF_MEM | BPF_W),
574 		.dst_reg = EBPF_REG_2,
575 		.src_reg = EBPF_REG_1,
576 		.off = offsetof(struct dummy_vect8, in[0].u32),
577 	},
578 	{
579 		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
580 		.dst_reg = EBPF_REG_3,
581 		.src_reg = EBPF_REG_1,
582 		.off = offsetof(struct dummy_vect8, in[1].u64),
583 	},
584 	{
585 		.code = (BPF_LDX | BPF_MEM | BPF_W),
586 		.dst_reg = EBPF_REG_4,
587 		.src_reg = EBPF_REG_1,
588 		.off = offsetof(struct dummy_vect8, in[2].u32),
589 	},
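	/*
	 * Mask the shift counts below to the operand width (63 for
	 * 64-bit, 31 for 32-bit) since the inputs are random.
	 */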
590 	{
591 		.code = (BPF_ALU | BPF_AND | BPF_K),
592 		.dst_reg = EBPF_REG_2,
593 		.imm = sizeof(uint64_t) * CHAR_BIT - 1,
594 	},
595 	{
596 		.code = (EBPF_ALU64 | EBPF_ARSH | BPF_X),
597 		.dst_reg = EBPF_REG_3,
598 		.src_reg = EBPF_REG_2,
599 	},
600 	{
601 		.code = (BPF_ALU | BPF_AND | BPF_K),
602 		.dst_reg = EBPF_REG_2,
603 		.imm = sizeof(uint32_t) * CHAR_BIT - 1,
604 	},
605 	{
606 		.code = (BPF_ALU | BPF_LSH | BPF_X),
607 		.dst_reg = EBPF_REG_4,
608 		.src_reg = EBPF_REG_2,
609 	},
610 	{
611 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
612 		.dst_reg = EBPF_REG_1,
613 		.src_reg = EBPF_REG_4,
614 		.off = offsetof(struct dummy_vect8, out[4].u64),
615 	},
616 	{
617 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
618 		.dst_reg = EBPF_REG_1,
619 		.src_reg = EBPF_REG_3,
620 		.off = offsetof(struct dummy_vect8, out[5].u64),
621 	},
622 	/* return 1 */
623 	{
624 		.code = (BPF_ALU | EBPF_MOV | BPF_K),
625 		.dst_reg = EBPF_REG_0,
626 		.imm = 1,
627 	},
628 	{
629 		.code = (BPF_JMP | EBPF_EXIT),
630 	},
631 };
632 
633 static void
634 test_shift1_prepare(void *arg)
635 {
636 	struct dummy_vect8 *dv;
637 	uint64_t v;
638 
639 	dv = arg;
640 
641 	v = rte_rand();
642 
643 	memset(dv, 0, sizeof(*dv));
644 	dv->in[0].u32 = v;
645 	dv->in[1].u64 = v << 12 | v >> 6;
646 	dv->in[2].u32 = (-v ^ 5);
647 }
648 
649 static int
650 test_shift1_check(uint64_t rc, const void *arg)
651 {
652 	uint64_t r2, r3, r4;
653 	const struct dummy_vect8 *dvt;
654 	struct dummy_vect8 dve;
655 
656 	dvt = arg;
657 	memset(&dve, 0, sizeof(dve));
658 
659 	r2 = dvt->in[0].u32;
660 	r3 = dvt->in[1].u64;
661 	r4 = dvt->in[2].u32;
662 
663 	r2 = (uint32_t)r2 << TEST_SHIFT_1;
664 	r3 = (int64_t)r3 >> TEST_SHIFT_2;
665 
666 	dve.out[0].u64 = r2;
667 	dve.out[1].u64 = r3;
668 
669 	r2 = (uint32_t)r2 >> r4;
670 	r3 <<= r4;
671 
672 	dve.out[2].u64 = r2;
673 	dve.out[3].u64 = r3;
674 
675 	r2 = dvt->in[0].u32;
676 	r3 = dvt->in[1].u64;
677 	r4 = dvt->in[2].u32;
678 
679 	r2 &= sizeof(uint64_t) * CHAR_BIT - 1;
680 	r3 = (int64_t)r3 >> r2;
681 	r2 &= sizeof(uint32_t) * CHAR_BIT - 1;
682 	r4 = (uint32_t)r4 << r2;
683 
684 	dve.out[4].u64 = r4;
685 	dve.out[5].u64 = r3;
686 
687 	return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
688 }
689 
690 /* jmp test-cases */
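/*
 * Each taken branch below lands on an OR that sets one bit in R0,
 * followed by a JA with a negative offset that resumes at the next
 * comparison; test_jump1_check() recomputes the same bitmask in C.
 */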
691 static const struct ebpf_insn test_jump1_prog[] = {
692 
693 	[0] = {
694 		.code = (BPF_ALU | EBPF_MOV | BPF_K),
695 		.dst_reg = EBPF_REG_0,
696 		.imm = 0,
697 	},
698 	[1] = {
699 		.code = (BPF_LDX | BPF_MEM | BPF_W),
700 		.dst_reg = EBPF_REG_2,
701 		.src_reg = EBPF_REG_1,
702 		.off = offsetof(struct dummy_vect8, in[0].u32),
703 	},
704 	[2] = {
705 		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
706 		.dst_reg = EBPF_REG_3,
707 		.src_reg = EBPF_REG_1,
708 		.off = offsetof(struct dummy_vect8, in[0].u64),
709 	},
710 	[3] = {
711 		.code = (BPF_LDX | BPF_MEM | BPF_W),
712 		.dst_reg = EBPF_REG_4,
713 		.src_reg = EBPF_REG_1,
714 		.off = offsetof(struct dummy_vect8, in[1].u32),
715 	},
716 	[4] = {
717 		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
718 		.dst_reg = EBPF_REG_5,
719 		.src_reg = EBPF_REG_1,
720 		.off = offsetof(struct dummy_vect8, in[1].u64),
721 	},
722 	[5] = {
723 		.code = (BPF_JMP | BPF_JEQ | BPF_K),
724 		.dst_reg = EBPF_REG_2,
725 		.imm = TEST_JCC_1,
726 		.off = 8,
727 	},
728 	[6] = {
729 		.code = (BPF_JMP | EBPF_JSLE | BPF_K),
730 		.dst_reg = EBPF_REG_3,
731 		.imm = TEST_JCC_2,
732 		.off = 9,
733 	},
734 	[7] = {
735 		.code = (BPF_JMP | BPF_JGT | BPF_K),
736 		.dst_reg = EBPF_REG_4,
737 		.imm = TEST_JCC_3,
738 		.off = 10,
739 	},
740 	[8] = {
741 		.code = (BPF_JMP | BPF_JSET | BPF_K),
742 		.dst_reg = EBPF_REG_5,
743 		.imm = TEST_JCC_4,
744 		.off = 11,
745 	},
746 	[9] = {
747 		.code = (BPF_JMP | EBPF_JNE | BPF_X),
748 		.dst_reg = EBPF_REG_2,
749 		.src_reg = EBPF_REG_3,
750 		.off = 12,
751 	},
752 	[10] = {
753 		.code = (BPF_JMP | EBPF_JSGT | BPF_X),
754 		.dst_reg = EBPF_REG_2,
755 		.src_reg = EBPF_REG_4,
756 		.off = 13,
757 	},
758 	[11] = {
759 		.code = (BPF_JMP | EBPF_JLE | BPF_X),
760 		.dst_reg = EBPF_REG_2,
761 		.src_reg = EBPF_REG_5,
762 		.off = 14,
763 	},
764 	[12] = {
765 		.code = (BPF_JMP | BPF_JSET | BPF_X),
766 		.dst_reg = EBPF_REG_3,
767 		.src_reg = EBPF_REG_5,
768 		.off = 15,
769 	},
770 	[13] = {
771 		.code = (BPF_JMP | EBPF_EXIT),
772 	},
773 	[14] = {
774 		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
775 		.dst_reg = EBPF_REG_0,
776 		.imm = 0x1,
777 	},
778 	[15] = {
779 		.code = (BPF_JMP | BPF_JA),
780 		.off = -10,
781 	},
782 	[16] = {
783 		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
784 		.dst_reg = EBPF_REG_0,
785 		.imm = 0x2,
786 	},
787 	[17] = {
788 		.code = (BPF_JMP | BPF_JA),
789 		.off = -11,
790 	},
791 	[18] = {
792 		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
793 		.dst_reg = EBPF_REG_0,
794 		.imm = 0x4,
795 	},
796 	[19] = {
797 		.code = (BPF_JMP | BPF_JA),
798 		.off = -12,
799 	},
800 	[20] = {
801 		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
802 		.dst_reg = EBPF_REG_0,
803 		.imm = 0x8,
804 	},
805 	[21] = {
806 		.code = (BPF_JMP | BPF_JA),
807 		.off = -13,
808 	},
809 	[22] = {
810 		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
811 		.dst_reg = EBPF_REG_0,
812 		.imm = 0x10,
813 	},
814 	[23] = {
815 		.code = (BPF_JMP | BPF_JA),
816 		.off = -14,
817 	},
818 	[24] = {
819 		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
820 		.dst_reg = EBPF_REG_0,
821 		.imm = 0x20,
822 	},
823 	[25] = {
824 		.code = (BPF_JMP | BPF_JA),
825 		.off = -15,
826 	},
827 	[26] = {
828 		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
829 		.dst_reg = EBPF_REG_0,
830 		.imm = 0x40,
831 	},
832 	[27] = {
833 		.code = (BPF_JMP | BPF_JA),
834 		.off = -16,
835 	},
836 	[28] = {
837 		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
838 		.dst_reg = EBPF_REG_0,
839 		.imm = 0x80,
840 	},
841 	[29] = {
842 		.code = (BPF_JMP | BPF_JA),
843 		.off = -17,
844 	},
845 };
846 
847 static void
848 test_jump1_prepare(void *arg)
849 {
850 	struct dummy_vect8 *dv;
851 	uint64_t v1, v2;
852 
853 	dv = arg;
854 
855 	v1 = rte_rand();
856 	v2 = rte_rand();
857 
858 	memset(dv, 0, sizeof(*dv));
859 	dv->in[0].u64 = v1;
860 	dv->in[1].u64 = v2;
861 	dv->in[0].u32 = (v1 << 12) + (v2 >> 6);
862 	dv->in[1].u32 = (v2 << 12) - (v1 >> 6);
863 }
864 
865 static int
866 test_jump1_check(uint64_t rc, const void *arg)
867 {
868 	uint64_t r2, r3, r4, r5, rv;
869 	const struct dummy_vect8 *dvt;
870 
871 	dvt = arg;
872 
873 	rv = 0;
874 	r2 = dvt->in[0].u32;
875 	r3 = dvt->in[0].u64;
876 	r4 = dvt->in[1].u32;
877 	r5 = dvt->in[1].u64;
878 
879 	if (r2 == TEST_JCC_1)
880 		rv |= 0x1;
881 	if ((int64_t)r3 <= TEST_JCC_2)
882 		rv |= 0x2;
883 	if (r4 > TEST_JCC_3)
884 		rv |= 0x4;
885 	if (r5 & TEST_JCC_4)
886 		rv |= 0x8;
887 	if (r2 != r3)
888 		rv |= 0x10;
889 	if ((int64_t)r2 > (int64_t)r4)
890 		rv |= 0x20;
891 	if (r2 <= r5)
892 		rv |= 0x40;
893 	if (r3 & r5)
894 		rv |= 0x80;
895 
896 	return cmp_res(__func__, rv, rc, &rv, &rc, sizeof(rv));
897 }
898 
899 /* Jump test case - check whether the IPv4 destination address is in a particular subnet */
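/*
 * Rough equivalent (R1 = packet start; the 0x81 and 0x8 immediates are
 * the network-order type fields as seen by a 16-bit load on a
 * little-endian host):
 *
 *	l2_len = 14;
 *	if (ether_type == VLAN) { l2_len = 18; type = inner type; }
 *	if (type != IPv4) return -1;
 *	dst = *(uint32_t *)(pkt + l2_len + 16);
 *	return (dst & be32(TEST_NETMASK)) == be32(TEST_SUBNET) ? 0 : -1;
 */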
900 static const struct ebpf_insn test_jump2_prog[] = {
901 
902 	[0] = {
903 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
904 		.dst_reg = EBPF_REG_2,
905 		.imm = 0xe,
906 	},
907 	[1] = {
908 		.code = (BPF_LDX | BPF_MEM | BPF_H),
909 		.dst_reg = EBPF_REG_3,
910 		.src_reg = EBPF_REG_1,
911 		.off = 12,
912 	},
913 	[2] = {
914 		.code = (BPF_JMP | EBPF_JNE | BPF_K),
915 		.dst_reg = EBPF_REG_3,
916 		.off = 2,
917 		.imm = 0x81,
918 	},
919 	[3] = {
920 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
921 		.dst_reg = EBPF_REG_2,
922 		.imm = 0x12,
923 	},
924 	[4] = {
925 		.code = (BPF_LDX | BPF_MEM | BPF_H),
926 		.dst_reg = EBPF_REG_3,
927 		.src_reg = EBPF_REG_1,
928 		.off = 16,
929 	},
930 	[5] = {
931 		.code = (EBPF_ALU64 | BPF_AND | BPF_K),
932 		.dst_reg = EBPF_REG_3,
933 		.imm = 0xffff,
934 	},
935 	[6] = {
936 		.code = (BPF_JMP | EBPF_JNE | BPF_K),
937 		.dst_reg = EBPF_REG_3,
938 		.off = 9,
939 		.imm = 0x8,
940 	},
941 	[7] = {
942 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
943 		.dst_reg = EBPF_REG_1,
944 		.src_reg = EBPF_REG_2,
945 	},
946 	[8] = {
947 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
948 		.dst_reg = EBPF_REG_0,
949 		.imm = 0,
950 	},
951 	[9] = {
952 		.code = (BPF_LDX | BPF_MEM | BPF_W),
953 		.dst_reg = EBPF_REG_1,
954 		.src_reg = EBPF_REG_1,
955 		.off = 16,
956 	},
957 	[10] = {
958 		.code = (BPF_ALU | EBPF_MOV | BPF_K),
959 		.dst_reg = EBPF_REG_3,
960 		.imm = TEST_NETMASK,
961 	},
962 	[11] = {
963 		.code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
964 		.dst_reg = EBPF_REG_3,
965 		.imm = sizeof(uint32_t) * CHAR_BIT,
966 	},
967 	[12] = {
968 		.code = (BPF_ALU | BPF_AND | BPF_X),
969 		.dst_reg = EBPF_REG_1,
970 		.src_reg = EBPF_REG_3,
971 	},
972 	[13] = {
973 		.code = (BPF_ALU | EBPF_MOV | BPF_K),
974 		.dst_reg = EBPF_REG_3,
975 		.imm = TEST_SUBNET,
976 	},
977 	[14] = {
978 		.code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
979 		.dst_reg = EBPF_REG_3,
980 		.imm = sizeof(uint32_t) * CHAR_BIT,
981 	},
982 	[15] = {
983 		.code = (BPF_JMP | BPF_JEQ | BPF_X),
984 		.dst_reg = EBPF_REG_1,
985 		.src_reg = EBPF_REG_3,
986 		.off = 1,
987 	},
988 	[16] = {
989 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
990 		.dst_reg = EBPF_REG_0,
991 		.imm = -1,
992 	},
993 	[17] = {
994 		.code = (BPF_JMP | EBPF_EXIT),
995 	},
996 };
997 
998 /* Prepare a VLAN-tagged IPv4 packet */
999 static void
1000 test_jump2_prepare(void *arg)
1001 {
1002 	struct dummy_net *dn;
1003 
1004 	dn = arg;
1005 	memset(dn, 0, sizeof(*dn));
1006 
1007 	/*
1008 	 * Initialize ether header.
1009 	 */
1010 	rte_ether_addr_copy((struct rte_ether_addr *)dst_mac,
1011 			    &dn->eth_hdr.d_addr);
1012 	rte_ether_addr_copy((struct rte_ether_addr *)src_mac,
1013 			    &dn->eth_hdr.s_addr);
1014 	dn->eth_hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
1015 
1016 	/*
1017 	 * Initialize vlan header.
1018 	 */
1019 	dn->vlan_hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
1020 	dn->vlan_hdr.vlan_tci = 32;
1021 
1022 	/*
1023 	 * Initialize IP header.
1024 	 */
1025 	dn->ip_hdr.version_ihl = 0x45;    /* IP_VERSION | IP_HDRLEN */
1026 	dn->ip_hdr.time_to_live = 64;     /* IP_DEFTTL */
1027 	dn->ip_hdr.next_proto_id = IPPROTO_TCP;
1028 	dn->ip_hdr.packet_id = rte_cpu_to_be_16(0x463c);
1029 	dn->ip_hdr.total_length = rte_cpu_to_be_16(60);
1030 	dn->ip_hdr.src_addr = rte_cpu_to_be_32(ip_src_addr);
1031 	dn->ip_hdr.dst_addr = rte_cpu_to_be_32(ip_dst_addr);
1032 }
1033 
1034 static int
1035 test_jump2_check(uint64_t rc, const void *arg)
1036 {
1037 	const struct rte_ether_hdr *eth_hdr = arg;
1038 	const struct rte_ipv4_hdr *ipv4_hdr;
1039 	const void *next = eth_hdr;
1040 	uint16_t eth_type;
1041 	uint64_t v = -1;
1042 
1043 	if (eth_hdr->ether_type == htons(0x8100)) {
1044 		const struct rte_vlan_hdr *vlan_hdr =
1045 			(const void *)(eth_hdr + 1);
1046 		eth_type = vlan_hdr->eth_proto;
1047 		next = vlan_hdr + 1;
1048 	} else {
1049 		eth_type = eth_hdr->ether_type;
1050 		next = eth_hdr + 1;
1051 	}
1052 
1053 	if (eth_type == htons(0x0800)) {
1054 		ipv4_hdr = next;
1055 		if ((ipv4_hdr->dst_addr & rte_cpu_to_be_32(TEST_NETMASK)) ==
1056 		    rte_cpu_to_be_32(TEST_SUBNET)) {
1057 			v = 0;
1058 		}
1059 	}
1060 
1061 	return cmp_res(__func__, v, rc, arg, arg, sizeof(arg));
1062 }
1063 
1064 /* alu (add, sub, and, or, xor, neg)  test-cases */
1065 static const struct ebpf_insn test_alu1_prog[] = {
1066 
1067 	{
1068 		.code = (BPF_LDX | BPF_MEM | BPF_W),
1069 		.dst_reg = EBPF_REG_2,
1070 		.src_reg = EBPF_REG_1,
1071 		.off = offsetof(struct dummy_vect8, in[0].u32),
1072 	},
1073 	{
1074 		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
1075 		.dst_reg = EBPF_REG_3,
1076 		.src_reg = EBPF_REG_1,
1077 		.off = offsetof(struct dummy_vect8, in[0].u64),
1078 	},
1079 	{
1080 		.code = (BPF_LDX | BPF_MEM | BPF_W),
1081 		.dst_reg = EBPF_REG_4,
1082 		.src_reg = EBPF_REG_1,
1083 		.off = offsetof(struct dummy_vect8, in[1].u32),
1084 	},
1085 	{
1086 		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
1087 		.dst_reg = EBPF_REG_5,
1088 		.src_reg = EBPF_REG_1,
1089 		.off = offsetof(struct dummy_vect8, in[1].u64),
1090 	},
1091 	{
1092 		.code = (BPF_ALU | BPF_AND | BPF_K),
1093 		.dst_reg = EBPF_REG_2,
1094 		.imm = TEST_FILL_1,
1095 	},
1096 	{
1097 		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
1098 		.dst_reg = EBPF_REG_3,
1099 		.imm = TEST_FILL_1,
1100 	},
1101 	{
1102 		.code = (BPF_ALU | BPF_XOR | BPF_K),
1103 		.dst_reg = EBPF_REG_4,
1104 		.imm = TEST_FILL_1,
1105 	},
1106 	{
1107 		.code = (EBPF_ALU64 | BPF_ADD | BPF_K),
1108 		.dst_reg = EBPF_REG_5,
1109 		.imm = TEST_FILL_1,
1110 	},
1111 	{
1112 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
1113 		.dst_reg = EBPF_REG_1,
1114 		.src_reg = EBPF_REG_2,
1115 		.off = offsetof(struct dummy_vect8, out[0].u64),
1116 	},
1117 	{
1118 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
1119 		.dst_reg = EBPF_REG_1,
1120 		.src_reg = EBPF_REG_3,
1121 		.off = offsetof(struct dummy_vect8, out[1].u64),
1122 	},
1123 	{
1124 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
1125 		.dst_reg = EBPF_REG_1,
1126 		.src_reg = EBPF_REG_4,
1127 		.off = offsetof(struct dummy_vect8, out[2].u64),
1128 	},
1129 	{
1130 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
1131 		.dst_reg = EBPF_REG_1,
1132 		.src_reg = EBPF_REG_5,
1133 		.off = offsetof(struct dummy_vect8, out[3].u64),
1134 	},
1135 	{
1136 		.code = (BPF_ALU | BPF_OR | BPF_X),
1137 		.dst_reg = EBPF_REG_2,
1138 		.src_reg = EBPF_REG_3,
1139 	},
1140 	{
1141 		.code = (EBPF_ALU64 | BPF_XOR | BPF_X),
1142 		.dst_reg = EBPF_REG_3,
1143 		.src_reg = EBPF_REG_4,
1144 	},
1145 	{
1146 		.code = (BPF_ALU | BPF_SUB | BPF_X),
1147 		.dst_reg = EBPF_REG_4,
1148 		.src_reg = EBPF_REG_5,
1149 	},
1150 	{
1151 		.code = (EBPF_ALU64 | BPF_AND | BPF_X),
1152 		.dst_reg = EBPF_REG_5,
1153 		.src_reg = EBPF_REG_2,
1154 	},
1155 	{
1156 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
1157 		.dst_reg = EBPF_REG_1,
1158 		.src_reg = EBPF_REG_2,
1159 		.off = offsetof(struct dummy_vect8, out[4].u64),
1160 	},
1161 	{
1162 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
1163 		.dst_reg = EBPF_REG_1,
1164 		.src_reg = EBPF_REG_3,
1165 		.off = offsetof(struct dummy_vect8, out[5].u64),
1166 	},
1167 	{
1168 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
1169 		.dst_reg = EBPF_REG_1,
1170 		.src_reg = EBPF_REG_4,
1171 		.off = offsetof(struct dummy_vect8, out[6].u64),
1172 	},
1173 	{
1174 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
1175 		.dst_reg = EBPF_REG_1,
1176 		.src_reg = EBPF_REG_5,
1177 		.off = offsetof(struct dummy_vect8, out[7].u64),
1178 	},
1179 	/* return (-r2 + (-r3)) */
1180 	{
1181 		.code = (BPF_ALU | BPF_NEG),
1182 		.dst_reg = EBPF_REG_2,
1183 	},
1184 	{
1185 		.code = (EBPF_ALU64 | BPF_NEG),
1186 		.dst_reg = EBPF_REG_3,
1187 	},
1188 	{
1189 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1190 		.dst_reg = EBPF_REG_2,
1191 		.src_reg = EBPF_REG_3,
1192 	},
1193 	{
1194 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1195 		.dst_reg = EBPF_REG_0,
1196 		.src_reg = EBPF_REG_2,
1197 	},
1198 	{
1199 		.code = (BPF_JMP | EBPF_EXIT),
1200 	},
1201 };
1202 
1203 static int
1204 test_alu1_check(uint64_t rc, const void *arg)
1205 {
1206 	uint64_t r2, r3, r4, r5, rv;
1207 	const struct dummy_vect8 *dvt;
1208 	struct dummy_vect8 dve;
1209 
1210 	dvt = arg;
1211 	memset(&dve, 0, sizeof(dve));
1212 
1213 	r2 = dvt->in[0].u32;
1214 	r3 = dvt->in[0].u64;
1215 	r4 = dvt->in[1].u32;
1216 	r5 = dvt->in[1].u64;
1217 
1218 	r2 = (uint32_t)r2 & TEST_FILL_1;
1219 	r3 |= (int32_t)TEST_FILL_1;
1220 	r4 = (uint32_t)r4 ^ TEST_FILL_1;
1221 	r5 += (int32_t)TEST_FILL_1;
1222 
1223 	dve.out[0].u64 = r2;
1224 	dve.out[1].u64 = r3;
1225 	dve.out[2].u64 = r4;
1226 	dve.out[3].u64 = r5;
1227 
1228 	r2 = (uint32_t)r2 | (uint32_t)r3;
1229 	r3 ^= r4;
1230 	r4 = (uint32_t)r4 - (uint32_t)r5;
1231 	r5 &= r2;
1232 
1233 	dve.out[4].u64 = r2;
1234 	dve.out[5].u64 = r3;
1235 	dve.out[6].u64 = r4;
1236 	dve.out[7].u64 = r5;
1237 
1238 	r2 = -(int32_t)r2;
1239 	rv = (uint32_t)r2;
1240 	r3 = -r3;
1241 	rv += r3;
1242 
1243 	return cmp_res(__func__, rv, rc, dve.out, dvt->out, sizeof(dve.out));
1244 }
1245 
1246 /* endianness conversions (BE->LE/LE->BE)  test-cases */
1247 static const struct ebpf_insn test_bele1_prog[] = {
1248 
1249 	{
1250 		.code = (BPF_LDX | BPF_MEM | BPF_H),
1251 		.dst_reg = EBPF_REG_2,
1252 		.src_reg = EBPF_REG_1,
1253 		.off = offsetof(struct dummy_vect8, in[0].u16),
1254 	},
1255 	{
1256 		.code = (BPF_LDX | BPF_MEM | BPF_W),
1257 		.dst_reg = EBPF_REG_3,
1258 		.src_reg = EBPF_REG_1,
1259 		.off = offsetof(struct dummy_vect8, in[0].u32),
1260 	},
1261 	{
1262 		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
1263 		.dst_reg = EBPF_REG_4,
1264 		.src_reg = EBPF_REG_1,
1265 		.off = offsetof(struct dummy_vect8, in[0].u64),
1266 	},
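	/*
	 * For EBPF_END conversions the .imm field holds the operand
	 * width in bits (16/32/64).
	 */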
1267 	{
1268 		.code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
1269 		.dst_reg = EBPF_REG_2,
1270 		.imm = sizeof(uint16_t) * CHAR_BIT,
1271 	},
1272 	{
1273 		.code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
1274 		.dst_reg = EBPF_REG_3,
1275 		.imm = sizeof(uint32_t) * CHAR_BIT,
1276 	},
1277 	{
1278 		.code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
1279 		.dst_reg = EBPF_REG_4,
1280 		.imm = sizeof(uint64_t) * CHAR_BIT,
1281 	},
1282 	{
1283 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
1284 		.dst_reg = EBPF_REG_1,
1285 		.src_reg = EBPF_REG_2,
1286 		.off = offsetof(struct dummy_vect8, out[0].u64),
1287 	},
1288 	{
1289 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
1290 		.dst_reg = EBPF_REG_1,
1291 		.src_reg = EBPF_REG_3,
1292 		.off = offsetof(struct dummy_vect8, out[1].u64),
1293 	},
1294 	{
1295 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
1296 		.dst_reg = EBPF_REG_1,
1297 		.src_reg = EBPF_REG_4,
1298 		.off = offsetof(struct dummy_vect8, out[2].u64),
1299 	},
1300 	{
1301 		.code = (BPF_LDX | BPF_MEM | BPF_H),
1302 		.dst_reg = EBPF_REG_2,
1303 		.src_reg = EBPF_REG_1,
1304 		.off = offsetof(struct dummy_vect8, in[0].u16),
1305 	},
1306 	{
1307 		.code = (BPF_LDX | BPF_MEM | BPF_W),
1308 		.dst_reg = EBPF_REG_3,
1309 		.src_reg = EBPF_REG_1,
1310 		.off = offsetof(struct dummy_vect8, in[0].u32),
1311 	},
1312 	{
1313 		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
1314 		.dst_reg = EBPF_REG_4,
1315 		.src_reg = EBPF_REG_1,
1316 		.off = offsetof(struct dummy_vect8, in[0].u64),
1317 	},
1318 	{
1319 		.code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
1320 		.dst_reg = EBPF_REG_2,
1321 		.imm = sizeof(uint16_t) * CHAR_BIT,
1322 	},
1323 	{
1324 		.code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
1325 		.dst_reg = EBPF_REG_3,
1326 		.imm = sizeof(uint32_t) * CHAR_BIT,
1327 	},
1328 	{
1329 		.code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
1330 		.dst_reg = EBPF_REG_4,
1331 		.imm = sizeof(uint64_t) * CHAR_BIT,
1332 	},
1333 	{
1334 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
1335 		.dst_reg = EBPF_REG_1,
1336 		.src_reg = EBPF_REG_2,
1337 		.off = offsetof(struct dummy_vect8, out[3].u64),
1338 	},
1339 	{
1340 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
1341 		.dst_reg = EBPF_REG_1,
1342 		.src_reg = EBPF_REG_3,
1343 		.off = offsetof(struct dummy_vect8, out[4].u64),
1344 	},
1345 	{
1346 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
1347 		.dst_reg = EBPF_REG_1,
1348 		.src_reg = EBPF_REG_4,
1349 		.off = offsetof(struct dummy_vect8, out[5].u64),
1350 	},
1351 	/* return 1 */
1352 	{
1353 		.code = (BPF_ALU | EBPF_MOV | BPF_K),
1354 		.dst_reg = EBPF_REG_0,
1355 		.imm = 1,
1356 	},
1357 	{
1358 		.code = (BPF_JMP | EBPF_EXIT),
1359 	},
1360 };
1361 
1362 static void
1363 test_bele1_prepare(void *arg)
1364 {
1365 	struct dummy_vect8 *dv;
1366 
1367 	dv = arg;
1368 
1369 	memset(dv, 0, sizeof(*dv));
1370 	dv->in[0].u64 = rte_rand();
1371 	dv->in[0].u32 = dv->in[0].u64;
1372 	dv->in[0].u16 = dv->in[0].u64;
1373 }
1374 
1375 static int
1376 test_bele1_check(uint64_t rc, const void *arg)
1377 {
1378 	uint64_t r2, r3, r4;
1379 	const struct dummy_vect8 *dvt;
1380 	struct dummy_vect8 dve;
1381 
1382 	dvt = arg;
1383 	memset(&dve, 0, sizeof(dve));
1384 
1385 	r2 = dvt->in[0].u16;
1386 	r3 = dvt->in[0].u32;
1387 	r4 = dvt->in[0].u64;
1388 
1389 	r2 = rte_cpu_to_be_16(r2);
1390 	r3 = rte_cpu_to_be_32(r3);
1391 	r4 = rte_cpu_to_be_64(r4);
1392 
1393 	dve.out[0].u64 = r2;
1394 	dve.out[1].u64 = r3;
1395 	dve.out[2].u64 = r4;
1396 
1397 	r2 = dvt->in[0].u16;
1398 	r3 = dvt->in[0].u32;
1399 	r4 = dvt->in[0].u64;
1400 
1401 	r2 = rte_cpu_to_le_16(r2);
1402 	r3 = rte_cpu_to_le_32(r3);
1403 	r4 = rte_cpu_to_le_64(r4);
1404 
1405 	dve.out[3].u64 = r2;
1406 	dve.out[4].u64 = r3;
1407 	dve.out[5].u64 = r4;
1408 
1409 	return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
1410 }
1411 
1412 /* atomic add test-cases */
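/*
 * BPF_STX | EBPF_XADD atomically adds the source register to the
 * 32- or 64-bit memory location at dst + off.
 */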
1413 static const struct ebpf_insn test_xadd1_prog[] = {
1414 
1415 	{
1416 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1417 		.dst_reg = EBPF_REG_2,
1418 		.imm = 1,
1419 	},
1420 	{
1421 		.code = (BPF_STX | EBPF_XADD | BPF_W),
1422 		.dst_reg = EBPF_REG_1,
1423 		.src_reg = EBPF_REG_2,
1424 		.off = offsetof(struct dummy_offset, u32),
1425 	},
1426 	{
1427 		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
1428 		.dst_reg = EBPF_REG_1,
1429 		.src_reg = EBPF_REG_2,
1430 		.off = offsetof(struct dummy_offset, u64),
1431 	},
1432 	{
1433 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1434 		.dst_reg = EBPF_REG_3,
1435 		.imm = -1,
1436 	},
1437 	{
1438 		.code = (BPF_STX | EBPF_XADD | BPF_W),
1439 		.dst_reg = EBPF_REG_1,
1440 		.src_reg = EBPF_REG_3,
1441 		.off = offsetof(struct dummy_offset, u32),
1442 	},
1443 	{
1444 		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
1445 		.dst_reg = EBPF_REG_1,
1446 		.src_reg = EBPF_REG_3,
1447 		.off = offsetof(struct dummy_offset, u64),
1448 	},
1449 	{
1450 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1451 		.dst_reg = EBPF_REG_4,
1452 		.imm = TEST_FILL_1,
1453 	},
1454 	{
1455 		.code = (BPF_STX | EBPF_XADD | BPF_W),
1456 		.dst_reg = EBPF_REG_1,
1457 		.src_reg = EBPF_REG_4,
1458 		.off = offsetof(struct dummy_offset, u32),
1459 	},
1460 	{
1461 		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
1462 		.dst_reg = EBPF_REG_1,
1463 		.src_reg = EBPF_REG_4,
1464 		.off = offsetof(struct dummy_offset, u64),
1465 	},
1466 	{
1467 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1468 		.dst_reg = EBPF_REG_5,
1469 		.imm = TEST_MUL_1,
1470 	},
1471 	{
1472 		.code = (BPF_STX | EBPF_XADD | BPF_W),
1473 		.dst_reg = EBPF_REG_1,
1474 		.src_reg = EBPF_REG_5,
1475 		.off = offsetof(struct dummy_offset, u32),
1476 	},
1477 	{
1478 		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
1479 		.dst_reg = EBPF_REG_1,
1480 		.src_reg = EBPF_REG_5,
1481 		.off = offsetof(struct dummy_offset, u64),
1482 	},
1483 	{
1484 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1485 		.dst_reg = EBPF_REG_6,
1486 		.imm = TEST_MUL_2,
1487 	},
1488 	{
1489 		.code = (BPF_STX | EBPF_XADD | BPF_W),
1490 		.dst_reg = EBPF_REG_1,
1491 		.src_reg = EBPF_REG_6,
1492 		.off = offsetof(struct dummy_offset, u32),
1493 	},
1494 	{
1495 		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
1496 		.dst_reg = EBPF_REG_1,
1497 		.src_reg = EBPF_REG_6,
1498 		.off = offsetof(struct dummy_offset, u64),
1499 	},
1500 	{
1501 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1502 		.dst_reg = EBPF_REG_7,
1503 		.imm = TEST_JCC_2,
1504 	},
1505 	{
1506 		.code = (BPF_STX | EBPF_XADD | BPF_W),
1507 		.dst_reg = EBPF_REG_1,
1508 		.src_reg = EBPF_REG_7,
1509 		.off = offsetof(struct dummy_offset, u32),
1510 	},
1511 	{
1512 		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
1513 		.dst_reg = EBPF_REG_1,
1514 		.src_reg = EBPF_REG_7,
1515 		.off = offsetof(struct dummy_offset, u64),
1516 	},
1517 	{
1518 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1519 		.dst_reg = EBPF_REG_8,
1520 		.imm = TEST_JCC_3,
1521 	},
1522 	{
1523 		.code = (BPF_STX | EBPF_XADD | BPF_W),
1524 		.dst_reg = EBPF_REG_1,
1525 		.src_reg = EBPF_REG_8,
1526 		.off = offsetof(struct dummy_offset, u32),
1527 	},
1528 	{
1529 		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
1530 		.dst_reg = EBPF_REG_1,
1531 		.src_reg = EBPF_REG_8,
1532 		.off = offsetof(struct dummy_offset, u64),
1533 	},
1534 	/* return 1 */
1535 	{
1536 		.code = (BPF_ALU | EBPF_MOV | BPF_K),
1537 		.dst_reg = EBPF_REG_0,
1538 		.imm = 1,
1539 	},
1540 	{
1541 		.code = (BPF_JMP | EBPF_EXIT),
1542 	},
1543 };
1544 
1545 static int
1546 test_xadd1_check(uint64_t rc, const void *arg)
1547 {
1548 	uint64_t rv;
1549 	const struct dummy_offset *dft;
1550 	struct dummy_offset dfe;
1551 
1552 	dft = arg;
1553 	memset(&dfe, 0, sizeof(dfe));
1554 
1555 	rv = 1;
1556 	rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1557 	rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1558 
1559 	rv = -1;
1560 	rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1561 	rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1562 
1563 	rv = (int32_t)TEST_FILL_1;
1564 	rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1565 	rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1566 
1567 	rv = TEST_MUL_1;
1568 	rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1569 	rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1570 
1571 	rv = TEST_MUL_2;
1572 	rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1573 	rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1574 
1575 	rv = TEST_JCC_2;
1576 	rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1577 	rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1578 
1579 	rv = TEST_JCC_3;
1580 	rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1581 	rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1582 
1583 	return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe));
1584 }
1585 
1586 /* alu div test-cases */
1587 static const struct ebpf_insn test_div1_prog[] = {
1588 
1589 	{
1590 		.code = (BPF_LDX | BPF_MEM | BPF_W),
1591 		.dst_reg = EBPF_REG_2,
1592 		.src_reg = EBPF_REG_1,
1593 		.off = offsetof(struct dummy_vect8, in[0].u32),
1594 	},
1595 	{
1596 		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
1597 		.dst_reg = EBPF_REG_3,
1598 		.src_reg = EBPF_REG_1,
1599 		.off = offsetof(struct dummy_vect8, in[1].u64),
1600 	},
1601 	{
1602 		.code = (BPF_LDX | BPF_MEM | BPF_W),
1603 		.dst_reg = EBPF_REG_4,
1604 		.src_reg = EBPF_REG_1,
1605 		.off = offsetof(struct dummy_vect8, in[2].u32),
1606 	},
1607 	{
1608 		.code = (BPF_ALU | BPF_DIV | BPF_K),
1609 		.dst_reg = EBPF_REG_2,
1610 		.imm = TEST_MUL_1,
1611 	},
1612 	{
1613 		.code = (EBPF_ALU64 | BPF_MOD | BPF_K),
1614 		.dst_reg = EBPF_REG_3,
1615 		.imm = TEST_MUL_2,
1616 	},
1617 	{
1618 		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
1619 		.dst_reg = EBPF_REG_2,
1620 		.imm = 1,
1621 	},
1622 	{
1623 		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
1624 		.dst_reg = EBPF_REG_3,
1625 		.imm = 1,
1626 	},
1627 	{
1628 		.code = (BPF_ALU | BPF_MOD | BPF_X),
1629 		.dst_reg = EBPF_REG_4,
1630 		.src_reg = EBPF_REG_2,
1631 	},
1632 	{
1633 		.code = (EBPF_ALU64 | BPF_DIV | BPF_X),
1634 		.dst_reg = EBPF_REG_4,
1635 		.src_reg = EBPF_REG_3,
1636 	},
1637 	{
1638 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
1639 		.dst_reg = EBPF_REG_1,
1640 		.src_reg = EBPF_REG_2,
1641 		.off = offsetof(struct dummy_vect8, out[0].u64),
1642 	},
1643 	{
1644 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
1645 		.dst_reg = EBPF_REG_1,
1646 		.src_reg = EBPF_REG_3,
1647 		.off = offsetof(struct dummy_vect8, out[1].u64),
1648 	},
1649 	{
1650 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
1651 		.dst_reg = EBPF_REG_1,
1652 		.src_reg = EBPF_REG_4,
1653 		.off = offsetof(struct dummy_vect8, out[2].u64),
1654 	},
1655 	/* check that we can handle division by zero gracefully. */
1656 	{
1657 		.code = (BPF_LDX | BPF_MEM | BPF_W),
1658 		.dst_reg = EBPF_REG_2,
1659 		.src_reg = EBPF_REG_1,
1660 		.off = offsetof(struct dummy_vect8, in[3].u32),
1661 	},
1662 	{
1663 		.code = (BPF_ALU | BPF_DIV | BPF_X),
1664 		.dst_reg = EBPF_REG_4,
1665 		.src_reg = EBPF_REG_2,
1666 	},
1667 	/* return 1 */
1668 	{
1669 		.code = (BPF_ALU | EBPF_MOV | BPF_K),
1670 		.dst_reg = EBPF_REG_0,
1671 		.imm = 1,
1672 	},
1673 	{
1674 		.code = (BPF_JMP | EBPF_EXIT),
1675 	},
1676 };
1677 
1678 static int
1679 test_div1_check(uint64_t rc, const void *arg)
1680 {
1681 	uint64_t r2, r3, r4;
1682 	const struct dummy_vect8 *dvt;
1683 	struct dummy_vect8 dve;
1684 
1685 	dvt = arg;
1686 	memset(&dve, 0, sizeof(dve));
1687 
1688 	r2 = dvt->in[0].u32;
1689 	r3 = dvt->in[1].u64;
1690 	r4 = dvt->in[2].u32;
1691 
1692 	r2 = (uint32_t)r2 / TEST_MUL_1;
1693 	r3 %= TEST_MUL_2;
1694 	r2 |= 1;
1695 	r3 |= 1;
1696 	r4 = (uint32_t)(r4 % r2);
1697 	r4 /= r3;
1698 
1699 	dve.out[0].u64 = r2;
1700 	dve.out[1].u64 = r3;
1701 	dve.out[2].u64 = r4;
1702 
1703 	/*
1704 	 * In the test program we attempted to divide by zero,
1705 	 * so the return value should be 0.
1706 	 */
1707 	return cmp_res(__func__, 0, rc, dve.out, dvt->out, sizeof(dve.out));
1708 }
1709 
1710 /* call test-cases */
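/*
 * The program below spills a u32 at R10[-4] and a u64 at R10[-16]
 * (R10 is the read-only frame pointer), passes their addresses to the
 * external helper, then reloads and sums the values the helper updated.
 */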
1711 static const struct ebpf_insn test_call1_prog[] = {
1712 
1713 	{
1714 		.code = (BPF_LDX | BPF_MEM | BPF_W),
1715 		.dst_reg = EBPF_REG_2,
1716 		.src_reg = EBPF_REG_1,
1717 		.off = offsetof(struct dummy_offset, u32),
1718 	},
1719 	{
1720 		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
1721 		.dst_reg = EBPF_REG_3,
1722 		.src_reg = EBPF_REG_1,
1723 		.off = offsetof(struct dummy_offset, u64),
1724 	},
1725 	{
1726 		.code = (BPF_STX | BPF_MEM | BPF_W),
1727 		.dst_reg = EBPF_REG_10,
1728 		.src_reg = EBPF_REG_2,
1729 		.off = -4,
1730 	},
1731 	{
1732 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
1733 		.dst_reg = EBPF_REG_10,
1734 		.src_reg = EBPF_REG_3,
1735 		.off = -16,
1736 	},
1737 	{
1738 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1739 		.dst_reg = EBPF_REG_2,
1740 		.src_reg = EBPF_REG_10,
1741 	},
1742 	{
1743 		.code = (EBPF_ALU64 | BPF_SUB | BPF_K),
1744 		.dst_reg = EBPF_REG_2,
1745 		.imm = 4,
1746 	},
1747 	{
1748 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1749 		.dst_reg = EBPF_REG_3,
1750 		.src_reg = EBPF_REG_10,
1751 	},
1752 	{
1753 		.code = (EBPF_ALU64 | BPF_SUB | BPF_K),
1754 		.dst_reg = EBPF_REG_3,
1755 		.imm = 16,
1756 	},
1757 	{
1758 		.code = (BPF_JMP | EBPF_CALL),
1759 		.imm = 0,
1760 	},
1761 	{
1762 		.code = (BPF_LDX | BPF_MEM | BPF_W),
1763 		.dst_reg = EBPF_REG_2,
1764 		.src_reg = EBPF_REG_10,
1765 		.off = -4,
1766 	},
1767 	{
1768 		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
1769 		.dst_reg = EBPF_REG_0,
1770 		.src_reg = EBPF_REG_10,
1771 		.off = -16
1772 	},
1773 	{
1774 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1775 		.dst_reg = EBPF_REG_0,
1776 		.src_reg = EBPF_REG_2,
1777 	},
1778 	{
1779 		.code = (BPF_JMP | EBPF_EXIT),
1780 	},
1781 };
1782 
1783 static void
1784 dummy_func1(const void *p, uint32_t *v32, uint64_t *v64)
1785 {
1786 	const struct dummy_offset *dv;
1787 
1788 	dv = p;
1789 
1790 	v32[0] += dv->u16;
1791 	v64[0] += dv->u8;
1792 }
1793 
1794 static int
1795 test_call1_check(uint64_t rc, const void *arg)
1796 {
1797 	uint32_t v32;
1798 	uint64_t v64;
1799 	const struct dummy_offset *dv;
1800 
1801 	dv = arg;
1802 
1803 	v32 = dv->u32;
1804 	v64 = dv->u64;
1805 	dummy_func1(arg, &v32, &v64);
1806 	v64 += v32;
1807 
1808 	return cmp_res(__func__, v64, rc, dv, dv, sizeof(*dv));
1809 }
1810 
1811 static const struct rte_bpf_xsym test_call1_xsym[] = {
1812 	{
1813 		.name = RTE_STR(dummy_func1),
1814 		.type = RTE_BPF_XTYPE_FUNC,
1815 		.func = {
1816 			.val = (void *)dummy_func1,
1817 			.nb_args = 3,
1818 			.args = {
1819 				[0] = {
1820 					.type = RTE_BPF_ARG_PTR,
1821 					.size = sizeof(struct dummy_offset),
1822 				},
1823 				[1] = {
1824 					.type = RTE_BPF_ARG_PTR,
1825 					.size = sizeof(uint32_t),
1826 				},
1827 				[2] = {
1828 					.type = RTE_BPF_ARG_PTR,
1829 					.size = sizeof(uint64_t),
1830 				},
1831 			},
1832 		},
1833 	},
1834 };
1835 
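/*
 * The program below reserves two dummy_offset-sized areas on the stack,
 * passes their addresses to dummy_func2(), then sums the u64/u32 fields
 * of the first area and the u16/u8 fields of the second.
 */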
1836 static const struct ebpf_insn test_call2_prog[] = {
1837 
1838 	{
1839 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1840 		.dst_reg = EBPF_REG_1,
1841 		.src_reg = EBPF_REG_10,
1842 	},
1843 	{
1844 		.code = (EBPF_ALU64 | BPF_ADD | BPF_K),
1845 		.dst_reg = EBPF_REG_1,
1846 		.imm = -(int32_t)sizeof(struct dummy_offset),
1847 	},
1848 	{
1849 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1850 		.dst_reg = EBPF_REG_2,
1851 		.src_reg = EBPF_REG_10,
1852 	},
1853 	{
1854 		.code = (EBPF_ALU64 | BPF_ADD | BPF_K),
1855 		.dst_reg = EBPF_REG_2,
1856 		.imm = -2 * (int32_t)sizeof(struct dummy_offset),
1857 	},
1858 	{
1859 		.code = (BPF_JMP | EBPF_CALL),
1860 		.imm = 0,
1861 	},
1862 	{
1863 		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
1864 		.dst_reg = EBPF_REG_1,
1865 		.src_reg = EBPF_REG_10,
1866 		.off = -(int32_t)(sizeof(struct dummy_offset) -
1867 			offsetof(struct dummy_offset, u64)),
1868 	},
1869 	{
1870 		.code = (BPF_LDX | BPF_MEM | BPF_W),
1871 		.dst_reg = EBPF_REG_0,
1872 		.src_reg = EBPF_REG_10,
1873 		.off = -(int32_t)(sizeof(struct dummy_offset) -
1874 			offsetof(struct dummy_offset, u32)),
1875 	},
1876 	{
1877 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1878 		.dst_reg = EBPF_REG_0,
1879 		.src_reg = EBPF_REG_1,
1880 	},
1881 	{
1882 		.code = (BPF_LDX | BPF_MEM | BPF_H),
1883 		.dst_reg = EBPF_REG_1,
1884 		.src_reg = EBPF_REG_10,
1885 		.off = -(int32_t)(2 * sizeof(struct dummy_offset) -
1886 			offsetof(struct dummy_offset, u16)),
1887 	},
1888 	{
1889 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1890 		.dst_reg = EBPF_REG_0,
1891 		.src_reg = EBPF_REG_1,
1892 	},
1893 	{
1894 		.code = (BPF_LDX | BPF_MEM | BPF_B),
1895 		.dst_reg = EBPF_REG_1,
1896 		.src_reg = EBPF_REG_10,
1897 		.off = -(int32_t)(2 * sizeof(struct dummy_offset) -
1898 			offsetof(struct dummy_offset, u8)),
1899 	},
1900 	{
1901 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1902 		.dst_reg = EBPF_REG_0,
1903 		.src_reg = EBPF_REG_1,
1904 	},
1905 	{
1906 		.code = (BPF_JMP | EBPF_EXIT),
1907 	},
1908 
1909 };
1910 
1911 static void
1912 dummy_func2(struct dummy_offset *a, struct dummy_offset *b)
1913 {
1914 	uint64_t v;
1915 
1916 	v = 0;
1917 	a->u64 = v++;
1918 	a->u32 = v++;
1919 	a->u16 = v++;
1920 	a->u8 = v++;
1921 	b->u64 = v++;
1922 	b->u32 = v++;
1923 	b->u16 = v++;
1924 	b->u8 = v++;
1925 }
1926 
1927 static int
1928 test_call2_check(uint64_t rc, const void *arg)
1929 {
1930 	uint64_t v;
1931 	struct dummy_offset a, b;
1932 
1933 	RTE_SET_USED(arg);
1934 
1935 	dummy_func2(&a, &b);
1936 	v = a.u64 + a.u32 + b.u16 + b.u8;
1937 
1938 	return cmp_res(__func__, v, rc, arg, arg, 0);
1939 }
1940 
1941 static const struct rte_bpf_xsym test_call2_xsym[] = {
1942 	{
1943 		.name = RTE_STR(dummy_func2),
1944 		.type = RTE_BPF_XTYPE_FUNC,
1945 		.func = {
1946 			.val = (void *)dummy_func2,
1947 			.nb_args = 2,
1948 			.args = {
1949 				[0] = {
1950 					.type = RTE_BPF_ARG_PTR,
1951 					.size = sizeof(struct dummy_offset),
1952 				},
1953 				[1] = {
1954 					.type = RTE_BPF_ARG_PTR,
1955 					.size = sizeof(struct dummy_offset),
1956 				},
1957 			},
1958 		},
1959 	},
1960 };
1961 
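/*
 * The helper called below returns a pointer into the argument
 * structure; the program dereferences the returned pointer (R0)
 * to load and sum all four fields.
 */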
1962 static const struct ebpf_insn test_call3_prog[] = {
1963 
1964 	{
1965 		.code = (BPF_JMP | EBPF_CALL),
1966 		.imm = 0,
1967 	},
1968 	{
1969 		.code = (BPF_LDX | BPF_MEM | BPF_B),
1970 		.dst_reg = EBPF_REG_2,
1971 		.src_reg = EBPF_REG_0,
1972 		.off = offsetof(struct dummy_offset, u8),
1973 	},
1974 	{
1975 		.code = (BPF_LDX | BPF_MEM | BPF_H),
1976 		.dst_reg = EBPF_REG_3,
1977 		.src_reg = EBPF_REG_0,
1978 		.off = offsetof(struct dummy_offset, u16),
1979 	},
1980 	{
1981 		.code = (BPF_LDX | BPF_MEM | BPF_W),
1982 		.dst_reg = EBPF_REG_4,
1983 		.src_reg = EBPF_REG_0,
1984 		.off = offsetof(struct dummy_offset, u32),
1985 	},
1986 	{
1987 		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
1988 		.dst_reg = EBPF_REG_0,
1989 		.src_reg = EBPF_REG_0,
1990 		.off = offsetof(struct dummy_offset, u64),
1991 	},
1992 	/* return sum */
1993 	{
1994 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1995 		.dst_reg = EBPF_REG_0,
1996 		.src_reg = EBPF_REG_4,
1997 	},
1998 	{
1999 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2000 		.dst_reg = EBPF_REG_0,
2001 		.src_reg = EBPF_REG_3,
2002 	},
2003 	{
2004 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2005 		.dst_reg = EBPF_REG_0,
2006 		.src_reg = EBPF_REG_2,
2007 	},
2008 	{
2009 		.code = (BPF_JMP | EBPF_EXIT),
2010 	},
2011 };
2012 
2013 static const struct dummy_offset *
2014 dummy_func3(const struct dummy_vect8 *p)
2015 {
2016 	return &p->in[RTE_DIM(p->in) - 1];
2017 }
2018 
2019 static void
2020 test_call3_prepare(void *arg)
2021 {
2022 	struct dummy_vect8 *pv;
2023 	struct dummy_offset *df;
2024 
2025 	pv = arg;
2026 	df = (struct dummy_offset *)(uintptr_t)dummy_func3(pv);
2027 
2028 	memset(pv, 0, sizeof(*pv));
2029 	df->u64 = (int32_t)TEST_FILL_1;
2030 	df->u32 = df->u64;
2031 	df->u16 = df->u64;
2032 	df->u8 = df->u64;
2033 }
2034 
2035 static int
2036 test_call3_check(uint64_t rc, const void *arg)
2037 {
2038 	uint64_t v;
2039 	const struct dummy_vect8 *pv;
2040 	const struct dummy_offset *dft;
2041 
2042 	pv = arg;
2043 	dft = dummy_func3(pv);
2044 
2045 	v = dft->u64;
2046 	v += dft->u32;
2047 	v += dft->u16;
2048 	v += dft->u8;
2049 
2050 	return cmp_res(__func__, v, rc, pv, pv, sizeof(*pv));
2051 }
2052 
2053 static const struct rte_bpf_xsym test_call3_xsym[] = {
2054 	{
2055 		.name = RTE_STR(dummy_func3),
2056 		.type = RTE_BPF_XTYPE_FUNC,
2057 		.func = {
2058 			.val = (void *)dummy_func3,
2059 			.nb_args = 1,
2060 			.args = {
2061 				[0] = {
2062 					.type = RTE_BPF_ARG_PTR,
2063 					.size = sizeof(struct dummy_vect8),
2064 				},
2065 			},
2066 			.ret = {
2067 				.type = RTE_BPF_ARG_PTR,
2068 				.size = sizeof(struct dummy_offset),
2069 			},
2070 		},
2071 	},
2072 };
2073 
2074 /* Test for stack corruption in multiple function calls */
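/*
 * Sketch: store bytes {1, 2, 3, 4} at R10[-4..-1], let helper 0 frob
 * them in place (XOR each byte with 42), reload them, let helper 1 pack
 * them into a 32-bit word, then XOR with TEST_MEMFROB to undo the frob.
 */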
2075 static const struct ebpf_insn test_call4_prog[] = {
2076 	{
2077 		.code = (BPF_ST | BPF_MEM | BPF_B),
2078 		.dst_reg = EBPF_REG_10,
2079 		.off = -4,
2080 		.imm = 1,
2081 	},
2082 	{
2083 		.code = (BPF_ST | BPF_MEM | BPF_B),
2084 		.dst_reg = EBPF_REG_10,
2085 		.off = -3,
2086 		.imm = 2,
2087 	},
2088 	{
2089 		.code = (BPF_ST | BPF_MEM | BPF_B),
2090 		.dst_reg = EBPF_REG_10,
2091 		.off = -2,
2092 		.imm = 3,
2093 	},
2094 	{
2095 		.code = (BPF_ST | BPF_MEM | BPF_B),
2096 		.dst_reg = EBPF_REG_10,
2097 		.off = -1,
2098 		.imm = 4,
2099 	},
2100 	{
2101 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2102 		.dst_reg = EBPF_REG_1,
2103 		.src_reg = EBPF_REG_10,
2104 	},
2105 	{
2106 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
2107 		.dst_reg = EBPF_REG_2,
2108 		.imm = 4,
2109 	},
2110 	{
2111 		.code = (EBPF_ALU64 | BPF_SUB | BPF_X),
2112 		.dst_reg = EBPF_REG_1,
2113 		.src_reg = EBPF_REG_2,
2114 	},
2115 	{
2116 		.code = (BPF_JMP | EBPF_CALL),
2117 		.imm = 0,
2118 	},
2119 	{
2120 		.code = (BPF_LDX | BPF_MEM | BPF_B),
2121 		.dst_reg = EBPF_REG_1,
2122 		.src_reg = EBPF_REG_10,
2123 		.off = -4,
2124 	},
2125 	{
2126 		.code = (BPF_LDX | BPF_MEM | BPF_B),
2127 		.dst_reg = EBPF_REG_2,
2128 		.src_reg = EBPF_REG_10,
2129 		.off = -3,
2130 	},
2131 	{
2132 		.code = (BPF_LDX | BPF_MEM | BPF_B),
2133 		.dst_reg = EBPF_REG_3,
2134 		.src_reg = EBPF_REG_10,
2135 		.off = -2,
2136 	},
2137 	{
2138 		.code = (BPF_LDX | BPF_MEM | BPF_B),
2139 		.dst_reg = EBPF_REG_4,
2140 		.src_reg = EBPF_REG_10,
2141 		.off = -1,
2142 	},
2143 	{
2144 		.code = (BPF_JMP | EBPF_CALL),
2145 		.imm = 1,
2146 	},
2147 	{
2148 		.code = (EBPF_ALU64 | BPF_XOR | BPF_K),
2149 		.dst_reg = EBPF_REG_0,
2150 		.imm = TEST_MEMFROB,
2151 	},
2152 	{
2153 		.code = (BPF_JMP | EBPF_EXIT),
2154 	},
2155 };
2156 
2157 /* Gather four bytes into one 32-bit word */
2158 static uint32_t
2159 dummy_func4_1(uint8_t a, uint8_t b, uint8_t c, uint8_t d)
2160 {
2161 	return (a << 24) | (b << 16) | (c << 8) | (d << 0);
2162 }
2163 
2164 /* Simplified memfrob(): XOR each of the n bytes with 42 */
2165 static uint32_t
2166 dummy_func4_0(uint32_t *s, uint8_t n)
2167 {
2168 	char *p = (char *)s;
2169 	while (n-- > 0)
2170 		*p++ ^= 42;
2171 	return *s;
2172 }
2173 
2174 
2175 static int
2176 test_call4_check(uint64_t rc, const void *arg)
2177 {
2178 	uint8_t a[4] = {1, 2, 3, 4};
2179 	uint32_t s, v = 0;
2180 
2181 	RTE_SET_USED(arg);
2182 
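	/* frob the bytes in place; this first return value is overwritten below */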
2183 	s = dummy_func4_0((uint32_t *)a, 4);
2184 
2185 	s = dummy_func4_1(a[0], a[1], a[2], a[3]);
2186 
2187 	v = s ^ TEST_MEMFROB;
2188 
2189 	return cmp_res(__func__, v, rc, &v, &rc, sizeof(v));
2190 }
2191 
2192 static const struct rte_bpf_xsym test_call4_xsym[] = {
2193 	[0] = {
2194 		.name = RTE_STR(dummy_func4_0),
2195 		.type = RTE_BPF_XTYPE_FUNC,
2196 		.func = {
2197 			.val = (void *)dummy_func4_0,
2198 			.nb_args = 2,
2199 			.args = {
2200 				[0] = {
2201 					.type = RTE_BPF_ARG_PTR,
2202 					.size = 4 * sizeof(uint8_t),
2203 				},
2204 				[1] = {
2205 					.type = RTE_BPF_ARG_RAW,
2206 					.size = sizeof(uint8_t),
2207 				},
2208 			},
2209 			.ret = {
2210 				.type = RTE_BPF_ARG_RAW,
2211 				.size = sizeof(uint32_t),
2212 			},
2213 		},
2214 	},
2215 	[1] = {
2216 		.name = RTE_STR(dummy_func4_1),
2217 		.type = RTE_BPF_XTYPE_FUNC,
2218 		.func = {
2219 			.val = (void *)dummy_func4_1,
2220 			.nb_args = 4,
2221 			.args = {
2222 				[0] = {
2223 					.type = RTE_BPF_ARG_RAW,
2224 					.size = sizeof(uint8_t),
2225 				},
2226 				[1] = {
2227 					.type = RTE_BPF_ARG_RAW,
2228 					.size = sizeof(uint8_t),
2229 				},
2230 				[2] = {
2231 					.type = RTE_BPF_ARG_RAW,
2232 					.size = sizeof(uint8_t),
2233 				},
2234 				[3] = {
2235 					.type = RTE_BPF_ARG_RAW,
2236 					.size = sizeof(uint8_t),
2237 				},
2238 			},
2239 			.ret = {
2240 				.type = RTE_BPF_ARG_RAW,
2241 				.size = sizeof(uint32_t),
2242 			},
2243 		},
2244 	},
2245 };
2246 
2247 /* string compare test case */
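/*
 * Sketch: build the NUL-terminated strings "geek" (at R10[-8]) and
 * "week" (at R10[-16]) on the stack, then call the compare helper
 * twice: once with identical arguments and once with the two
 * different strings.
 */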
2248 static const struct ebpf_insn test_call5_prog[] = {
2249 
2250 	[0] = {
2251 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
2252 		.dst_reg = EBPF_REG_1,
2253 		.imm = STRING_GEEK,
2254 	},
2255 	[1] = {
2256 		.code = (BPF_STX | BPF_MEM | BPF_W),
2257 		.dst_reg = EBPF_REG_10,
2258 		.src_reg = EBPF_REG_1,
2259 		.off = -8,
2260 	},
2261 	[2] = {
2262 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
2263 		.dst_reg = EBPF_REG_6,
2264 		.imm = 0,
2265 	},
2266 	[3] = {
2267 		.code = (BPF_STX | BPF_MEM | BPF_B),
2268 		.dst_reg = EBPF_REG_10,
2269 		.src_reg = EBPF_REG_6,
2270 		.off = -4,
2271 	},
2272 	[4] = {
2273 		.code = (BPF_STX | BPF_MEM | BPF_W),
2274 		.dst_reg = EBPF_REG_10,
2275 		.src_reg = EBPF_REG_6,
2276 		.off = -12,
2277 	},
2278 	[5] = {
2279 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
2280 		.dst_reg = EBPF_REG_1,
2281 		.imm = STRING_WEEK,
2282 	},
2283 	[6] = {
2284 		.code = (BPF_STX | BPF_MEM | BPF_W),
2285 		.dst_reg = EBPF_REG_10,
2286 		.src_reg = EBPF_REG_1,
2287 		.off = -16,
2288 	},
2289 	[7] = {
2290 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2291 		.dst_reg = EBPF_REG_1,
2292 		.src_reg = EBPF_REG_10,
2293 	},
2294 	[8] = {
2295 		.code = (EBPF_ALU64 | BPF_ADD | BPF_K),
2296 		.dst_reg = EBPF_REG_1,
2297 		.imm = -8,
2298 	},
2299 	[9] = {
2300 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2301 		.dst_reg = EBPF_REG_2,
2302 		.src_reg = EBPF_REG_1,
2303 	},
2304 	[10] = {
2305 		.code = (BPF_JMP | EBPF_CALL),
2306 		.imm = 0,
2307 	},
2308 	[11] = {
2309 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2310 		.dst_reg = EBPF_REG_1,
2311 		.src_reg = EBPF_REG_0,
2312 	},
2313 	[12] = {
2314 		.code = (BPF_ALU | EBPF_MOV | BPF_K),
2315 		.dst_reg = EBPF_REG_0,
2316 		.imm = -1,
2317 	},
2318 	[13] = {
2319 		.code = (EBPF_ALU64 | BPF_LSH | BPF_K),
2320 		.dst_reg = EBPF_REG_1,
2321 		.imm = 0x20,
2322 	},
2323 	[14] = {
2324 		.code = (EBPF_ALU64 | BPF_RSH | BPF_K),
2325 		.dst_reg = EBPF_REG_1,
2326 		.imm = 0x20,
2327 	},
2328 	[15] = {
2329 		.code = (BPF_JMP | EBPF_JNE | BPF_K),
2330 		.dst_reg = EBPF_REG_1,
2331 		.off = 11,
2332 		.imm = 0,
2333 	},
2334 	[16] = {
2335 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2336 		.dst_reg = EBPF_REG_1,
2337 		.src_reg = EBPF_REG_10,
2338 	},
2339 	[17] = {
2340 		.code = (EBPF_ALU64 | BPF_ADD | BPF_K),
2341 		.dst_reg = EBPF_REG_1,
2342 		.imm = -8,
2343 	},
2344 	[18] = {
2345 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2346 		.dst_reg = EBPF_REG_2,
2347 		.src_reg = EBPF_REG_10,
2348 	},
2349 	[19] = {
2350 		.code = (EBPF_ALU64 | BPF_ADD | BPF_K),
2351 		.dst_reg = EBPF_REG_2,
2352 		.imm = -16,
2353 	},
2354 	[20] = {
2355 		.code = (BPF_JMP | EBPF_CALL),
2356 		.imm = 0,
2357 	},
2358 	[21] = {
2359 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2360 		.dst_reg = EBPF_REG_1,
2361 		.src_reg = EBPF_REG_0,
2362 	},
2363 	[22] = {
2364 		.code = (EBPF_ALU64 | BPF_LSH | BPF_K),
2365 		.dst_reg = EBPF_REG_1,
2366 		.imm = 0x20,
2367 	},
2368 	[23] = {
2369 		.code = (EBPF_ALU64 | BPF_RSH | BPF_K),
2370 		.dst_reg = EBPF_REG_1,
2371 		.imm = 0x20,
2372 	},
2373 	[24] = {
2374 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2375 		.dst_reg = EBPF_REG_0,
2376 		.src_reg = EBPF_REG_1,
2377 	},
2378 	[25] = {
2379 		.code = (BPF_JMP | BPF_JEQ | BPF_X),
2380 		.dst_reg = EBPF_REG_1,
2381 		.src_reg = EBPF_REG_6,
2382 		.off = 1,
2383 	},
2384 	[26] = {
2385 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
2386 		.dst_reg = EBPF_REG_0,
2387 		.imm = 0,
2388 	},
2389 	[27] = {
2390 		.code = (BPF_JMP | EBPF_EXIT),
2391 	},
2392 };
2393 
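/*
 * Stack layout of test_call5_prog above (assuming a little-endian host;
 * R10 is the read-only frame pointer): "geek\0" is composed at
 * R10-8..R10-4 and "week\0" at R10-16..R10-12.  dummy_func5() is then
 * called once as (s, s), which must return 0, and once with the two
 * different strings, which must not.  The LSH/RSH-by-0x20 pairs truncate
 * the 64-bit return register to its lower 32 bits before each comparison.
 */
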
2394 /* String comparison implementation, returns 0 if equal, else the difference */
2395 static uint32_t
2396 dummy_func5(const char *s1, const char *s2)
2397 {
2398 	while (*s1 && (*s1 == *s2)) {
2399 		s1++;
2400 		s2++;
2401 	}
2402 	return *(const unsigned char *)s1 - *(const unsigned char *)s2;
2403 }
2404 
2405 static int
2406 test_call5_check(uint64_t rc, const void *arg)
2407 {
2408 	char a[] = "geek";
2409 	char b[] = "week";
2410 	uint32_t v;
2411 
2412 	RTE_SET_USED(arg);
2413 
2414 	v = dummy_func5(a, a);
2415 	if (v != 0) {
2416 		v = -1;
2417 		goto fail;
2418 	}
2419 
2420 	v = dummy_func5(a, b);
2421 	if (v == 0)
2422 		goto fail;
2423 
2424 	v = 0;
2425 
2426 fail:
2427 	return cmp_res(__func__, v, rc, &v, &rc, sizeof(v));
2428 }
2429 
2430 static const struct rte_bpf_xsym test_call5_xsym[] = {
2431 	[0] = {
2432 		.name = RTE_STR(dummy_func5),
2433 		.type = RTE_BPF_XTYPE_FUNC,
2434 		.func = {
2435 			.val = (void *)dummy_func5,
2436 			.nb_args = 2,
2437 			.args = {
2438 				[0] = {
2439 					.type = RTE_BPF_ARG_PTR,
2440 					.size = sizeof(char),
2441 				},
2442 				[1] = {
2443 					.type = RTE_BPF_ARG_PTR,
2444 					.size = sizeof(char),
2445 				},
2446 			},
2447 			.ret = {
2448 				.type = RTE_BPF_ARG_RAW,
2449 				.size = sizeof(uint32_t),
2450 			},
2451 		},
2452 	},
2453 };
2454 
2455 /* load mbuf (BPF_ABS/BPF_IND) test-cases */
2456 static const struct ebpf_insn test_ld_mbuf1_prog[] = {
2457 
2458 	/* BPF_ABS/BPF_IND implicitly expect mbuf ptr in R6 */
2459 	{
2460 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2461 		.dst_reg = EBPF_REG_6,
2462 		.src_reg = EBPF_REG_1,
2463 	},
2464 	/* load IPv4 version and IHL */
2465 	{
2466 		.code = (BPF_LD | BPF_ABS | BPF_B),
2467 		.imm = offsetof(struct rte_ipv4_hdr, version_ihl),
2468 	},
2469 	/* check IP version */
2470 	{
2471 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2472 		.dst_reg = EBPF_REG_2,
2473 		.src_reg = EBPF_REG_0,
2474 	},
2475 	{
2476 		.code = (BPF_ALU | BPF_AND | BPF_K),
2477 		.dst_reg = EBPF_REG_2,
2478 		.imm = 0xf0,
2479 	},
2480 	{
2481 		.code = (BPF_JMP | BPF_JEQ | BPF_K),
2482 		.dst_reg = EBPF_REG_2,
2483 		.imm = IPVERSION << 4,
2484 		.off = 2,
2485 	},
2486 	/* invalid IP version, return 0 */
2487 	{
2488 		.code = (EBPF_ALU64 | BPF_XOR | BPF_X),
2489 		.dst_reg = EBPF_REG_0,
2490 		.src_reg = EBPF_REG_0,
2491 	},
2492 	{
2493 		.code = (BPF_JMP | EBPF_EXIT),
2494 	},
2495 	/* load the 3rd byte of IP data */
2496 	{
2497 		.code = (BPF_ALU | BPF_AND | BPF_K),
2498 		.dst_reg = EBPF_REG_0,
2499 		.imm = RTE_IPV4_HDR_IHL_MASK,
2500 	},
2501 	{
2502 		.code = (BPF_ALU | BPF_LSH | BPF_K),
2503 		.dst_reg = EBPF_REG_0,
2504 		.imm = 2,
2505 	},
2506 	{
2507 		.code = (BPF_LD | BPF_IND | BPF_B),
2508 		.src_reg = EBPF_REG_0,
2509 		.imm = 3,
2510 	},
2511 	{
2512 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2513 		.dst_reg = EBPF_REG_7,
2514 		.src_reg = EBPF_REG_0,
2515 	},
2516 	/* load IPv4 src addr */
2517 	{
2518 		.code = (BPF_LD | BPF_ABS | BPF_W),
2519 		.imm = offsetof(struct rte_ipv4_hdr, src_addr),
2520 	},
2521 	{
2522 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2523 		.dst_reg = EBPF_REG_7,
2524 		.src_reg = EBPF_REG_0,
2525 	},
2526 	/* load IPv4 total length */
2527 	{
2528 		.code = (BPF_LD | BPF_ABS | BPF_H),
2529 		.imm = offsetof(struct rte_ipv4_hdr, total_length),
2530 	},
2531 	{
2532 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2533 		.dst_reg = EBPF_REG_8,
2534 		.src_reg = EBPF_REG_0,
2535 	},
2536 	/* load last 4 bytes of IP data */
2537 	{
2538 		.code = (BPF_LD | BPF_IND | BPF_W),
2539 		.src_reg = EBPF_REG_8,
2540 		.imm = -(int32_t)sizeof(uint32_t),
2541 	},
2542 	{
2543 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2544 		.dst_reg = EBPF_REG_7,
2545 		.src_reg = EBPF_REG_0,
2546 	},
2547 	/* load 2 bytes from the middle of IP data */
2548 	{
2549 		.code = (EBPF_ALU64 | BPF_RSH | BPF_K),
2550 		.dst_reg = EBPF_REG_8,
2551 		.imm = 1,
2552 	},
2553 	{
2554 		.code = (BPF_LD | BPF_IND | BPF_H),
2555 		.src_reg = EBPF_REG_8,
2556 	},
2557 	{
2558 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2559 		.dst_reg = EBPF_REG_0,
2560 		.src_reg = EBPF_REG_7,
2561 	},
2562 	{
2563 		.code = (BPF_JMP | EBPF_EXIT),
2564 	},
2565 };
2566 
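/*
 * Note on the classic BPF packet loads used above: with
 * (BPF_LD | BPF_ABS | <size>) the packet offset comes from .imm alone,
 * while (BPF_LD | BPF_IND | <size>) reads at src_reg + .imm.  Both go
 * through the mbuf pointer implicitly kept in R6, convert multi-byte
 * values from network to host byte order and leave the result in R0;
 * a read beyond the packet boundary aborts the program with a return
 * value of 0 (test_ld_mbuf2 below depends on that).
 */
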
2567 static void
2568 dummy_mbuf_prep(struct rte_mbuf *mb, uint8_t buf[], uint32_t buf_len,
2569 	uint32_t data_len)
2570 {
2571 	uint32_t i;
2572 	uint8_t *db;
2573 
2574 	mb->buf_addr = buf;
2575 	mb->buf_iova = (uintptr_t)buf;
2576 	mb->buf_len = buf_len;
2577 	rte_mbuf_refcnt_set(mb, 1);
2578 
2579 	/* set pool pointer to dummy value, test doesn't use it */
2580 	mb->pool = (void *)buf;
2581 
2582 	rte_pktmbuf_reset(mb);
2583 	db = (uint8_t *)rte_pktmbuf_append(mb, data_len);
2584 
2585 	for (i = 0; i != data_len; i++)
2586 		db[i] = i;
2587 }
2588 
2589 static void
2590 test_ld_mbuf1_prepare(void *arg)
2591 {
2592 	struct dummy_mbuf *dm;
2593 	struct rte_ipv4_hdr *ph;
2594 
2595 	const uint32_t plen = 400;
2596 	const struct rte_ipv4_hdr iph = {
2597 		.version_ihl = RTE_IPV4_VHL_DEF,
2598 		.total_length = rte_cpu_to_be_16(plen),
2599 		.time_to_live = IPDEFTTL,
2600 		.next_proto_id = IPPROTO_RAW,
2601 		.src_addr = rte_cpu_to_be_32(RTE_IPV4_LOOPBACK),
2602 		.dst_addr = rte_cpu_to_be_32(RTE_IPV4_BROADCAST),
2603 	};
2604 
2605 	dm = arg;
2606 	memset(dm, 0, sizeof(*dm));
2607 
2608 	dummy_mbuf_prep(&dm->mb[0], dm->buf[0], sizeof(dm->buf[0]),
2609 		plen / 2 + 1);
2610 	dummy_mbuf_prep(&dm->mb[1], dm->buf[1], sizeof(dm->buf[0]),
2611 		plen / 2 - 1);
2612 
2613 	rte_pktmbuf_chain(&dm->mb[0], &dm->mb[1]);
2614 
2615 	ph = rte_pktmbuf_mtod(dm->mb, typeof(ph));
2616 	memcpy(ph, &iph, sizeof(iph));
2617 }
2618 
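/*
 * The 400B IPv4 packet is deliberately split across two chained segments
 * of plen/2 + 1 and plen/2 - 1 bytes, so some of the loads above (and in
 * the C counterpart test_ld_mbuf1() below) cross the segment boundary
 * and exercise the copying path of rte_pktmbuf_read().
 */
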
2619 static uint64_t
2620 test_ld_mbuf1(const struct rte_mbuf *pkt)
2621 {
2622 	uint64_t n, v;
2623 	const uint8_t *p8;
2624 	const uint16_t *p16;
2625 	const uint32_t *p32;
2626 	struct dummy_offset dof;
2627 
2628 	/* load IPv4 version and IHL */
2629 	p8 = rte_pktmbuf_read(pkt,
2630 		offsetof(struct rte_ipv4_hdr, version_ihl), sizeof(*p8),
2631 		&dof);
2632 	if (p8 == NULL)
2633 		return 0;
2634 
2635 	/* check IP version */
2636 	if ((p8[0] & 0xf0) != IPVERSION << 4)
2637 		return 0;
2638 
2639 	n = (p8[0] & RTE_IPV4_HDR_IHL_MASK) * RTE_IPV4_IHL_MULTIPLIER;
2640 
2641 	/* load the 3rd byte of IP data */
2642 	p8 = rte_pktmbuf_read(pkt, n + 3, sizeof(*p8), &dof);
2643 	if (p8 == NULL)
2644 		return 0;
2645 
2646 	v = p8[0];
2647 
2648 	/* load IPv4 src addr */
2649 	p32 = rte_pktmbuf_read(pkt,
2650 		offsetof(struct rte_ipv4_hdr, src_addr), sizeof(*p32),
2651 		&dof);
2652 	if (p32 == NULL)
2653 		return 0;
2654 
2655 	v += rte_be_to_cpu_32(p32[0]);
2656 
2657 	/* load IPv4 total length */
2658 	p16 = rte_pktmbuf_read(pkt,
2659 		offsetof(struct rte_ipv4_hdr, total_length), sizeof(*p16),
2660 		&dof);
2661 	if (p16 == NULL)
2662 		return 0;
2663 
2664 	n = rte_be_to_cpu_16(p16[0]);
2665 
2666 	/* load last 4 bytes of IP data */
2667 	p32 = rte_pktmbuf_read(pkt, n - sizeof(*p32), sizeof(*p32), &dof);
2668 	if (p32 == NULL)
2669 		return 0;
2670 
2671 	v += rte_be_to_cpu_32(p32[0]);
2672 
2673 	/* load 2 bytes from the middle of IP data */
2674 	p16 = rte_pktmbuf_read(pkt, n / 2, sizeof(*p16), &dof);
2675 	if (p16 == NULL)
2676 		return 0;
2677 
2678 	v += rte_be_to_cpu_16(p16[0]);
2679 	return v;
2680 }
2681 
2682 static int
2683 test_ld_mbuf1_check(uint64_t rc, const void *arg)
2684 {
2685 	const struct dummy_mbuf *dm;
2686 	uint64_t v;
2687 
2688 	dm = arg;
2689 	v = test_ld_mbuf1(dm->mb);
2690 	return cmp_res(__func__, v, rc, arg, arg, 0);
2691 }
2692 
2693 /*
2694  * Same as test_ld_mbuf1, but truncate the mbuf by 1B,
2695  * so the load of the last 4B fails.
2696  */
2697 static void
2698 test_ld_mbuf2_prepare(void *arg)
2699 {
2700 	struct dummy_mbuf *dm;
2701 
2702 	test_ld_mbuf1_prepare(arg);
2703 	dm = arg;
2704 	rte_pktmbuf_trim(dm->mb, 1);
2705 }
2706 
2707 static int
2708 test_ld_mbuf2_check(uint64_t rc, const void *arg)
2709 {
2710 	return cmp_res(__func__, 0, rc, arg, arg, 0);
2711 }
2712 
2713 /* same as test_ld_mbuf1, but now stores intermediate results on the stack */
2714 static const struct ebpf_insn test_ld_mbuf3_prog[] = {
2715 
2716 	/* BPF_ABS/BPF_IND implicitly expect mbuf ptr in R6 */
2717 	{
2718 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2719 		.dst_reg = EBPF_REG_6,
2720 		.src_reg = EBPF_REG_1,
2721 	},
2722 	/* load IPv4 version and IHL */
2723 	{
2724 		.code = (BPF_LD | BPF_ABS | BPF_B),
2725 		.imm = offsetof(struct rte_ipv4_hdr, version_ihl),
2726 	},
2727 	/* check IP version */
2728 	{
2729 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2730 		.dst_reg = EBPF_REG_2,
2731 		.src_reg = EBPF_REG_0,
2732 	},
2733 	{
2734 		.code = (BPF_ALU | BPF_AND | BPF_K),
2735 		.dst_reg = EBPF_REG_2,
2736 		.imm = 0xf0,
2737 	},
2738 	{
2739 		.code = (BPF_JMP | BPF_JEQ | BPF_K),
2740 		.dst_reg = EBPF_REG_2,
2741 		.imm = IPVERSION << 4,
2742 		.off = 2,
2743 	},
2744 	/* invalid IP version, return 0 */
2745 	{
2746 		.code = (EBPF_ALU64 | BPF_XOR | BPF_X),
2747 		.dst_reg = EBPF_REG_0,
2748 		.src_reg = EBPF_REG_0,
2749 	},
2750 	{
2751 		.code = (BPF_JMP | EBPF_EXIT),
2752 	},
2753 	/* load the 3rd byte of IP data */
2754 	{
2755 		.code = (BPF_ALU | BPF_AND | BPF_K),
2756 		.dst_reg = EBPF_REG_0,
2757 		.imm = RTE_IPV4_HDR_IHL_MASK,
2758 	},
2759 	{
2760 		.code = (BPF_ALU | BPF_LSH | BPF_K),
2761 		.dst_reg = EBPF_REG_0,
2762 		.imm = 2,
2763 	},
2764 	{
2765 		.code = (BPF_LD | BPF_IND | BPF_B),
2766 		.src_reg = EBPF_REG_0,
2767 		.imm = 3,
2768 	},
2769 	{
2770 		.code = (BPF_STX | BPF_MEM | BPF_B),
2771 		.dst_reg = EBPF_REG_10,
2772 		.src_reg = EBPF_REG_0,
2773 		.off = (int16_t)(offsetof(struct dummy_offset, u8) -
2774 			sizeof(struct dummy_offset)),
2775 	},
2776 	/* load IPv4 src addr */
2777 	{
2778 		.code = (BPF_LD | BPF_ABS | BPF_W),
2779 		.imm = offsetof(struct rte_ipv4_hdr, src_addr),
2780 	},
2781 	{
2782 		.code = (BPF_STX | BPF_MEM | BPF_W),
2783 		.dst_reg = EBPF_REG_10,
2784 		.src_reg = EBPF_REG_0,
2785 		.off = (int16_t)(offsetof(struct dummy_offset, u32) -
2786 			sizeof(struct dummy_offset)),
2787 	},
2788 	/* load IPv4 total length */
2789 	{
2790 		.code = (BPF_LD | BPF_ABS | BPF_H),
2791 		.imm = offsetof(struct rte_ipv4_hdr, total_length),
2792 	},
2793 	{
2794 		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2795 		.dst_reg = EBPF_REG_8,
2796 		.src_reg = EBPF_REG_0,
2797 	},
2798 	/* load last 4 bytes of IP data */
2799 	{
2800 		.code = (BPF_LD | BPF_IND | BPF_W),
2801 		.src_reg = EBPF_REG_8,
2802 		.imm = -(int32_t)sizeof(uint32_t),
2803 	},
2804 	{
2805 		.code = (BPF_STX | BPF_MEM | EBPF_DW),
2806 		.dst_reg = EBPF_REG_10,
2807 		.src_reg = EBPF_REG_0,
2808 		.off = (int16_t)(offsetof(struct dummy_offset, u64) -
2809 			sizeof(struct dummy_offset)),
2810 	},
2811 	/* load 2 bytes from the middle of IP data */
2812 	{
2813 		.code = (EBPF_ALU64 | BPF_RSH | BPF_K),
2814 		.dst_reg = EBPF_REG_8,
2815 		.imm = 1,
2816 	},
2817 	{
2818 		.code = (BPF_LD | BPF_IND | BPF_H),
2819 		.src_reg = EBPF_REG_8,
2820 	},
2821 	{
2822 		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
2823 		.dst_reg = EBPF_REG_1,
2824 		.src_reg = EBPF_REG_10,
2825 		.off = (int16_t)(offsetof(struct dummy_offset, u64) -
2826 			sizeof(struct dummy_offset)),
2827 	},
2828 	{
2829 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2830 		.dst_reg = EBPF_REG_0,
2831 		.src_reg = EBPF_REG_1,
2832 	},
2833 	{
2834 		.code = (BPF_LDX | BPF_MEM | BPF_W),
2835 		.dst_reg = EBPF_REG_1,
2836 		.src_reg = EBPF_REG_10,
2837 		.off = (int16_t)(offsetof(struct dummy_offset, u32) -
2838 			sizeof(struct dummy_offset)),
2839 	},
2840 	{
2841 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2842 		.dst_reg = EBPF_REG_0,
2843 		.src_reg = EBPF_REG_1,
2844 	},
2845 	{
2846 		.code = (BPF_LDX | BPF_MEM | BPF_B),
2847 		.dst_reg = EBPF_REG_1,
2848 		.src_reg = EBPF_REG_10,
2849 		.off = (int16_t)(offsetof(struct dummy_offset, u8) -
2850 			sizeof(struct dummy_offset)),
2851 	},
2852 	{
2853 		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2854 		.dst_reg = EBPF_REG_0,
2855 		.src_reg = EBPF_REG_1,
2856 	},
2857 	{
2858 		.code = (BPF_JMP | EBPF_EXIT),
2859 	},
2860 };
2861 
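/*
 * test_ld_mbuf3_prog spills its intermediate results into a
 * struct dummy_offset shaped area at the top of the BPF stack: R10 is
 * the frame pointer, so each field sits at the negative offset
 * offsetof(field) - sizeof(struct dummy_offset).
 */
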
2862 /* all bpf test cases */
2863 static const struct bpf_test tests[] = {
2864 	{
2865 		.name = "test_store1",
2866 		.arg_sz = sizeof(struct dummy_offset),
2867 		.prm = {
2868 			.ins = test_store1_prog,
2869 			.nb_ins = RTE_DIM(test_store1_prog),
2870 			.prog_arg = {
2871 				.type = RTE_BPF_ARG_PTR,
2872 				.size = sizeof(struct dummy_offset),
2873 			},
2874 		},
2875 		.prepare = test_store1_prepare,
2876 		.check_result = test_store1_check,
2877 	},
2878 	{
2879 		.name = "test_store2",
2880 		.arg_sz = sizeof(struct dummy_offset),
2881 		.prm = {
2882 			.ins = test_store2_prog,
2883 			.nb_ins = RTE_DIM(test_store2_prog),
2884 			.prog_arg = {
2885 				.type = RTE_BPF_ARG_PTR,
2886 				.size = sizeof(struct dummy_offset),
2887 			},
2888 		},
2889 		.prepare = test_store1_prepare,
2890 		.check_result = test_store1_check,
2891 	},
2892 	{
2893 		.name = "test_load1",
2894 		.arg_sz = sizeof(struct dummy_offset),
2895 		.prm = {
2896 			.ins = test_load1_prog,
2897 			.nb_ins = RTE_DIM(test_load1_prog),
2898 			.prog_arg = {
2899 				.type = RTE_BPF_ARG_PTR,
2900 				.size = sizeof(struct dummy_offset),
2901 			},
2902 		},
2903 		.prepare = test_load1_prepare,
2904 		.check_result = test_load1_check,
2905 	},
2906 	{
2907 		.name = "test_ldimm1",
2908 		.arg_sz = sizeof(struct dummy_offset),
2909 		.prm = {
2910 			.ins = test_ldimm1_prog,
2911 			.nb_ins = RTE_DIM(test_ldimm1_prog),
2912 			.prog_arg = {
2913 				.type = RTE_BPF_ARG_PTR,
2914 				.size = sizeof(struct dummy_offset),
2915 			},
2916 		},
2917 		.prepare = test_store1_prepare,
2918 		.check_result = test_ldimm1_check,
2919 	},
2920 	{
2921 		.name = "test_mul1",
2922 		.arg_sz = sizeof(struct dummy_vect8),
2923 		.prm = {
2924 			.ins = test_mul1_prog,
2925 			.nb_ins = RTE_DIM(test_mul1_prog),
2926 			.prog_arg = {
2927 				.type = RTE_BPF_ARG_PTR,
2928 				.size = sizeof(struct dummy_vect8),
2929 			},
2930 		},
2931 		.prepare = test_mul1_prepare,
2932 		.check_result = test_mul1_check,
2933 	},
2934 	{
2935 		.name = "test_shift1",
2936 		.arg_sz = sizeof(struct dummy_vect8),
2937 		.prm = {
2938 			.ins = test_shift1_prog,
2939 			.nb_ins = RTE_DIM(test_shift1_prog),
2940 			.prog_arg = {
2941 				.type = RTE_BPF_ARG_PTR,
2942 				.size = sizeof(struct dummy_vect8),
2943 			},
2944 		},
2945 		.prepare = test_shift1_prepare,
2946 		.check_result = test_shift1_check,
2947 	},
2948 	{
2949 		.name = "test_jump1",
2950 		.arg_sz = sizeof(struct dummy_vect8),
2951 		.prm = {
2952 			.ins = test_jump1_prog,
2953 			.nb_ins = RTE_DIM(test_jump1_prog),
2954 			.prog_arg = {
2955 				.type = RTE_BPF_ARG_PTR,
2956 				.size = sizeof(struct dummy_vect8),
2957 			},
2958 		},
2959 		.prepare = test_jump1_prepare,
2960 		.check_result = test_jump1_check,
2961 	},
2962 	{
2963 		.name = "test_jump2",
2964 		.arg_sz = sizeof(struct dummy_net),
2965 		.prm = {
2966 			.ins = test_jump2_prog,
2967 			.nb_ins = RTE_DIM(test_jump2_prog),
2968 			.prog_arg = {
2969 				.type = RTE_BPF_ARG_PTR,
2970 				.size = sizeof(struct dummy_net),
2971 			},
2972 		},
2973 		.prepare = test_jump2_prepare,
2974 		.check_result = test_jump2_check,
2975 	},
2976 	{
2977 		.name = "test_alu1",
2978 		.arg_sz = sizeof(struct dummy_vect8),
2979 		.prm = {
2980 			.ins = test_alu1_prog,
2981 			.nb_ins = RTE_DIM(test_alu1_prog),
2982 			.prog_arg = {
2983 				.type = RTE_BPF_ARG_PTR,
2984 				.size = sizeof(struct dummy_vect8),
2985 			},
2986 		},
2987 		.prepare = test_jump1_prepare,
2988 		.check_result = test_alu1_check,
2989 	},
2990 	{
2991 		.name = "test_bele1",
2992 		.arg_sz = sizeof(struct dummy_vect8),
2993 		.prm = {
2994 			.ins = test_bele1_prog,
2995 			.nb_ins = RTE_DIM(test_bele1_prog),
2996 			.prog_arg = {
2997 				.type = RTE_BPF_ARG_PTR,
2998 				.size = sizeof(struct dummy_vect8),
2999 			},
3000 		},
3001 		.prepare = test_bele1_prepare,
3002 		.check_result = test_bele1_check,
3003 	},
3004 	{
3005 		.name = "test_xadd1",
3006 		.arg_sz = sizeof(struct dummy_offset),
3007 		.prm = {
3008 			.ins = test_xadd1_prog,
3009 			.nb_ins = RTE_DIM(test_xadd1_prog),
3010 			.prog_arg = {
3011 				.type = RTE_BPF_ARG_PTR,
3012 				.size = sizeof(struct dummy_offset),
3013 			},
3014 		},
3015 		.prepare = test_store1_prepare,
3016 		.check_result = test_xadd1_check,
3017 	},
3018 	{
3019 		.name = "test_div1",
3020 		.arg_sz = sizeof(struct dummy_vect8),
3021 		.prm = {
3022 			.ins = test_div1_prog,
3023 			.nb_ins = RTE_DIM(test_div1_prog),
3024 			.prog_arg = {
3025 				.type = RTE_BPF_ARG_PTR,
3026 				.size = sizeof(struct dummy_vect8),
3027 			},
3028 		},
3029 		.prepare = test_mul1_prepare,
3030 		.check_result = test_div1_check,
3031 	},
3032 	{
3033 		.name = "test_call1",
3034 		.arg_sz = sizeof(struct dummy_offset),
3035 		.prm = {
3036 			.ins = test_call1_prog,
3037 			.nb_ins = RTE_DIM(test_call1_prog),
3038 			.prog_arg = {
3039 				.type = RTE_BPF_ARG_PTR,
3040 				.size = sizeof(struct dummy_offset),
3041 			},
3042 			.xsym = test_call1_xsym,
3043 			.nb_xsym = RTE_DIM(test_call1_xsym),
3044 		},
3045 		.prepare = test_load1_prepare,
3046 		.check_result = test_call1_check,
3047 		/* function calls are not yet supported on 32-bit platforms */
3048 		.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3049 	},
3050 	{
3051 		.name = "test_call2",
3052 		.arg_sz = sizeof(struct dummy_offset),
3053 		.prm = {
3054 			.ins = test_call2_prog,
3055 			.nb_ins = RTE_DIM(test_call2_prog),
3056 			.prog_arg = {
3057 				.type = RTE_BPF_ARG_PTR,
3058 				.size = sizeof(struct dummy_offset),
3059 			},
3060 			.xsym = test_call2_xsym,
3061 			.nb_xsym = RTE_DIM(test_call2_xsym),
3062 		},
3063 		.prepare = test_store1_prepare,
3064 		.check_result = test_call2_check,
3065 		/* function calls are not yet supported on 32-bit platforms */
3066 		.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3067 	},
3068 	{
3069 		.name = "test_call3",
3070 		.arg_sz = sizeof(struct dummy_vect8),
3071 		.prm = {
3072 			.ins = test_call3_prog,
3073 			.nb_ins = RTE_DIM(test_call3_prog),
3074 			.prog_arg = {
3075 				.type = RTE_BPF_ARG_PTR,
3076 				.size = sizeof(struct dummy_vect8),
3077 			},
3078 			.xsym = test_call3_xsym,
3079 			.nb_xsym = RTE_DIM(test_call3_xsym),
3080 		},
3081 		.prepare = test_call3_prepare,
3082 		.check_result = test_call3_check,
3083 		/* function calls are not yet supported on 32-bit platforms */
3084 		.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3085 	},
3086 	{
3087 		.name = "test_call4",
3088 		.arg_sz = sizeof(struct dummy_offset),
3089 		.prm = {
3090 			.ins = test_call4_prog,
3091 			.nb_ins = RTE_DIM(test_call4_prog),
3092 			.prog_arg = {
3093 				.type = RTE_BPF_ARG_PTR,
3094 				.size = 2 * sizeof(struct dummy_offset),
3095 			},
3096 			.xsym = test_call4_xsym,
3097 			.nb_xsym = RTE_DIM(test_call4_xsym),
3098 		},
3099 		.prepare = test_store1_prepare,
3100 		.check_result = test_call4_check,
3101 		/* function calls are not yet supported on 32-bit platforms */
3102 		.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3103 	},
3104 	{
3105 		.name = "test_call5",
3106 		.arg_sz = sizeof(struct dummy_offset),
3107 		.prm = {
3108 			.ins = test_call5_prog,
3109 			.nb_ins = RTE_DIM(test_call5_prog),
3110 			.prog_arg = {
3111 				.type = RTE_BPF_ARG_PTR,
3112 				.size = sizeof(struct dummy_offset),
3113 			},
3114 			.xsym = test_call5_xsym,
3115 			.nb_xsym = RTE_DIM(test_call5_xsym),
3116 		},
3117 		.prepare = test_store1_prepare,
3118 		.check_result = test_call5_check,
3119 		/* function calls are not yet supported on 32-bit platforms */
3120 		.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3121 	},
3122 	{
3123 		.name = "test_ld_mbuf1",
3124 		.arg_sz = sizeof(struct dummy_mbuf),
3125 		.prm = {
3126 			.ins = test_ld_mbuf1_prog,
3127 			.nb_ins = RTE_DIM(test_ld_mbuf1_prog),
3128 			.prog_arg = {
3129 				.type = RTE_BPF_ARG_PTR_MBUF,
3130 				.buf_size = sizeof(struct dummy_mbuf),
3131 			},
3132 		},
3133 		.prepare = test_ld_mbuf1_prepare,
3134 		.check_result = test_ld_mbuf1_check,
3135 		/* mbuf as input argument is not supported on 32-bit platforms */
3136 		.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3137 	},
3138 	{
3139 		.name = "test_ld_mbuf2",
3140 		.arg_sz = sizeof(struct dummy_mbuf),
3141 		.prm = {
3142 			.ins = test_ld_mbuf1_prog,
3143 			.nb_ins = RTE_DIM(test_ld_mbuf1_prog),
3144 			.prog_arg = {
3145 				.type = RTE_BPF_ARG_PTR_MBUF,
3146 				.buf_size = sizeof(struct dummy_mbuf),
3147 			},
3148 		},
3149 		.prepare = test_ld_mbuf2_prepare,
3150 		.check_result = test_ld_mbuf2_check,
3151 		/* mbuf as input argument is not supported on 32-bit platforms */
3152 		.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3153 	},
3154 	{
3155 		.name = "test_ld_mbuf3",
3156 		.arg_sz = sizeof(struct dummy_mbuf),
3157 		.prm = {
3158 			.ins = test_ld_mbuf3_prog,
3159 			.nb_ins = RTE_DIM(test_ld_mbuf3_prog),
3160 			.prog_arg = {
3161 				.type = RTE_BPF_ARG_PTR_MBUF,
3162 				.buf_size = sizeof(struct dummy_mbuf),
3163 			},
3164 		},
3165 		.prepare = test_ld_mbuf1_prepare,
3166 		.check_result = test_ld_mbuf1_check,
3167 		/* mbuf as input argument is not supported on 32-bit platforms */
3168 		.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3169 	},
3170 };
3171 
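/*
 * Entries above with allow_fail set are still executed, but their result
 * does not affect the overall verdict (see test_bpf() below): on 32-bit
 * targets neither external function calls nor mbuf input arguments are
 * supported yet, as the inline comments note.
 */
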
3172 static int
3173 run_test(const struct bpf_test *tst)
3174 {
3175 	int32_t ret, rv;
3176 	int64_t rc;
3177 	struct rte_bpf *bpf;
3178 	struct rte_bpf_jit jit;
3179 	uint8_t tbuf[tst->arg_sz];
3180 
3181 	printf("%s(%s) start\n", __func__, tst->name);
3182 
3183 	bpf = rte_bpf_load(&tst->prm);
3184 	if (bpf == NULL) {
3185 		printf("%s@%d: failed to load bpf code, error=%d(%s);\n",
3186 			__func__, __LINE__, rte_errno, strerror(rte_errno));
3187 		return -1;
3188 	}
3189 
3190 	tst->prepare(tbuf);
3191 	rc = rte_bpf_exec(bpf, tbuf);
3192 	ret = tst->check_result(rc, tbuf);
3193 	if (ret != 0) {
3194 		printf("%s@%d: check_result(%s) failed, error: %d(%s);\n",
3195 			__func__, __LINE__, tst->name, ret, strerror(ret));
3196 	}
3197 
3198 	/* repeat the same test with jit, when possible */
3199 	rte_bpf_get_jit(bpf, &jit);
3200 	if (jit.func != NULL) {
3201 
3202 		tst->prepare(tbuf);
3203 		rc = jit.func(tbuf);
3204 		rv = tst->check_result(rc, tbuf);
3205 		ret |= rv;
3206 		if (rv != 0) {
3207 			printf("%s@%d: check_result(%s) failed, "
3208 				"error: %d(%s);\n",
3209 				__func__, __LINE__, tst->name,
3210 				rv, strerror(rv));
3211 		}
3212 	}
3213 
3214 	rte_bpf_destroy(bpf);
3215 	return ret;
3216 
3217 }
3218 
3219 static int
3220 test_bpf(void)
3221 {
3222 	int32_t rc, rv;
3223 	uint32_t i;
3224 
3225 	rc = 0;
3226 	for (i = 0; i != RTE_DIM(tests); i++) {
3227 		rv = run_test(tests + i);
3228 		if (tests[i].allow_fail == 0)
3229 			rc |= rv;
3230 	}
3231 
3232 	return rc;
3233 }
3234 
3235 REGISTER_TEST_COMMAND(bpf_autotest, test_bpf);
3236