xref: /dpdk/app/test/test_bpf.c (revision 089e5ed727a15da2729cfee9b63533dd120bd04c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4 
5 #include <stdio.h>
6 #include <string.h>
7 #include <stdint.h>
8 #include <inttypes.h>
9 
10 #include <rte_memory.h>
11 #include <rte_debug.h>
12 #include <rte_hexdump.h>
13 #include <rte_random.h>
14 #include <rte_byteorder.h>
15 #include <rte_errno.h>
16 #include <rte_bpf.h>
17 
18 #include "test.h"
19 
20 /*
21  * Basic functional tests for librte_bpf.
22  * The main procedure - load eBPF program, execute it and
 * compare results with expected values.
24  */
25 
/*
 * Scratch area used as the argument to the test BPF programs: one field
 * of each integer width, accessed by the programs via offsetof().
 * Field order defines the layout that the load/store test-cases rely on.
 */
struct dummy_offset {
	uint64_t u64;
	uint32_t u32;
	uint16_t u16;
	uint8_t  u8;
};
32 
/*
 * Argument block for the vector test-cases: programs read from in[]
 * and write their results into out[] for the check callbacks to verify.
 */
struct dummy_vect8 {
	struct dummy_offset in[8];
	struct dummy_offset out[8];
};
37 
/* fill pattern used by the store/load/xadd test-cases */
#define	TEST_FILL_1	0xDEADBEEF

/* multipliers for the ALU mul test-cases (one positive, one negative) */
#define	TEST_MUL_1	21
#define TEST_MUL_2	-100

/* shift counts: TEST_SHIFT_1 < 32, TEST_SHIFT_2 in [32, 64) (64-bit only) */
#define TEST_SHIFT_1	15
#define TEST_SHIFT_2	33

/* comparison operands for the conditional-jump test-cases */
#define TEST_JCC_1	0
#define TEST_JCC_2	-123
#define TEST_JCC_3	5678
#define TEST_JCC_4	TEST_FILL_1

/* 64-bit immediates covering sign/zero-extension corner cases for LDDW */
#define TEST_IMM_1	UINT64_MAX
#define TEST_IMM_2	((uint64_t)INT64_MIN)
#define TEST_IMM_3	((uint64_t)INT64_MAX + INT32_MAX)
#define TEST_IMM_4	((uint64_t)UINT32_MAX)
#define TEST_IMM_5	((uint64_t)UINT32_MAX + 1)
56 
/* descriptor for one functional test-case */
struct bpf_test {
	const char *name;	/* test name for reporting */
	size_t arg_sz;		/* size of the argument blob given to the program */
	struct rte_bpf_prm prm;	/* BPF load parameters (instructions, etc.) */
	void (*prepare)(void *);	/* fills the argument blob before the run */
	int (*check_result)(uint64_t, const void *);	/* verifies rc and output */
	uint32_t allow_fail;	/* non-zero if a failure is tolerated */
};
65 
66 /*
67  * Compare return value and result data with expected ones.
68  * Report a failure if they don't match.
69  */
/*
 * Compare an observed return code and result buffer against the expected
 * ones, dumping any mismatch. Returns 0 on full match, negative otherwise.
 */
static int
cmp_res(const char *func, uint64_t exp_rc, uint64_t ret_rc,
	const void *exp_res, const void *ret_res, size_t res_sz)
{
	int32_t rv = 0;

	/* check the program's return value first */
	if (exp_rc != ret_rc) {
		printf("%s@%d: invalid return value, expected: 0x%" PRIx64
			",result: 0x%" PRIx64 "\n",
			func, __LINE__, exp_rc, ret_rc);
		rv |= -1;
	}

	/* then compare the output data byte-by-byte */
	if (memcmp(exp_res, ret_res, res_sz) != 0) {
		printf("%s: invalid value\n", func);
		rte_memdump(stdout, "expected", exp_res, res_sz);
		rte_memdump(stdout, "result", ret_res, res_sz);
		rv |= -1;
	}

	return rv;
}
93 
94 /* store immediate test-cases */
static const struct ebpf_insn test_store1_prog[] = {
	/* store the TEST_FILL_1 immediate (truncated per access width)
	 * into each field of the dummy_offset pointed to by R1
	 */
	{
		.code = (BPF_ST | BPF_MEM | BPF_B),
		.dst_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_offset, u8),
		.imm = TEST_FILL_1,
	},
	{
		.code = (BPF_ST | BPF_MEM | BPF_H),
		.dst_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_offset, u16),
		.imm = TEST_FILL_1,
	},
	{
		.code = (BPF_ST | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_offset, u32),
		.imm = TEST_FILL_1,
	},
	{
		/* 64-bit store: the 32-bit imm is sign-extended */
		.code = (BPF_ST | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_offset, u64),
		.imm = TEST_FILL_1,
	},
	/* return 1 */
	{
		.code = (BPF_ALU | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 1,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};
130 
131 static void
132 test_store1_prepare(void *arg)
133 {
134 	struct dummy_offset *df;
135 
136 	df = arg;
137 	memset(df, 0, sizeof(*df));
138 }
139 
140 static int
141 test_store1_check(uint64_t rc, const void *arg)
142 {
143 	const struct dummy_offset *dft;
144 	struct dummy_offset dfe;
145 
146 	dft = arg;
147 
148 	memset(&dfe, 0, sizeof(dfe));
149 	dfe.u64 = (int32_t)TEST_FILL_1;
150 	dfe.u32 = dfe.u64;
151 	dfe.u16 = dfe.u64;
152 	dfe.u8 = dfe.u64;
153 
154 	return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe));
155 }
156 
157 /* store register test-cases */
static const struct ebpf_insn test_store2_prog[] = {

	/* R2 = TEST_FILL_1 (sign-extended by the 64-bit mov) */
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = TEST_FILL_1,
	},
	/* store R2 into each field of the dummy_offset at R1,
	 * one store per access width
	 */
	{
		.code = (BPF_STX | BPF_MEM | BPF_B),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_offset, u8),
	},
	{
		.code = (BPF_STX | BPF_MEM | BPF_H),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_offset, u16),
	},
	{
		.code = (BPF_STX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_offset, u32),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_offset, u64),
	},
	/* return 1 */
	{
		.code = (BPF_ALU | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 1,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};
199 
200 /* load test-cases */
static const struct ebpf_insn test_load1_prog[] = {

	/* load each field of the dummy_offset at R1 into its own register */
	{
		.code = (BPF_LDX | BPF_MEM | BPF_B),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_offset, u8),
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_H),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_offset, u16),
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_offset, u32),
	},
	{
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_offset, u64),
	},
	/* return sum */
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_4,
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_3,
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_2,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};
247 
248 static void
249 test_load1_prepare(void *arg)
250 {
251 	struct dummy_offset *df;
252 
253 	df = arg;
254 
255 	memset(df, 0, sizeof(*df));
256 	df->u64 = (int32_t)TEST_FILL_1;
257 	df->u32 = df->u64;
258 	df->u16 = df->u64;
259 	df->u8 = df->u64;
260 }
261 
262 static int
263 test_load1_check(uint64_t rc, const void *arg)
264 {
265 	uint64_t v;
266 	const struct dummy_offset *dft;
267 
268 	dft = arg;
269 	v = dft->u64;
270 	v += dft->u32;
271 	v += dft->u16;
272 	v += dft->u8;
273 
274 	return cmp_res(__func__, v, rc, dft, dft, sizeof(*dft));
275 }
276 
277 /* load immediate test-cases */
static const struct ebpf_insn test_ldimm1_prog[] = {

	/* LDDW is a two-slot instruction: the first slot carries the low
	 * 32 bits in .imm, the second (otherwise empty) slot the high 32.
	 * Load each TEST_IMM_* constant into a separate register.
	 */
	{
		.code = (BPF_LD | BPF_IMM | EBPF_DW),
		.dst_reg = EBPF_REG_0,
		.imm = (uint32_t)TEST_IMM_1,
	},
	{
		.imm = TEST_IMM_1 >> 32,
	},
	{
		.code = (BPF_LD | BPF_IMM | EBPF_DW),
		.dst_reg = EBPF_REG_3,
		.imm = (uint32_t)TEST_IMM_2,
	},
	{
		.imm = TEST_IMM_2 >> 32,
	},
	{
		.code = (BPF_LD | BPF_IMM | EBPF_DW),
		.dst_reg = EBPF_REG_5,
		.imm = (uint32_t)TEST_IMM_3,
	},
	{
		.imm = TEST_IMM_3 >> 32,
	},
	{
		.code = (BPF_LD | BPF_IMM | EBPF_DW),
		.dst_reg = EBPF_REG_7,
		.imm = (uint32_t)TEST_IMM_4,
	},
	{
		.imm = TEST_IMM_4 >> 32,
	},
	{
		.code = (BPF_LD | BPF_IMM | EBPF_DW),
		.dst_reg = EBPF_REG_9,
		.imm = (uint32_t)TEST_IMM_5,
	},
	{
		.imm = TEST_IMM_5 >> 32,
	},
	/* return sum */
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_3,
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_5,
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_7,
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_9,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};
345 
346 static int
347 test_ldimm1_check(uint64_t rc, const void *arg)
348 {
349 	uint64_t v1, v2;
350 
351 	v1 = TEST_IMM_1;
352 	v2 = TEST_IMM_2;
353 	v1 += v2;
354 	v2 = TEST_IMM_3;
355 	v1 += v2;
356 	v2 = TEST_IMM_4;
357 	v1 += v2;
358 	v2 = TEST_IMM_5;
359 	v1 += v2;
360 
361 	return cmp_res(__func__, v1, rc, arg, arg, 0);
362 }
363 
364 
365 /* alu mul test-cases */
static const struct ebpf_insn test_mul1_prog[] = {

	/* R2 = in[0].u32, R3 = in[1].u64, R4 = in[2].u32 */
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u32),
	},
	{
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[1].u64),
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[2].u32),
	},
	/* 32-bit mul by immediate: R2 = (uint32_t)R2 * TEST_MUL_1 */
	{
		.code = (BPF_ALU | BPF_MUL | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = TEST_MUL_1,
	},
	/* 64-bit mul by immediate: R3 *= TEST_MUL_2 */
	{
		.code = (EBPF_ALU64 | BPF_MUL | BPF_K),
		.dst_reg = EBPF_REG_3,
		.imm = TEST_MUL_2,
	},
	/* 32-bit mul by register, then 64-bit mul by register */
	{
		.code = (BPF_ALU | BPF_MUL | BPF_X),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_2,
	},
	{
		.code = (EBPF_ALU64 | BPF_MUL | BPF_X),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_3,
	},
	/* out[0..2].u64 = R2, R3, R4 */
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_vect8, out[0].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_3,
		.off = offsetof(struct dummy_vect8, out[1].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_4,
		.off = offsetof(struct dummy_vect8, out[2].u64),
	},
	/* return 1 */
	{
		.code = (BPF_ALU | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 1,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};
434 
435 static void
436 test_mul1_prepare(void *arg)
437 {
438 	struct dummy_vect8 *dv;
439 	uint64_t v;
440 
441 	dv = arg;
442 
443 	v = rte_rand();
444 
445 	memset(dv, 0, sizeof(*dv));
446 	dv->in[0].u32 = v;
447 	dv->in[1].u64 = v << 12 | v >> 6;
448 	dv->in[2].u32 = -v;
449 }
450 
/*
 * Recompute test_mul1_prog's results in C and compare.
 * 32-bit BPF ops are mirrored with explicit (uint32_t) casts;
 * 64-bit ops use full-width uint64_t arithmetic.
 */
static int
test_mul1_check(uint64_t rc, const void *arg)
{
	uint64_t r2, r3, r4;
	const struct dummy_vect8 *dvt;
	struct dummy_vect8 dve;

	dvt = arg;
	memset(&dve, 0, sizeof(dve));

	/* same registers the program loads */
	r2 = dvt->in[0].u32;
	r3 = dvt->in[1].u64;
	r4 = dvt->in[2].u32;

	r2 = (uint32_t)r2 * TEST_MUL_1;		/* 32-bit mul, imm */
	r3 *= TEST_MUL_2;			/* 64-bit mul, imm */
	r4 = (uint32_t)(r4 * r2);		/* 32-bit mul, reg */
	r4 *= r3;				/* 64-bit mul, reg */

	dve.out[0].u64 = r2;
	dve.out[1].u64 = r3;
	dve.out[2].u64 = r4;

	return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
}
476 
477 /* alu shift test-cases */
static const struct ebpf_insn test_shift1_prog[] = {

	/* R2 = in[0].u32, R3 = in[1].u64, R4 = in[2].u32 */
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u32),
	},
	{
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[1].u64),
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[2].u32),
	},
	/* 32-bit lsh and 64-bit arithmetic rsh by immediates */
	{
		.code = (BPF_ALU | BPF_LSH | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = TEST_SHIFT_1,
	},
	{
		.code = (EBPF_ALU64 | EBPF_ARSH | BPF_K),
		.dst_reg = EBPF_REG_3,
		.imm = TEST_SHIFT_2,
	},
	/* out[0].u64 = R2, out[1].u64 = R3 */
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_vect8, out[0].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_3,
		.off = offsetof(struct dummy_vect8, out[1].u64),
	},
	/* shifts by register: 32-bit rsh, 64-bit lsh */
	{
		.code = (BPF_ALU | BPF_RSH | BPF_X),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_4,
	},
	{
		.code = (EBPF_ALU64 | BPF_LSH | BPF_X),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_4,
	},
	/* out[2].u64 = R2, out[3].u64 = R3 */
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_vect8, out[2].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_3,
		.off = offsetof(struct dummy_vect8, out[3].u64),
	},
	/* reload the inputs for the masked-shift-count round */
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u32),
	},
	{
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[1].u64),
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[2].u32),
	},
	/* mask R2 to a valid 64-bit shift count, then R3 >>= R2 (arith) */
	{
		.code = (BPF_ALU | BPF_AND | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = sizeof(uint64_t) * CHAR_BIT - 1,
	},
	{
		.code = (EBPF_ALU64 | EBPF_ARSH | BPF_X),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_2,
	},
	/* mask R2 to a valid 32-bit shift count, then R4 <<= R2 (32-bit) */
	{
		.code = (BPF_ALU | BPF_AND | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = sizeof(uint32_t) * CHAR_BIT - 1,
	},
	{
		.code = (BPF_ALU | BPF_LSH | BPF_X),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_2,
	},
	/* out[4].u64 = R4, out[5].u64 = R3 */
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_4,
		.off = offsetof(struct dummy_vect8, out[4].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_3,
		.off = offsetof(struct dummy_vect8, out[5].u64),
	},
	/* return 1 */
	{
		.code = (BPF_ALU | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 1,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};
602 
603 static void
604 test_shift1_prepare(void *arg)
605 {
606 	struct dummy_vect8 *dv;
607 	uint64_t v;
608 
609 	dv = arg;
610 
611 	v = rte_rand();
612 
613 	memset(dv, 0, sizeof(*dv));
614 	dv->in[0].u32 = v;
615 	dv->in[1].u64 = v << 12 | v >> 6;
616 	dv->in[2].u32 = (-v ^ 5);
617 }
618 
/*
 * Recompute test_shift1_prog's results in C and compare.
 * 32-bit ops use (uint32_t) casts, arithmetic shifts use (int64_t).
 */
static int
test_shift1_check(uint64_t rc, const void *arg)
{
	uint64_t r2, r3, r4;
	const struct dummy_vect8 *dvt;
	struct dummy_vect8 dve;

	dvt = arg;
	memset(&dve, 0, sizeof(dve));

	r2 = dvt->in[0].u32;
	r3 = dvt->in[1].u64;
	r4 = dvt->in[2].u32;

	r2 = (uint32_t)r2 << TEST_SHIFT_1;	/* 32-bit lsh, imm */
	r3 = (int64_t)r3 >> TEST_SHIFT_2;	/* 64-bit arsh, imm */

	dve.out[0].u64 = r2;
	dve.out[1].u64 = r3;

	r2 = (uint32_t)r2 >> r4;		/* 32-bit rsh, reg */
	r3 <<= r4;				/* 64-bit lsh, reg */

	dve.out[2].u64 = r2;
	dve.out[3].u64 = r3;

	/* second round: shift counts masked to the operand width */
	r2 = dvt->in[0].u32;
	r3 = dvt->in[1].u64;
	r4 = dvt->in[2].u32;

	r2 &= sizeof(uint64_t) * CHAR_BIT - 1;
	r3 = (int64_t)r3 >> r2;			/* 64-bit arsh, masked reg */
	r2 &= sizeof(uint32_t) * CHAR_BIT - 1;
	r4 = (uint32_t)r4 << r2;		/* 32-bit lsh, masked reg */

	dve.out[4].u64 = r4;
	dve.out[5].u64 = r3;

	return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
}
659 
660 /* jmp test-cases */
static const struct ebpf_insn test_jump1_prog[] = {

	/* R0 = 0: accumulates one bit per taken conditional jump */
	[0] = {
		.code = (BPF_ALU | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 0,
	},
	/* R2 = in[0].u32, R3 = in[0].u64, R4 = in[1].u32, R5 = in[1].u64 */
	[1] = {
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u32),
	},
	[2] = {
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u64),
	},
	[3] = {
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[1].u32),
	},
	[4] = {
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_5,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[1].u64),
	},
	/* each conditional jump targets an "R0 |= bit; jump back" pair
	 * below; offsets are relative to the next instruction
	 */
	[5] = {
		.code = (BPF_JMP | BPF_JEQ | BPF_K),	/* -> [14], bit 0x1 */
		.dst_reg = EBPF_REG_2,
		.imm = TEST_JCC_1,
		.off = 8,
	},
	[6] = {
		.code = (BPF_JMP | EBPF_JSLE | BPF_K),	/* -> [16], bit 0x2 */
		.dst_reg = EBPF_REG_3,
		.imm = TEST_JCC_2,
		.off = 9,
	},
	[7] = {
		.code = (BPF_JMP | BPF_JGT | BPF_K),	/* -> [18], bit 0x4 */
		.dst_reg = EBPF_REG_4,
		.imm = TEST_JCC_3,
		.off = 10,
	},
	[8] = {
		.code = (BPF_JMP | BPF_JSET | BPF_K),	/* -> [20], bit 0x8 */
		.dst_reg = EBPF_REG_5,
		.imm = TEST_JCC_4,
		.off = 11,
	},
	[9] = {
		.code = (BPF_JMP | EBPF_JNE | BPF_X),	/* -> [22], bit 0x10 */
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_3,
		.off = 12,
	},
	[10] = {
		.code = (BPF_JMP | EBPF_JSGT | BPF_X),	/* -> [24], bit 0x20 */
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_4,
		.off = 13,
	},
	[11] = {
		.code = (BPF_JMP | EBPF_JLE | BPF_X),	/* -> [26], bit 0x40 */
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_5,
		.off = 14,
	},
	[12] = {
		.code = (BPF_JMP | BPF_JSET | BPF_X),	/* -> [28], bit 0x80 */
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_5,
		.off = 15,
	},
	[13] = {
		.code = (BPF_JMP | EBPF_EXIT),
	},
	/* jump targets: set one bit in R0, then jump back to the next
	 * conditional jump (or the exit)
	 */
	[14] = {
		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 0x1,
	},
	[15] = {
		.code = (BPF_JMP | BPF_JA),	/* back to [6] */
		.off = -10,
	},
	[16] = {
		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 0x2,
	},
	[17] = {
		.code = (BPF_JMP | BPF_JA),	/* back to [7] */
		.off = -11,
	},
	[18] = {
		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 0x4,
	},
	[19] = {
		.code = (BPF_JMP | BPF_JA),	/* back to [8] */
		.off = -12,
	},
	[20] = {
		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 0x8,
	},
	[21] = {
		.code = (BPF_JMP | BPF_JA),	/* back to [9] */
		.off = -13,
	},
	[22] = {
		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 0x10,
	},
	[23] = {
		.code = (BPF_JMP | BPF_JA),	/* back to [10] */
		.off = -14,
	},
	[24] = {
		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 0x20,
	},
	[25] = {
		.code = (BPF_JMP | BPF_JA),	/* back to [11] */
		.off = -15,
	},
	[26] = {
		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 0x40,
	},
	[27] = {
		.code = (BPF_JMP | BPF_JA),	/* back to [12] */
		.off = -16,
	},
	[28] = {
		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 0x80,
	},
	[29] = {
		.code = (BPF_JMP | BPF_JA),	/* back to [13] (exit) */
		.off = -17,
	},
};
816 
817 static void
818 test_jump1_prepare(void *arg)
819 {
820 	struct dummy_vect8 *dv;
821 	uint64_t v1, v2;
822 
823 	dv = arg;
824 
825 	v1 = rte_rand();
826 	v2 = rte_rand();
827 
828 	memset(dv, 0, sizeof(*dv));
829 	dv->in[0].u64 = v1;
830 	dv->in[1].u64 = v2;
831 	dv->in[0].u32 = (v1 << 12) + (v2 >> 6);
832 	dv->in[1].u32 = (v2 << 12) - (v1 >> 6);
833 }
834 
/*
 * Recompute the bitmask built by test_jump1_prog: each condition below
 * mirrors one conditional jump in the program (signed comparisons use
 * (int64_t) casts; the 32-bit loads were zero-extended into uint64_t).
 */
static int
test_jump1_check(uint64_t rc, const void *arg)
{
	uint64_t r2, r3, r4, r5, rv;
	const struct dummy_vect8 *dvt;

	dvt = arg;

	/* same registers the program loads */
	rv = 0;
	r2 = dvt->in[0].u32;
	r3 = dvt->in[0].u64;
	r4 = dvt->in[1].u32;
	r5 = dvt->in[1].u64;

	if (r2 == TEST_JCC_1)		/* JEQ, imm */
		rv |= 0x1;
	if ((int64_t)r3 <= TEST_JCC_2)	/* JSLE, imm */
		rv |= 0x2;
	if (r4 > TEST_JCC_3)		/* JGT, imm */
		rv |= 0x4;
	if (r5 & TEST_JCC_4)		/* JSET, imm */
		rv |= 0x8;
	if (r2 != r3)			/* JNE, reg */
		rv |= 0x10;
	if ((int64_t)r2 > (int64_t)r4)	/* JSGT, reg */
		rv |= 0x20;
	if (r2 <= r5)			/* JLE, reg */
		rv |= 0x40;
	if (r3 & r5)			/* JSET, reg */
		rv |= 0x80;

	return cmp_res(__func__, rv, rc, &rv, &rc, sizeof(rv));
}
868 
869 /* alu (add, sub, and, or, xor, neg)  test-cases */
static const struct ebpf_insn test_alu1_prog[] = {

	/* R2 = in[0].u32, R3 = in[0].u64, R4 = in[1].u32, R5 = in[1].u64 */
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u32),
	},
	{
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u64),
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[1].u32),
	},
	{
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_5,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[1].u64),
	},
	/* immediate forms: 32-bit and, 64-bit or, 32-bit xor, 64-bit add */
	{
		.code = (BPF_ALU | BPF_AND | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = TEST_FILL_1,
	},
	{
		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
		.dst_reg = EBPF_REG_3,
		.imm = TEST_FILL_1,
	},
	{
		.code = (BPF_ALU | BPF_XOR | BPF_K),
		.dst_reg = EBPF_REG_4,
		.imm = TEST_FILL_1,
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_K),
		.dst_reg = EBPF_REG_5,
		.imm = TEST_FILL_1,
	},
	/* out[0..3].u64 = R2, R3, R4, R5 */
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_vect8, out[0].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_3,
		.off = offsetof(struct dummy_vect8, out[1].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_4,
		.off = offsetof(struct dummy_vect8, out[2].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_5,
		.off = offsetof(struct dummy_vect8, out[3].u64),
	},
	/* register forms: 32-bit or, 64-bit xor, 32-bit sub, 64-bit and */
	{
		.code = (BPF_ALU | BPF_OR | BPF_X),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_3,
	},
	{
		.code = (EBPF_ALU64 | BPF_XOR | BPF_X),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_4,
	},
	{
		.code = (BPF_ALU | BPF_SUB | BPF_X),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_5,
	},
	{
		.code = (EBPF_ALU64 | BPF_AND | BPF_X),
		.dst_reg = EBPF_REG_5,
		.src_reg = EBPF_REG_2,
	},
	/* out[4..7].u64 = R2, R3, R4, R5 */
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_vect8, out[4].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_3,
		.off = offsetof(struct dummy_vect8, out[5].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_4,
		.off = offsetof(struct dummy_vect8, out[6].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_5,
		.off = offsetof(struct dummy_vect8, out[7].u64),
	},
	/* return (-r2 + (-r3)) */
	{
		.code = (BPF_ALU | BPF_NEG),	/* 32-bit negate */
		.dst_reg = EBPF_REG_2,
	},
	{
		.code = (EBPF_ALU64 | BPF_NEG),	/* 64-bit negate */
		.dst_reg = EBPF_REG_3,
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_3,
	},
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_2,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};
1007 
/*
 * Recompute test_alu1_prog's results in C and compare.
 * 32-bit ops use (uint32_t)/(int32_t) casts to mirror truncation and
 * immediate sign-extension; 64-bit ops use full-width arithmetic.
 */
static int
test_alu1_check(uint64_t rc, const void *arg)
{
	uint64_t r2, r3, r4, r5, rv;
	const struct dummy_vect8 *dvt;
	struct dummy_vect8 dve;

	dvt = arg;
	memset(&dve, 0, sizeof(dve));

	/* same registers the program loads */
	r2 = dvt->in[0].u32;
	r3 = dvt->in[0].u64;
	r4 = dvt->in[1].u32;
	r5 = dvt->in[1].u64;

	/* immediate forms */
	r2 = (uint32_t)r2 & TEST_FILL_1;
	r3 |= (int32_t) TEST_FILL_1;
	r4 = (uint32_t)r4 ^ TEST_FILL_1;
	r5 += (int32_t)TEST_FILL_1;

	dve.out[0].u64 = r2;
	dve.out[1].u64 = r3;
	dve.out[2].u64 = r4;
	dve.out[3].u64 = r5;

	/* register forms */
	r2 = (uint32_t)r2 | (uint32_t)r3;
	r3 ^= r4;
	r4 = (uint32_t)r4 - (uint32_t)r5;
	r5 &= r2;

	dve.out[4].u64 = r2;
	dve.out[5].u64 = r3;
	dve.out[6].u64 = r4;
	dve.out[7].u64 = r5;

	/* expected rc: 32-bit neg of r2 plus 64-bit neg of r3 */
	r2 = -(int32_t)r2;
	rv = (uint32_t)r2;
	r3 = -r3;
	rv += r3;

	return cmp_res(__func__, rv, rc, dve.out, dvt->out, sizeof(dve.out));
}
1050 
1051 /* endianness conversions (BE->LE/LE->BE)  test-cases */
static const struct ebpf_insn test_bele1_prog[] = {

	/* R2 = in[0].u16, R3 = in[0].u32, R4 = in[0].u64 */
	{
		.code = (BPF_LDX | BPF_MEM | BPF_H),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u16),
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u32),
	},
	{
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u64),
	},
	/* host-to-BE conversion; .imm selects the width in bits */
	{
		.code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
		.dst_reg = EBPF_REG_2,
		.imm = sizeof(uint16_t) * CHAR_BIT,
	},
	{
		.code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
		.dst_reg = EBPF_REG_3,
		.imm = sizeof(uint32_t) * CHAR_BIT,
	},
	{
		.code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
		.dst_reg = EBPF_REG_4,
		.imm = sizeof(uint64_t) * CHAR_BIT,
	},
	/* out[0..2].u64 = BE results */
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_vect8, out[0].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_3,
		.off = offsetof(struct dummy_vect8, out[1].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_4,
		.off = offsetof(struct dummy_vect8, out[2].u64),
	},
	/* reload the inputs for the host-to-LE round */
	{
		.code = (BPF_LDX | BPF_MEM | BPF_H),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u16),
	},
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u32),
	},
	{
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u64),
	},
	{
		.code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
		.dst_reg = EBPF_REG_2,
		.imm = sizeof(uint16_t) * CHAR_BIT,
	},
	{
		.code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
		.dst_reg = EBPF_REG_3,
		.imm = sizeof(uint32_t) * CHAR_BIT,
	},
	{
		.code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
		.dst_reg = EBPF_REG_4,
		.imm = sizeof(uint64_t) * CHAR_BIT,
	},
	/* out[3..5].u64 = LE results */
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_vect8, out[3].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_3,
		.off = offsetof(struct dummy_vect8, out[4].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_4,
		.off = offsetof(struct dummy_vect8, out[5].u64),
	},
	/* return 1 */
	{
		.code = (BPF_ALU | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 1,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};
1166 
1167 static void
1168 test_bele1_prepare(void *arg)
1169 {
1170 	struct dummy_vect8 *dv;
1171 
1172 	dv = arg;
1173 
1174 	memset(dv, 0, sizeof(*dv));
1175 	dv->in[0].u64 = rte_rand();
1176 	dv->in[0].u32 = dv->in[0].u64;
1177 	dv->in[0].u16 = dv->in[0].u64;
1178 }
1179 
1180 static int
1181 test_bele1_check(uint64_t rc, const void *arg)
1182 {
1183 	uint64_t r2, r3, r4;
1184 	const struct dummy_vect8 *dvt;
1185 	struct dummy_vect8 dve;
1186 
1187 	dvt = arg;
1188 	memset(&dve, 0, sizeof(dve));
1189 
1190 	r2 = dvt->in[0].u16;
1191 	r3 = dvt->in[0].u32;
1192 	r4 = dvt->in[0].u64;
1193 
1194 	r2 =  rte_cpu_to_be_16(r2);
1195 	r3 =  rte_cpu_to_be_32(r3);
1196 	r4 =  rte_cpu_to_be_64(r4);
1197 
1198 	dve.out[0].u64 = r2;
1199 	dve.out[1].u64 = r3;
1200 	dve.out[2].u64 = r4;
1201 
1202 	r2 = dvt->in[0].u16;
1203 	r3 = dvt->in[0].u32;
1204 	r4 = dvt->in[0].u64;
1205 
1206 	r2 =  rte_cpu_to_le_16(r2);
1207 	r3 =  rte_cpu_to_le_32(r3);
1208 	r4 =  rte_cpu_to_le_64(r4);
1209 
1210 	dve.out[3].u64 = r2;
1211 	dve.out[4].u64 = r3;
1212 	dve.out[5].u64 = r4;
1213 
1214 	return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
1215 }
1216 
1217 /* atomic add test-cases */
static const struct ebpf_insn test_xadd1_prog[] = {

	/* for each constant: load it into a register, then atomically add
	 * it to both the u32 and u64 fields of the dummy_offset at R1
	 */
	/* +1 */
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = 1,
	},
	{
		.code = (BPF_STX | EBPF_XADD | BPF_W),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_offset, u32),
	},
	{
		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_offset, u64),
	},
	/* -1 */
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_3,
		.imm = -1,
	},
	{
		.code = (BPF_STX | EBPF_XADD | BPF_W),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_3,
		.off = offsetof(struct dummy_offset, u32),
	},
	{
		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_3,
		.off = offsetof(struct dummy_offset, u64),
	},
	/* +TEST_FILL_1 (sign-extended) */
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_4,
		.imm = TEST_FILL_1,
	},
	{
		.code = (BPF_STX | EBPF_XADD | BPF_W),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_4,
		.off = offsetof(struct dummy_offset, u32),
	},
	{
		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_4,
		.off = offsetof(struct dummy_offset, u64),
	},
	/* +TEST_MUL_1 */
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_5,
		.imm = TEST_MUL_1,
	},
	{
		.code = (BPF_STX | EBPF_XADD | BPF_W),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_5,
		.off = offsetof(struct dummy_offset, u32),
	},
	{
		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_5,
		.off = offsetof(struct dummy_offset, u64),
	},
	/* +TEST_MUL_2 (negative) */
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_6,
		.imm = TEST_MUL_2,
	},
	{
		.code = (BPF_STX | EBPF_XADD | BPF_W),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_6,
		.off = offsetof(struct dummy_offset, u32),
	},
	{
		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_6,
		.off = offsetof(struct dummy_offset, u64),
	},
	/* +TEST_JCC_2 (negative) */
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_7,
		.imm = TEST_JCC_2,
	},
	{
		.code = (BPF_STX | EBPF_XADD | BPF_W),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_7,
		.off = offsetof(struct dummy_offset, u32),
	},
	{
		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_7,
		.off = offsetof(struct dummy_offset, u64),
	},
	/* +TEST_JCC_3 */
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_8,
		.imm = TEST_JCC_3,
	},
	{
		.code = (BPF_STX | EBPF_XADD | BPF_W),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_8,
		.off = offsetof(struct dummy_offset, u32),
	},
	{
		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_8,
		.off = offsetof(struct dummy_offset, u64),
	},
	/* return 1 */
	{
		.code = (BPF_ALU | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 1,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};
1349 
1350 static int
1351 test_xadd1_check(uint64_t rc, const void *arg)
1352 {
1353 	uint64_t rv;
1354 	const struct dummy_offset *dft;
1355 	struct dummy_offset dfe;
1356 
1357 	dft = arg;
1358 	memset(&dfe, 0, sizeof(dfe));
1359 
1360 	rv = 1;
1361 	rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1362 	rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1363 
1364 	rv = -1;
1365 	rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1366 	rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1367 
1368 	rv = (int32_t)TEST_FILL_1;
1369 	rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1370 	rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1371 
1372 	rv = TEST_MUL_1;
1373 	rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1374 	rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1375 
1376 	rv = TEST_MUL_2;
1377 	rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1378 	rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1379 
1380 	rv = TEST_JCC_2;
1381 	rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1382 	rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1383 
1384 	rv = TEST_JCC_3;
1385 	rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1386 	rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1387 
1388 	return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe));
1389 }
1390 
/* alu div test-cases */
static const struct ebpf_insn test_div1_prog[] = {

	/* r2 = *(uint32_t *)(r1 + offsetof(in[0].u32)) */
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[0].u32),
	},
	/* r3 = *(uint64_t *)(r1 + offsetof(in[1].u64)) */
	{
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[1].u64),
	},
	/* r4 = *(uint32_t *)(r1 + offsetof(in[2].u32)) */
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[2].u32),
	},
	/* r2 = (uint32_t)r2 / TEST_MUL_1  (32-bit divide) */
	{
		.code = (BPF_ALU | BPF_DIV | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = TEST_MUL_1,
	},
	/* r3 %= TEST_MUL_2  (64-bit modulo) */
	{
		.code = (EBPF_ALU64 | BPF_MOD | BPF_K),
		.dst_reg = EBPF_REG_3,
		.imm = TEST_MUL_2,
	},
	/* r2 |= 1, r3 |= 1: make sure later divisors are non-zero */
	{
		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = 1,
	},
	{
		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
		.dst_reg = EBPF_REG_3,
		.imm = 1,
	},
	/* r4 = (uint32_t)(r4 % r2)  (32-bit modulo by register) */
	{
		.code = (BPF_ALU | BPF_MOD | BPF_X),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_2,
	},
	/* r4 /= r3  (64-bit divide by register) */
	{
		.code = (EBPF_ALU64 | BPF_DIV | BPF_X),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_3,
	},
	/* store r2, r3, r4 into out[0..2].u64 for the check routine */
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_2,
		.off = offsetof(struct dummy_vect8, out[0].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_3,
		.off = offsetof(struct dummy_vect8, out[1].u64),
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_4,
		.off = offsetof(struct dummy_vect8, out[2].u64),
	},
	/* check that we can handle division by zero gracefully. */
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_vect8, in[3].u32),
	},
	/*
	 * r4 /= r2; the check routine expects in[3].u32 to be zero here,
	 * so execution should stop with return value 0 (see
	 * test_div1_check) instead of reaching the "return 1" below.
	 */
	{
		.code = (BPF_ALU | BPF_DIV | BPF_X),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_2,
	},
	/* return 1 */
	{
		.code = (BPF_ALU | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 1,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};
1482 
1483 static int
1484 test_div1_check(uint64_t rc, const void *arg)
1485 {
1486 	uint64_t r2, r3, r4;
1487 	const struct dummy_vect8 *dvt;
1488 	struct dummy_vect8 dve;
1489 
1490 	dvt = arg;
1491 	memset(&dve, 0, sizeof(dve));
1492 
1493 	r2 = dvt->in[0].u32;
1494 	r3 = dvt->in[1].u64;
1495 	r4 = dvt->in[2].u32;
1496 
1497 	r2 = (uint32_t)r2 / TEST_MUL_1;
1498 	r3 %= TEST_MUL_2;
1499 	r2 |= 1;
1500 	r3 |= 1;
1501 	r4 = (uint32_t)(r4 % r2);
1502 	r4 /= r3;
1503 
1504 	dve.out[0].u64 = r2;
1505 	dve.out[1].u64 = r3;
1506 	dve.out[2].u64 = r4;
1507 
1508 	/*
1509 	 * in the test prog we attempted to divide by zero.
1510 	 * so return value should return 0.
1511 	 */
1512 	return cmp_res(__func__, 0, rc, dve.out, dvt->out, sizeof(dve.out));
1513 }
1514 
/* call test-cases */
static const struct ebpf_insn test_call1_prog[] = {

	/* r2 = *(uint32_t *)(r1 + offsetof(u32)) */
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_offset, u32),
	},
	/* r3 = *(uint64_t *)(r1 + offsetof(u64)) */
	{
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_1,
		.off = offsetof(struct dummy_offset, u64),
	},
	/* spill both values onto the BPF stack (r10 is the frame pointer) */
	{
		.code = (BPF_STX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_10,
		.src_reg = EBPF_REG_2,
		.off = -4,
	},
	{
		.code = (BPF_STX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_10,
		.src_reg = EBPF_REG_3,
		.off = -16,
	},
	/* r2 = r10 - 4: address of the spilled 32-bit value */
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_10,
	},
	{
		.code = (EBPF_ALU64 | BPF_SUB | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = 4,
	},
	/* r3 = r10 - 16: address of the spilled 64-bit value */
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_10,
	},
	{
		.code = (EBPF_ALU64 | BPF_SUB | BPF_K),
		.dst_reg = EBPF_REG_3,
		.imm = 16,
	},
	/* dummy_func1(r1, r2, r3): updates the two stack slots in place */
	{
		.code = (BPF_JMP | EBPF_CALL),
		.imm = 0,
	},
	/* reload the (possibly modified) stack slots */
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_10,
		.off = -4,
	},
	{
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_10,
		.off = -16
	},
	/* return the sum of the two values */
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_2,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};
1587 
1588 static void
1589 dummy_func1(const void *p, uint32_t *v32, uint64_t *v64)
1590 {
1591 	const struct dummy_offset *dv;
1592 
1593 	dv = p;
1594 
1595 	v32[0] += dv->u16;
1596 	v64[0] += dv->u8;
1597 }
1598 
1599 static int
1600 test_call1_check(uint64_t rc, const void *arg)
1601 {
1602 	uint32_t v32;
1603 	uint64_t v64;
1604 	const struct dummy_offset *dv;
1605 
1606 	dv = arg;
1607 
1608 	v32 = dv->u32;
1609 	v64 = dv->u64;
1610 	dummy_func1(arg, &v32, &v64);
1611 	v64 += v32;
1612 
1613 	if (v64 != rc) {
1614 		printf("%s@%d: invalid return value "
1615 			"expected=0x%" PRIx64 ", actual=0x%" PRIx64 "\n",
1616 			__func__, __LINE__, v64, rc);
1617 		return -1;
1618 	}
1619 	return 0;
1620 	return cmp_res(__func__, v64, rc, dv, dv, sizeof(*dv));
1621 }
1622 
/*
 * External symbol table for test_call1: makes dummy_func1() callable
 * from the eBPF program; each args[] entry describes one pointer
 * argument and the size of the object it refers to.
 */
static const struct rte_bpf_xsym test_call1_xsym[] = {
	{
		.name = RTE_STR(dummy_func1),
		.type = RTE_BPF_XTYPE_FUNC,
		.func = {
			.val = (void *)dummy_func1,
			.nb_args = 3,
			.args = {
				/* const void *p */
				[0] = {
					.type = RTE_BPF_ARG_PTR,
					.size = sizeof(struct dummy_offset),
				},
				/* uint32_t *v32 */
				[1] = {
					.type = RTE_BPF_ARG_PTR,
					.size = sizeof(uint32_t),
				},
				/* uint64_t *v64 */
				[2] = {
					.type = RTE_BPF_ARG_PTR,
					.size = sizeof(uint64_t),
				},
			},
		},
	},
};
1647 
/*
 * Program for test_call2: carves two dummy_offset-sized slots out of
 * the BPF stack, lets dummy_func2() fill them, then returns
 * a.u64 + a.u32 + b.u16 + b.u8.
 */
static const struct ebpf_insn test_call2_prog[] = {

	/* r1 = r10 - sizeof(struct dummy_offset): first stack slot (a) */
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_10,
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_K),
		.dst_reg = EBPF_REG_1,
		.imm = -(int32_t)sizeof(struct dummy_offset),
	},
	/* r2 = r10 - 2 * sizeof(struct dummy_offset): second slot (b) */
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_10,
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_K),
		.dst_reg = EBPF_REG_2,
		.imm = -2 * (int32_t)sizeof(struct dummy_offset),
	},
	/* dummy_func2(r1, r2): fills both stack slots */
	{
		.code = (BPF_JMP | EBPF_CALL),
		.imm = 0,
	},
	/* r1 = a.u64 */
	{
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_10,
		.off = -(int32_t)(sizeof(struct dummy_offset) -
			offsetof(struct dummy_offset, u64)),
	},
	/* r0 = a.u32 */
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_10,
		.off = -(int32_t)(sizeof(struct dummy_offset) -
			offsetof(struct dummy_offset, u32)),
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_1,
	},
	/* r1 = b.u16 */
	{
		.code = (BPF_LDX | BPF_MEM | BPF_H),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_10,
		.off = -(int32_t)(2 * sizeof(struct dummy_offset) -
			offsetof(struct dummy_offset, u16)),
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_1,
	},
	/* r1 = b.u8 */
	{
		.code = (BPF_LDX | BPF_MEM | BPF_B),
		.dst_reg = EBPF_REG_1,
		.src_reg = EBPF_REG_10,
		.off = -(int32_t)(2 * sizeof(struct dummy_offset) -
			offsetof(struct dummy_offset, u8)),
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_1,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},

};
1722 
1723 static void
1724 dummy_func2(struct dummy_offset *a, struct dummy_offset *b)
1725 {
1726 	uint64_t v;
1727 
1728 	v = 0;
1729 	a->u64 = v++;
1730 	a->u32 = v++;
1731 	a->u16 = v++;
1732 	a->u8 = v++;
1733 	b->u64 = v++;
1734 	b->u32 = v++;
1735 	b->u16 = v++;
1736 	b->u8 = v++;
1737 }
1738 
1739 static int
1740 test_call2_check(uint64_t rc, const void *arg)
1741 {
1742 	uint64_t v;
1743 	struct dummy_offset a, b;
1744 
1745 	RTE_SET_USED(arg);
1746 
1747 	dummy_func2(&a, &b);
1748 	v = a.u64 + a.u32 + b.u16 + b.u8;
1749 
1750 	if (v != rc) {
1751 		printf("%s@%d: invalid return value "
1752 			"expected=0x%" PRIx64 ", actual=0x%" PRIx64 "\n",
1753 			__func__, __LINE__, v, rc);
1754 		return -1;
1755 	}
1756 	return 0;
1757 }
1758 
/*
 * External symbol table for test_call2: makes dummy_func2() callable
 * from the eBPF program; both arguments are pointers to a
 * dummy_offset-sized object on the BPF stack.
 */
static const struct rte_bpf_xsym test_call2_xsym[] = {
	{
		.name = RTE_STR(dummy_func2),
		.type = RTE_BPF_XTYPE_FUNC,
		.func = {
			.val = (void *)dummy_func2,
			.nb_args = 2,
			.args = {
				/* struct dummy_offset *a */
				[0] = {
					.type = RTE_BPF_ARG_PTR,
					.size = sizeof(struct dummy_offset),
				},
				/* struct dummy_offset *b */
				[1] = {
					.type = RTE_BPF_ARG_PTR,
					.size = sizeof(struct dummy_offset),
				},
			},
		},
	},
};
1779 
/*
 * Program for test_call3: calls dummy_func3(), which returns a pointer
 * (in r0) to a dummy_offset structure, then loads all four fields from
 * it and returns their sum.
 */
static const struct ebpf_insn test_call3_prog[] = {

	/* r0 = dummy_func3(r1) */
	{
		.code = (BPF_JMP | EBPF_CALL),
		.imm = 0,
	},
	/* r2 = r0->u8 */
	{
		.code = (BPF_LDX | BPF_MEM | BPF_B),
		.dst_reg = EBPF_REG_2,
		.src_reg = EBPF_REG_0,
		.off = offsetof(struct dummy_offset, u8),
	},
	/* r3 = r0->u16 */
	{
		.code = (BPF_LDX | BPF_MEM | BPF_H),
		.dst_reg = EBPF_REG_3,
		.src_reg = EBPF_REG_0,
		.off = offsetof(struct dummy_offset, u16),
	},
	/* r4 = r0->u32 */
	{
		.code = (BPF_LDX | BPF_MEM | BPF_W),
		.dst_reg = EBPF_REG_4,
		.src_reg = EBPF_REG_0,
		.off = offsetof(struct dummy_offset, u32),
	},
	/* r0 = r0->u64 (overwrites the pointer, read last) */
	{
		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_0,
		.off = offsetof(struct dummy_offset, u64),
	},
	/* return sum */
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_4,
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_3,
	},
	{
		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
		.dst_reg = EBPF_REG_0,
		.src_reg = EBPF_REG_2,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};
1830 
1831 static const struct dummy_offset *
1832 dummy_func3(const struct dummy_vect8 *p)
1833 {
1834 	return &p->in[RTE_DIM(p->in) - 1];
1835 }
1836 
1837 static void
1838 test_call3_prepare(void *arg)
1839 {
1840 	struct dummy_vect8 *pv;
1841 	struct dummy_offset *df;
1842 
1843 	pv = arg;
1844 	df = (struct dummy_offset *)(uintptr_t)dummy_func3(pv);
1845 
1846 	memset(pv, 0, sizeof(*pv));
1847 	df->u64 = (int32_t)TEST_FILL_1;
1848 	df->u32 = df->u64;
1849 	df->u16 = df->u64;
1850 	df->u8 = df->u64;
1851 }
1852 
1853 static int
1854 test_call3_check(uint64_t rc, const void *arg)
1855 {
1856 	uint64_t v;
1857 	const struct dummy_vect8 *pv;
1858 	const struct dummy_offset *dft;
1859 
1860 	pv = arg;
1861 	dft = dummy_func3(pv);
1862 
1863 	v = dft->u64;
1864 	v += dft->u32;
1865 	v += dft->u16;
1866 	v += dft->u8;
1867 
1868 	return cmp_res(__func__, v, rc, pv, pv, sizeof(*pv));
1869 }
1870 
/*
 * External symbol table for test_call3: dummy_func3() takes a pointer
 * to the whole input vector and returns a pointer into it; the .ret
 * descriptor tells librte_bpf how large the pointed-to object is.
 */
static const struct rte_bpf_xsym test_call3_xsym[] = {
	{
		.name = RTE_STR(dummy_func3),
		.type = RTE_BPF_XTYPE_FUNC,
		.func = {
			.val = (void *)dummy_func3,
			.nb_args = 1,
			.args = {
				/* const struct dummy_vect8 *p */
				[0] = {
					.type = RTE_BPF_ARG_PTR,
					.size = sizeof(struct dummy_vect8),
				},
			},
			.ret = {
				.type = RTE_BPF_ARG_PTR,
				.size = sizeof(struct dummy_offset),
			},
		},
	},
};
1891 
/*
 * Registry of all test-cases: each entry couples an eBPF program with
 * its input buffer size, a prepare callback that fills the buffer and
 * a check callback that validates the execution result.
 * Entries with allow_fail set may fail without failing the suite
 * (currently: call tests on 32-bit platforms).
 */
static const struct bpf_test tests[] = {
	{
		.name = "test_store1",
		.arg_sz = sizeof(struct dummy_offset),
		.prm = {
			.ins = test_store1_prog,
			.nb_ins = RTE_DIM(test_store1_prog),
			.prog_arg = {
				.type = RTE_BPF_ARG_PTR,
				.size = sizeof(struct dummy_offset),
			},
		},
		.prepare = test_store1_prepare,
		.check_result = test_store1_check,
	},
	{
		.name = "test_store2",
		.arg_sz = sizeof(struct dummy_offset),
		.prm = {
			.ins = test_store2_prog,
			.nb_ins = RTE_DIM(test_store2_prog),
			.prog_arg = {
				.type = RTE_BPF_ARG_PTR,
				.size = sizeof(struct dummy_offset),
			},
		},
		.prepare = test_store1_prepare,
		.check_result = test_store1_check,
	},
	{
		.name = "test_load1",
		.arg_sz = sizeof(struct dummy_offset),
		.prm = {
			.ins = test_load1_prog,
			.nb_ins = RTE_DIM(test_load1_prog),
			.prog_arg = {
				.type = RTE_BPF_ARG_PTR,
				.size = sizeof(struct dummy_offset),
			},
		},
		.prepare = test_load1_prepare,
		.check_result = test_load1_check,
	},
	{
		.name = "test_ldimm1",
		.arg_sz = sizeof(struct dummy_offset),
		.prm = {
			.ins = test_ldimm1_prog,
			.nb_ins = RTE_DIM(test_ldimm1_prog),
			.prog_arg = {
				.type = RTE_BPF_ARG_PTR,
				.size = sizeof(struct dummy_offset),
			},
		},
		.prepare = test_store1_prepare,
		.check_result = test_ldimm1_check,
	},
	{
		.name = "test_mul1",
		.arg_sz = sizeof(struct dummy_vect8),
		.prm = {
			.ins = test_mul1_prog,
			.nb_ins = RTE_DIM(test_mul1_prog),
			.prog_arg = {
				.type = RTE_BPF_ARG_PTR,
				.size = sizeof(struct dummy_vect8),
			},
		},
		.prepare = test_mul1_prepare,
		.check_result = test_mul1_check,
	},
	{
		.name = "test_shift1",
		.arg_sz = sizeof(struct dummy_vect8),
		.prm = {
			.ins = test_shift1_prog,
			.nb_ins = RTE_DIM(test_shift1_prog),
			.prog_arg = {
				.type = RTE_BPF_ARG_PTR,
				.size = sizeof(struct dummy_vect8),
			},
		},
		.prepare = test_shift1_prepare,
		.check_result = test_shift1_check,
	},
	{
		.name = "test_jump1",
		.arg_sz = sizeof(struct dummy_vect8),
		.prm = {
			.ins = test_jump1_prog,
			.nb_ins = RTE_DIM(test_jump1_prog),
			.prog_arg = {
				.type = RTE_BPF_ARG_PTR,
				.size = sizeof(struct dummy_vect8),
			},
		},
		.prepare = test_jump1_prepare,
		.check_result = test_jump1_check,
	},
	{
		.name = "test_alu1",
		.arg_sz = sizeof(struct dummy_vect8),
		.prm = {
			.ins = test_alu1_prog,
			.nb_ins = RTE_DIM(test_alu1_prog),
			.prog_arg = {
				.type = RTE_BPF_ARG_PTR,
				.size = sizeof(struct dummy_vect8),
			},
		},
		.prepare = test_jump1_prepare,
		.check_result = test_alu1_check,
	},
	{
		.name = "test_bele1",
		.arg_sz = sizeof(struct dummy_vect8),
		.prm = {
			.ins = test_bele1_prog,
			.nb_ins = RTE_DIM(test_bele1_prog),
			.prog_arg = {
				.type = RTE_BPF_ARG_PTR,
				.size = sizeof(struct dummy_vect8),
			},
		},
		.prepare = test_bele1_prepare,
		.check_result = test_bele1_check,
	},
	{
		.name = "test_xadd1",
		.arg_sz = sizeof(struct dummy_offset),
		.prm = {
			.ins = test_xadd1_prog,
			.nb_ins = RTE_DIM(test_xadd1_prog),
			.prog_arg = {
				.type = RTE_BPF_ARG_PTR,
				.size = sizeof(struct dummy_offset),
			},
		},
		.prepare = test_store1_prepare,
		.check_result = test_xadd1_check,
	},
	{
		.name = "test_div1",
		.arg_sz = sizeof(struct dummy_vect8),
		.prm = {
			.ins = test_div1_prog,
			.nb_ins = RTE_DIM(test_div1_prog),
			.prog_arg = {
				.type = RTE_BPF_ARG_PTR,
				.size = sizeof(struct dummy_vect8),
			},
		},
		.prepare = test_mul1_prepare,
		.check_result = test_div1_check,
	},
	/* tests below call external functions via xsym tables */
	{
		.name = "test_call1",
		.arg_sz = sizeof(struct dummy_offset),
		.prm = {
			.ins = test_call1_prog,
			.nb_ins = RTE_DIM(test_call1_prog),
			.prog_arg = {
				.type = RTE_BPF_ARG_PTR,
				.size = sizeof(struct dummy_offset),
			},
			.xsym = test_call1_xsym,
			.nb_xsym = RTE_DIM(test_call1_xsym),
		},
		.prepare = test_load1_prepare,
		.check_result = test_call1_check,
		/* for now don't support function calls on 32 bit platform */
		.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
	},
	{
		.name = "test_call2",
		.arg_sz = sizeof(struct dummy_offset),
		.prm = {
			.ins = test_call2_prog,
			.nb_ins = RTE_DIM(test_call2_prog),
			.prog_arg = {
				.type = RTE_BPF_ARG_PTR,
				.size = sizeof(struct dummy_offset),
			},
			.xsym = test_call2_xsym,
			.nb_xsym = RTE_DIM(test_call2_xsym),
		},
		.prepare = test_store1_prepare,
		.check_result = test_call2_check,
		/* for now don't support function calls on 32 bit platform */
		.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
	},
	{
		.name = "test_call3",
		.arg_sz = sizeof(struct dummy_vect8),
		.prm = {
			.ins = test_call3_prog,
			.nb_ins = RTE_DIM(test_call3_prog),
			.prog_arg = {
				.type = RTE_BPF_ARG_PTR,
				.size = sizeof(struct dummy_vect8),
			},
			.xsym = test_call3_xsym,
			.nb_xsym = RTE_DIM(test_call3_xsym),
		},
		.prepare = test_call3_prepare,
		.check_result = test_call3_check,
		/* for now don't support function calls on 32 bit platform */
		.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
	},
};
2102 
2103 static int
2104 run_test(const struct bpf_test *tst)
2105 {
2106 	int32_t ret, rv;
2107 	int64_t rc;
2108 	struct rte_bpf *bpf;
2109 	struct rte_bpf_jit jit;
2110 	uint8_t tbuf[tst->arg_sz];
2111 
2112 	printf("%s(%s) start\n", __func__, tst->name);
2113 
2114 	bpf = rte_bpf_load(&tst->prm);
2115 	if (bpf == NULL) {
2116 		printf("%s@%d: failed to load bpf code, error=%d(%s);\n",
2117 			__func__, __LINE__, rte_errno, strerror(rte_errno));
2118 		return -1;
2119 	}
2120 
2121 	tst->prepare(tbuf);
2122 
2123 	rc = rte_bpf_exec(bpf, tbuf);
2124 	ret = tst->check_result(rc, tbuf);
2125 	if (ret != 0) {
2126 		printf("%s@%d: check_result(%s) failed, error: %d(%s);\n",
2127 			__func__, __LINE__, tst->name, ret, strerror(ret));
2128 	}
2129 
2130 	rte_bpf_get_jit(bpf, &jit);
2131 	if (jit.func == NULL)
2132 		return 0;
2133 
2134 	tst->prepare(tbuf);
2135 	rc = jit.func(tbuf);
2136 	rv = tst->check_result(rc, tbuf);
2137 	ret |= rv;
2138 	if (rv != 0) {
2139 		printf("%s@%d: check_result(%s) failed, error: %d(%s);\n",
2140 			__func__, __LINE__, tst->name, rv, strerror(ret));
2141 	}
2142 
2143 	rte_bpf_destroy(bpf);
2144 	return ret;
2145 
2146 }
2147 
2148 static int
2149 test_bpf(void)
2150 {
2151 	int32_t rc, rv;
2152 	uint32_t i;
2153 
2154 	rc = 0;
2155 	for (i = 0; i != RTE_DIM(tests); i++) {
2156 		rv = run_test(tests + i);
2157 		if (tests[i].allow_fail == 0)
2158 			rc |= rv;
2159 	}
2160 
2161 	return rc;
2162 }
2163 
/* register the suite with the test framework under "bpf_autotest" */
REGISTER_TEST_COMMAND(bpf_autotest, test_bpf);
2165