xref: /dpdk/lib/bpf/bpf_validate.c (revision 7e06c0de1952d3109a5b0c4779d7e7d8059c9d78)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4 
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <string.h>
8 #include <errno.h>
9 #include <stdint.h>
10 #include <inttypes.h>
11 
12 #include <rte_common.h>
13 
14 #include "bpf_impl.h"
15 
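/*
 * Internal marker for pointers into the BPF stack space.
 * It reuses RTE_BPF_ARG_RESERVED, a value that can never appear
 * in a user-provided argument description.
 */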
16 #define BPF_ARG_PTR_STACK RTE_BPF_ARG_RESERVED
17 
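/*
 * Verifier's estimate of a register (or stack slot) contents:
 * argument type information, a mask of bits that may be set, and
 * conservative [min, max] ranges tracked separately for the signed
 * and unsigned interpretations of the value.
 */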
18 struct bpf_reg_val {
19 	struct rte_bpf_arg v;
20 	uint64_t mask;
21 	struct {
22 		int64_t min;
23 		int64_t max;
24 	} s;
25 	struct {
26 		uint64_t min;
27 		uint64_t max;
28 	} u;
29 };
30 
31 struct bpf_eval_state {
32 	struct bpf_reg_val rv[EBPF_REG_NUM];
33 	struct bpf_reg_val sv[MAX_BPF_STACK_SIZE / sizeof(uint64_t)];
34 };
35 
36 /* possible instruction node colour */
37 enum {
38 	WHITE,
39 	GREY,
40 	BLACK,
41 	MAX_NODE_COLOUR
42 };
43 
44 /* possible edge types */
45 enum {
46 	UNKNOWN_EDGE,
47 	TREE_EDGE,
48 	BACK_EDGE,
49 	CROSS_EDGE,
50 	MAX_EDGE_TYPE
51 };
52 
53 #define	MAX_EDGES	2
54 
55 struct inst_node {
56 	uint8_t colour;
57 	uint8_t nb_edge:4;
58 	uint8_t cur_edge:4;
59 	uint8_t edge_type[MAX_EDGES];
60 	uint32_t edge_dest[MAX_EDGES];
61 	uint32_t prev_node;
62 	struct bpf_eval_state *evst;
63 };
64 
65 struct bpf_verifier {
66 	const struct rte_bpf_prm *prm;
67 	struct inst_node *in;
68 	uint64_t stack_sz;
69 	uint32_t nb_nodes;
70 	uint32_t nb_jcc_nodes;
71 	uint32_t nb_ldmb_nodes;
72 	uint32_t node_colour[MAX_NODE_COLOUR];
73 	uint32_t edge_type[MAX_EDGE_TYPE];
74 	struct bpf_eval_state *evst;
75 	struct inst_node *evin;
76 	struct {
77 		uint32_t num;
78 		uint32_t cur;
79 		struct bpf_eval_state *ent;
80 	} evst_pool;
81 };
82 
83 struct bpf_ins_check {
84 	struct {
85 		uint16_t dreg;
86 		uint16_t sreg;
87 	} mask;
88 	struct {
89 		uint16_t min;
90 		uint16_t max;
91 	} off;
92 	struct {
93 		uint32_t min;
94 		uint32_t max;
95 	} imm;
96 	const char * (*check)(const struct ebpf_insn *);
97 	const char * (*eval)(struct bpf_verifier *, const struct ebpf_insn *);
98 };
99 
100 #define	ALL_REGS	RTE_LEN2MASK(EBPF_REG_NUM, uint16_t)
101 #define	WRT_REGS	RTE_LEN2MASK(EBPF_REG_10, uint16_t)
102 #define	ZERO_REG	RTE_LEN2MASK(EBPF_REG_1, uint16_t)
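/*
 * Register bitmasks used by the per-opcode checks below:
 * ALL_REGS - any of r0-r10, WRT_REGS - any writable register
 * (r0-r9, as r10 is the read-only frame pointer), ZERO_REG - only r0,
 * used for register fields that must be left as zero.
 * E.g. WRT_REGS == 0x3ff, so (WRT_REGS & 1 << EBPF_REG_10) == 0.
 */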
103 
104 /* For LD_IND R6 is an implicit CTX register. */
105 #define	IND_SRC_REGS	(WRT_REGS ^ 1 << EBPF_REG_6)
106 
107 /*
108  * check and evaluate functions for particular instruction types.
109  */
110 
111 static const char *
112 check_alu_bele(const struct ebpf_insn *ins)
113 {
114 	if (ins->imm != 16 && ins->imm != 32 && ins->imm != 64)
115 		return "invalid imm field";
116 	return NULL;
117 }
118 
119 static const char *
120 eval_exit(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
121 {
122 	RTE_SET_USED(ins);
123 	if (bvf->evst->rv[EBPF_REG_0].v.type == RTE_BPF_ARG_UNDEF)
124 		return "undefined return value";
125 	return NULL;
126 }
127 
128 /* set up max possible bounds for the given mask */
129 static void
130 eval_umax_bound(struct bpf_reg_val *rv, uint64_t mask)
131 {
132 	rv->u.max = mask;
133 	rv->u.min = 0;
134 }
135 
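/*
 * Widest signed range representable under the given mask:
 * e.g. for mask == UINT32_MAX this yields [INT32_MIN, INT32_MAX]
 * (max == mask >> 1 == 0x7fffffff, min == max ^ UINT64_MAX).
 */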
136 static void
137 eval_smax_bound(struct bpf_reg_val *rv, uint64_t mask)
138 {
139 	rv->s.max = mask >> 1;
140 	rv->s.min = rv->s.max ^ UINT64_MAX;
141 }
142 
143 static void
144 eval_max_bound(struct bpf_reg_val *rv, uint64_t mask)
145 {
146 	eval_umax_bound(rv, mask);
147 	eval_smax_bound(rv, mask);
148 }
149 
150 static void
151 eval_fill_max_bound(struct bpf_reg_val *rv, uint64_t mask)
152 {
153 	eval_max_bound(rv, mask);
154 	rv->v.type = RTE_BPF_ARG_RAW;
155 	rv->mask = mask;
156 }
157 
158 static void
159 eval_fill_imm64(struct bpf_reg_val *rv, uint64_t mask, uint64_t val)
160 {
161 	rv->mask = mask;
162 	rv->s.min = val;
163 	rv->s.max = val;
164 	rv->u.min = val;
165 	rv->u.max = val;
166 }
167 
168 static void
169 eval_fill_imm(struct bpf_reg_val *rv, uint64_t mask, int32_t imm)
170 {
171 	uint64_t v;
172 
173 	v = (uint64_t)imm & mask;
174 
175 	rv->v.type = RTE_BPF_ARG_RAW;
176 	eval_fill_imm64(rv, mask, v);
177 }
178 
179 static const char *
180 eval_ld_imm64(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
181 {
182 	uint32_t i;
183 	uint64_t val;
184 	struct bpf_reg_val *rd;
185 
186 	val = (uint32_t)ins[0].imm | (uint64_t)(uint32_t)ins[1].imm << 32;
187 
188 	rd = bvf->evst->rv + ins->dst_reg;
189 	rd->v.type = RTE_BPF_ARG_RAW;
190 	eval_fill_imm64(rd, UINT64_MAX, val);
191 
192 	for (i = 0; i != bvf->prm->nb_xsym; i++) {
193 
194 		/* load of external variable */
195 		if (bvf->prm->xsym[i].type == RTE_BPF_XTYPE_VAR &&
196 				(uintptr_t)bvf->prm->xsym[i].var.val == val) {
197 			rd->v = bvf->prm->xsym[i].var.desc;
198 			eval_fill_imm64(rd, UINT64_MAX, 0);
199 			break;
200 		}
201 	}
202 
203 	return NULL;
204 }
205 
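/*
 * Narrow an existing estimate to the given operand mask, e.g. when
 * a 64-bit value is consumed by a 32-bit ALU instruction.
 * If masking may have clipped the unsigned range, fall back to
 * the conservative [0, mask].
 */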
206 static void
207 eval_apply_mask(struct bpf_reg_val *rv, uint64_t mask)
208 {
209 	struct bpf_reg_val rt;
210 
211 	rt.u.min = rv->u.min & mask;
212 	rt.u.max = rv->u.max & mask;
213 	if (rt.u.min != rv->u.min || rt.u.max != rv->u.max) {
214 		rv->u.max = RTE_MAX(rt.u.max, mask);
215 		rv->u.min = 0;
216 	}
217 
218 	eval_smax_bound(&rt, mask);
219 	rv->s.max = RTE_MIN(rt.s.max, rv->s.max);
220 	rv->s.min = RTE_MAX(rt.s.min, rv->s.min);
221 
222 	rv->mask = mask;
223 }
224 
225 static void
226 eval_add(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, uint64_t msk)
227 {
228 	struct bpf_reg_val rv;
229 
230 	rv.u.min = (rd->u.min + rs->u.min) & msk;
231 	rv.u.max = (rd->u.max + rs->u.max) & msk;
232 	rv.s.min = (rd->s.min + rs->s.min) & msk;
233 	rv.s.max = (rd->s.max + rs->s.max) & msk;
234 
235 	/*
236 	 * if at least one of the operands is not constant,
237 	 * then check for overflow
238 	 */
239 	if ((rd->u.min != rd->u.max || rs->u.min != rs->u.max) &&
240 			(rv.u.min < rd->u.min || rv.u.max < rd->u.max))
241 		eval_umax_bound(&rv, msk);
242 
243 	if ((rd->s.min != rd->s.max || rs->s.min != rs->s.max) &&
244 			(((rs->s.min < 0 && rv.s.min > rd->s.min) ||
245 			rv.s.min < rd->s.min) ||
246 			((rs->s.max < 0 && rv.s.max > rd->s.max) ||
247 				rv.s.max < rd->s.max)))
248 		eval_smax_bound(&rv, msk);
249 
250 	rd->s = rv.s;
251 	rd->u = rv.u;
252 }
253 
254 static void
255 eval_sub(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, uint64_t msk)
256 {
257 	struct bpf_reg_val rv;
258 
259 	rv.u.min = (rd->u.min - rs->u.max) & msk;
260 	rv.u.max = (rd->u.max - rs->u.min) & msk;
261 	rv.s.min = (rd->s.min - rs->s.max) & msk;
262 	rv.s.max = (rd->s.max - rs->s.min) & msk;
263 
264 	/*
265 	 * if at least one of the operands is not constant,
266 	 * then check for overflow
267 	 */
268 	if ((rd->u.min != rd->u.max || rs->u.min != rs->u.max) &&
269 			(rv.u.min > rd->u.min || rv.u.max > rd->u.max))
270 		eval_umax_bound(&rv, msk);
271 
272 	if ((rd->s.min != rd->s.max || rs->s.min != rs->s.max) &&
273 			(((rs->s.min < 0 && rv.s.min < rd->s.min) ||
274 			rv.s.min > rd->s.min) ||
275 			((rs->s.max < 0 && rv.s.max < rd->s.max) ||
276 			rv.s.max > rd->s.max)))
277 		eval_smax_bound(&rv, msk);
278 
279 	rd->s = rv.s;
280 	rd->u = rv.u;
281 }
282 
283 static void
284 eval_lsh(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
285 	uint64_t msk)
286 {
287 	/* check if shift value is less than max result bits */
288 	if (rs->u.max >= opsz) {
289 		eval_max_bound(rd, msk);
290 		return;
291 	}
292 
293 	/* check for overflow */
294 	if (rd->u.max > RTE_LEN2MASK(opsz - rs->u.max, uint64_t))
295 		eval_umax_bound(rd, msk);
296 	else {
297 		rd->u.max <<= rs->u.max;
298 		rd->u.min <<= rs->u.min;
299 	}
300 
301 	/* check that dreg values are positive and would remain positive */
302 	if ((uint64_t)rd->s.min >> (opsz - 1) != 0 || rd->s.max >=
303 			RTE_LEN2MASK(opsz - rs->u.max - 1, int64_t))
304 		eval_smax_bound(rd, msk);
305 	else {
306 		rd->s.max <<= rs->u.max;
307 		rd->s.min <<= rs->u.min;
308 	}
309 }
310 
311 static void
312 eval_rsh(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
313 	uint64_t msk)
314 {
315 	/* check if shift value is less than max result bits */
316 	if (rs->u.max >= opsz) {
317 		eval_max_bound(rd, msk);
318 		return;
319 	}
320 
321 	rd->u.max >>= rs->u.min;
322 	rd->u.min >>= rs->u.max;
323 
324 	/* check that dreg values are always positive */
325 	if ((uint64_t)rd->s.min >> (opsz - 1) != 0)
326 		eval_smax_bound(rd, msk);
327 	else {
328 		rd->s.max >>= rs->u.min;
329 		rd->s.min >>= rs->u.max;
330 	}
331 }
332 
333 static void
334 eval_arsh(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
335 	uint64_t msk)
336 {
337 	uint32_t shv;
338 
339 	/* check if shift value is less than max result bits */
340 	if (rs->u.max >= opsz) {
341 		eval_max_bound(rd, msk);
342 		return;
343 	}
344 
345 	rd->u.max = (int64_t)rd->u.max >> rs->u.min;
346 	rd->u.min = (int64_t)rd->u.min >> rs->u.max;
347 
348 	/* if we have 32-bit values - extend them to 64-bit */
349 	if (opsz == sizeof(uint32_t) * CHAR_BIT) {
350 		rd->s.min <<= opsz;
351 		rd->s.max <<= opsz;
352 		shv = opsz;
353 	} else
354 		shv = 0;
355 
356 	if (rd->s.min < 0)
357 		rd->s.min = (rd->s.min >> (rs->u.min + shv)) & msk;
358 	else
359 		rd->s.min = (rd->s.min >> (rs->u.max + shv)) & msk;
360 
361 	if (rd->s.max < 0)
362 		rd->s.max = (rd->s.max >> (rs->u.max + shv)) & msk;
363 	else
364 		rd->s.max = (rd->s.max >> (rs->u.min + shv)) & msk;
365 }
366 
367 static uint64_t
368 eval_umax_bits(uint64_t v, size_t opsz)
369 {
370 	if (v == 0)
371 		return 0;
372 
373 	v = rte_clz64(v);
374 	return RTE_LEN2MASK(opsz - v, uint64_t);
375 }
376 
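/*
 * eval_umax_bits() rounds an upper bound up to an all-ones bit
 * pattern of the same width: e.g. for v == 5 and opsz == 64 it
 * returns 0x7. The two helpers below combine such patterns to bound
 * the result of bitwise AND/OR on non-constant operands.
 */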
377 /* estimate max possible value for (v1 & v2) */
378 static uint64_t
379 eval_uand_max(uint64_t v1, uint64_t v2, size_t opsz)
380 {
381 	v1 = eval_umax_bits(v1, opsz);
382 	v2 = eval_umax_bits(v2, opsz);
383 	return (v1 & v2);
384 }
385 
386 /* estimate max possible value for (v1 | v2) */
387 static uint64_t
388 eval_uor_max(uint64_t v1, uint64_t v2, size_t opsz)
389 {
390 	v1 = eval_umax_bits(v1, opsz);
391 	v2 = eval_umax_bits(v2, opsz);
392 	return (v1 | v2);
393 }
394 
395 static void
396 eval_and(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
397 	uint64_t msk)
398 {
399 	/* both operands are constants */
400 	if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
401 		rd->u.min &= rs->u.min;
402 		rd->u.max &= rs->u.max;
403 	} else {
404 		rd->u.max = eval_uand_max(rd->u.max, rs->u.max, opsz);
405 		rd->u.min &= rs->u.min;
406 	}
407 
408 	/* both operands are constants */
409 	if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
410 		rd->s.min &= rs->s.min;
411 		rd->s.max &= rs->s.max;
412 	/* at least one operand is non-negative */
413 	} else if (rd->s.min >= 0 || rs->s.min >= 0) {
414 		rd->s.max = eval_uand_max(rd->s.max & (msk >> 1),
415 			rs->s.max & (msk >> 1), opsz);
416 		rd->s.min &= rs->s.min;
417 	} else
418 		eval_smax_bound(rd, msk);
419 }
420 
421 static void
422 eval_or(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
423 	uint64_t msk)
424 {
425 	/* both operands are constants */
426 	if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
427 		rd->u.min |= rs->u.min;
428 		rd->u.max |= rs->u.max;
429 	} else {
430 		rd->u.max = eval_uor_max(rd->u.max, rs->u.max, opsz);
431 		rd->u.min |= rs->u.min;
432 	}
433 
434 	/* both operands are constants */
435 	if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
436 		rd->s.min |= rs->s.min;
437 		rd->s.max |= rs->s.max;
438 
439 	/* at least one operand is non-negative */
440 	} else if (rd->s.min >= 0 || rs->s.min >= 0) {
441 		rd->s.max = eval_uor_max(rd->s.max, rs->s.max, opsz);
442 		rd->s.min |= rs->s.min;
443 	} else
444 		eval_smax_bound(rd, msk);
445 }
446 
447 static void
448 eval_xor(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
449 	uint64_t msk)
450 {
451 	/* both operands are constants */
452 	if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
453 		rd->u.min ^= rs->u.min;
454 		rd->u.max ^= rs->u.max;
455 	} else {
456 		rd->u.max = eval_uor_max(rd->u.max, rs->u.max, opsz);
457 		rd->u.min = 0;
458 	}
459 
460 	/* both operands are constants */
461 	if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
462 		rd->s.min ^= rs->s.min;
463 		rd->s.max ^= rs->s.max;
464 
465 	/* at least one operand is non-negative */
466 	} else if (rd->s.min >= 0 || rs->s.min >= 0) {
467 		rd->s.max = eval_uor_max(rd->s.max, rs->s.max, opsz);
468 		rd->s.min = 0;
469 	} else
470 		eval_smax_bound(rd, msk);
471 }
472 
473 static void
474 eval_mul(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
475 	uint64_t msk)
476 {
477 	/* both operands are constants */
478 	if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
479 		rd->u.min = (rd->u.min * rs->u.min) & msk;
480 		rd->u.max = (rd->u.max * rs->u.max) & msk;
481 	/* check for overflow */
482 	} else if (rd->u.max <= msk >> opsz / 2 && rs->u.max <= msk >> opsz / 2) {
483 		rd->u.max *= rs->u.max;
484 		rd->u.min *= rs->u.min;
485 	} else
486 		eval_umax_bound(rd, msk);
487 
488 	/* both operands are constants */
489 	if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
490 		rd->s.min = (rd->s.min * rs->s.min) & msk;
491 		rd->s.max = (rd->s.max * rs->s.max) & msk;
492 	/* check that both operands are positive and no overflow */
493 	} else if (rd->s.min >= 0 && rs->s.min >= 0) {
494 		rd->s.max *= rs->s.max;
495 		rd->s.min *= rs->s.min;
496 	} else
497 		eval_smax_bound(rd, msk);
498 }
499 
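/*
 * Unsigned bounds for DIV/MOD with a non-constant divisor:
 * the quotient stays within [0, dividend-max], while the remainder
 * is additionally capped at divisor-max - 1.
 */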
500 static const char *
501 eval_divmod(uint32_t op, struct bpf_reg_val *rd, struct bpf_reg_val *rs,
502 	size_t opsz, uint64_t msk)
503 {
504 	/* both operands are constants */
505 	if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
506 		if (rs->u.max == 0)
507 			return "division by 0";
508 		if (op == BPF_DIV) {
509 			rd->u.min /= rs->u.min;
510 			rd->u.max /= rs->u.max;
511 		} else {
512 			rd->u.min %= rs->u.min;
513 			rd->u.max %= rs->u.max;
514 		}
515 	} else {
516 		if (op == BPF_MOD)
517 			rd->u.max = RTE_MIN(rd->u.max, rs->u.max - 1);
520 		rd->u.min = 0;
521 	}
522 
523 	/* if we have 32-bit values - extend them to 64-bit */
524 	if (opsz == sizeof(uint32_t) * CHAR_BIT) {
525 		rd->s.min = (int32_t)rd->s.min;
526 		rd->s.max = (int32_t)rd->s.max;
527 		rs->s.min = (int32_t)rs->s.min;
528 		rs->s.max = (int32_t)rs->s.max;
529 	}
530 
531 	/* both operands are constants */
532 	if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
533 		if (rs->s.max == 0)
534 			return "division by 0";
535 		if (op == BPF_DIV) {
536 			rd->s.min /= rs->s.min;
537 			rd->s.max /= rs->s.max;
538 		} else {
539 			rd->s.min %= rs->s.min;
540 			rd->s.max %= rs->s.max;
541 		}
542 	} else if (op == BPF_MOD) {
543 		rd->s.max = RTE_MAX(rd->s.max, 0);
544 		rd->s.min = RTE_MIN(rd->s.min, 0);
545 	} else
546 		eval_smax_bound(rd, msk);
547 
548 	rd->s.max &= msk;
549 	rd->s.min &= msk;
550 
551 	return NULL;
552 }
553 
554 static void
555 eval_neg(struct bpf_reg_val *rd, size_t opsz, uint64_t msk)
556 {
557 	uint64_t ux, uy;
558 	int64_t sx, sy;
559 
560 	/* if we have 32-bit values - extend them to 64-bit */
561 	if (opsz == sizeof(uint32_t) * CHAR_BIT) {
562 		rd->u.min = (int32_t)rd->u.min;
563 		rd->u.max = (int32_t)rd->u.max;
564 	}
565 
566 	ux = -(int64_t)rd->u.min & msk;
567 	uy = -(int64_t)rd->u.max & msk;
568 
569 	rd->u.max = RTE_MAX(ux, uy);
570 	rd->u.min = RTE_MIN(ux, uy);
571 
572 	/* if we have 32-bit values - extend them to 64-bit */
573 	if (opsz == sizeof(uint32_t) * CHAR_BIT) {
574 		rd->s.min = (int32_t)rd->s.min;
575 		rd->s.max = (int32_t)rd->s.max;
576 	}
577 
578 	sx = -rd->s.min & msk;
579 	sy = -rd->s.max & msk;
580 
581 	rd->s.max = RTE_MAX(sx, sy);
582 	rd->s.min = RTE_MIN(sx, sy);
583 }
584 
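/*
 * Legacy BPF_ABS/BPF_IND packet loads: data is fetched from the mbuf
 * pointed to by the implicit CTX register R6, at offset imm (plus the
 * src register for BPF_IND); R1-R5 are clobbered and the result is
 * zero-extended into R0.
 */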
585 static const char *
586 eval_ld_mbuf(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
587 {
588 	uint32_t i, mode;
589 	struct bpf_reg_val *rv, ri, rs;
590 
591 	mode = BPF_MODE(ins->code);
592 
593 	/* R6 is an implicit input that must contain pointer to mbuf */
594 	if (bvf->evst->rv[EBPF_REG_6].v.type != RTE_BPF_ARG_PTR_MBUF)
595 		return "invalid type for implicit ctx register";
596 
597 	if (mode == BPF_IND) {
598 		rs = bvf->evst->rv[ins->src_reg];
599 		if (rs.v.type != RTE_BPF_ARG_RAW)
600 			return "unexpected type for src register";
601 
602 		eval_fill_imm(&ri, UINT64_MAX, ins->imm);
603 		eval_add(&rs, &ri, UINT64_MAX);
604 
605 		if (rs.s.max < 0 || rs.u.min > UINT32_MAX)
606 			return "mbuf boundary violation";
607 	}
608 
609 	/* R1-R5 scratch registers */
610 	for (i = EBPF_REG_1; i != EBPF_REG_6; i++)
611 		bvf->evst->rv[i].v.type = RTE_BPF_ARG_UNDEF;
612 
613 	/* R0 is an implicit output, contains data fetched from the packet */
614 	rv = bvf->evst->rv + EBPF_REG_0;
615 	rv->v.size = bpf_size(BPF_SIZE(ins->code));
616 	eval_fill_max_bound(rv, RTE_LEN2MASK(rv->v.size * CHAR_BIT, uint64_t));
617 
618 	return NULL;
619 }
620 
621 /*
622  * check that destination and source operand are in defined state.
623  */
624 static const char *
625 eval_defined(const struct bpf_reg_val *dst, const struct bpf_reg_val *src)
626 {
627 	if (dst != NULL && dst->v.type == RTE_BPF_ARG_UNDEF)
628 		return "dest reg value is undefined";
629 	if (src != NULL && src->v.type == RTE_BPF_ARG_UNDEF)
630 		return "src reg value is undefined";
631 	return NULL;
632 }
633 
634 static const char *
635 eval_alu(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
636 {
637 	uint64_t msk;
638 	uint32_t op;
639 	size_t opsz;
640 	const char *err;
641 	struct bpf_eval_state *st;
642 	struct bpf_reg_val *rd, rs;
643 
644 	opsz = (BPF_CLASS(ins->code) == BPF_ALU) ?
645 		sizeof(uint32_t) : sizeof(uint64_t);
646 	opsz = opsz * CHAR_BIT;
647 	msk = RTE_LEN2MASK(opsz, uint64_t);
648 
649 	st = bvf->evst;
650 	rd = st->rv + ins->dst_reg;
651 
652 	if (BPF_SRC(ins->code) == BPF_X) {
653 		rs = st->rv[ins->src_reg];
654 		eval_apply_mask(&rs, msk);
655 	} else
656 		eval_fill_imm(&rs, msk, ins->imm);
657 
658 	eval_apply_mask(rd, msk);
659 
660 	op = BPF_OP(ins->code);
661 
662 	/* Allow self-xor as a way to zero the register */
663 	if (op == BPF_XOR && BPF_SRC(ins->code) == BPF_X &&
664 	    ins->src_reg == ins->dst_reg) {
665 		eval_fill_imm(&rs, UINT64_MAX, 0);
666 		eval_fill_imm(rd, UINT64_MAX, 0);
667 	}
668 
669 	err = eval_defined((op != EBPF_MOV) ? rd : NULL,
670 			   (op != BPF_NEG) ? &rs : NULL);
671 	if (err != NULL)
672 		return err;
673 
674 	if (op == BPF_ADD)
675 		eval_add(rd, &rs, msk);
676 	else if (op == BPF_SUB)
677 		eval_sub(rd, &rs, msk);
678 	else if (op == BPF_LSH)
679 		eval_lsh(rd, &rs, opsz, msk);
680 	else if (op == BPF_RSH)
681 		eval_rsh(rd, &rs, opsz, msk);
682 	else if (op == EBPF_ARSH)
683 		eval_arsh(rd, &rs, opsz, msk);
684 	else if (op == BPF_AND)
685 		eval_and(rd, &rs, opsz, msk);
686 	else if (op == BPF_OR)
687 		eval_or(rd, &rs, opsz, msk);
688 	else if (op == BPF_XOR)
689 		eval_xor(rd, &rs, opsz, msk);
690 	else if (op == BPF_MUL)
691 		eval_mul(rd, &rs, opsz, msk);
692 	else if (op == BPF_DIV || op == BPF_MOD)
693 		err = eval_divmod(op, rd, &rs, opsz, msk);
694 	else if (op == BPF_NEG)
695 		eval_neg(rd, opsz, msk);
696 	else if (op == EBPF_MOV)
697 		*rd = rs;
698 	else
699 		eval_max_bound(rd, msk);
700 
701 	return err;
702 }
703 
704 static const char *
705 eval_bele(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
706 {
707 	uint64_t msk;
708 	struct bpf_eval_state *st;
709 	struct bpf_reg_val *rd;
710 	const char *err;
711 
712 	msk = RTE_LEN2MASK(ins->imm, uint64_t);
713 
714 	st = bvf->evst;
715 	rd = st->rv + ins->dst_reg;
716 
717 	err = eval_defined(rd, NULL);
718 	if (err != NULL)
719 		return err;
720 
721 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
722 	if (ins->code == (BPF_ALU | EBPF_END | EBPF_TO_BE))
723 		eval_max_bound(rd, msk);
724 	else
725 		eval_apply_mask(rd, msk);
726 #else
727 	if (ins->code == (BPF_ALU | EBPF_END | EBPF_TO_LE))
728 		eval_max_bound(rd, msk);
729 	else
730 		eval_apply_mask(rd, msk);
731 #endif
732 
733 	return NULL;
734 }
735 
736 static const char *
737 eval_ptr(struct bpf_verifier *bvf, struct bpf_reg_val *rm, uint32_t opsz,
738 	uint32_t align, int16_t off)
739 {
740 	struct bpf_reg_val rv;
741 
742 	/* calculate reg + offset */
743 	eval_fill_imm(&rv, rm->mask, off);
744 	eval_add(rm, &rv, rm->mask);
745 
746 	if (RTE_BPF_ARG_PTR_TYPE(rm->v.type) == 0)
747 		return "destination is not a pointer";
748 
749 	if (rm->mask != UINT64_MAX)
750 		return "pointer truncation";
751 
752 	if (rm->u.max + opsz > rm->v.size ||
753 			(uint64_t)rm->s.max + opsz > rm->v.size ||
754 			rm->s.min < 0)
755 		return "memory boundary violation";
756 
757 	if (rm->u.max % align != 0)
758 		return "unaligned memory access";
759 
760 	if (rm->v.type == BPF_ARG_PTR_STACK) {
761 
762 		if (rm->u.max != rm->u.min || rm->s.max != rm->s.min ||
763 				rm->u.max != (uint64_t)rm->s.max)
764 			return "stack access with variable offset";
765 
766 		bvf->stack_sz = RTE_MAX(bvf->stack_sz, rm->v.size - rm->u.max);
767 
768 	/* pointer to mbuf */
769 	} else if (rm->v.type == RTE_BPF_ARG_PTR_MBUF) {
770 
771 		if (rm->u.max != rm->u.min || rm->s.max != rm->s.min ||
772 				rm->u.max != (uint64_t)rm->s.max)
773 			return "mbuf access with variable offset";
774 	}
775 
776 	return NULL;
777 }
778 
779 static void
780 eval_max_load(struct bpf_reg_val *rv, uint64_t mask)
781 {
782 	eval_umax_bound(rv, mask);
783 
784 	/* full 64-bit load */
785 	if (mask == UINT64_MAX)
786 		eval_smax_bound(rv, mask);
788 	/* zero-extend load */
789 	else {
790 		rv->s.min = rv->u.min;
		rv->s.max = rv->u.max;
	}
791 }
792 
793 
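/*
 * Evaluate BPF_LDX: verify the source pointer, then derive the loaded
 * value - from the tracked stack slot, from the known rte_mbuf fields
 * (next, buf_addr, data_off), or as an arbitrary zero-extended value
 * of the load width.
 */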
794 static const char *
795 eval_load(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
796 {
797 	uint32_t opsz;
798 	uint64_t msk;
799 	const char *err;
800 	struct bpf_eval_state *st;
801 	struct bpf_reg_val *rd, rs;
802 	const struct bpf_reg_val *sv;
803 
804 	st = bvf->evst;
805 	rd = st->rv + ins->dst_reg;
806 	rs = st->rv[ins->src_reg];
807 	opsz = bpf_size(BPF_SIZE(ins->code));
808 	msk = RTE_LEN2MASK(opsz * CHAR_BIT, uint64_t);
809 
810 	err = eval_ptr(bvf, &rs, opsz, 1, ins->off);
811 	if (err != NULL)
812 		return err;
813 
814 	if (rs.v.type == BPF_ARG_PTR_STACK) {
815 
816 		sv = st->sv + rs.u.max / sizeof(uint64_t);
817 		if (sv->v.type == RTE_BPF_ARG_UNDEF || sv->mask < msk)
818 			return "undefined value on the stack";
819 
820 		*rd = *sv;
821 
822 	/* pointer to mbuf */
823 	} else if (rs.v.type == RTE_BPF_ARG_PTR_MBUF) {
824 
825 		if (rs.u.max == offsetof(struct rte_mbuf, next)) {
826 			eval_fill_imm(rd, msk, 0);
827 			rd->v = rs.v;
828 		} else if (rs.u.max == offsetof(struct rte_mbuf, buf_addr)) {
829 			eval_fill_imm(rd, msk, 0);
830 			rd->v.type = RTE_BPF_ARG_PTR;
831 			rd->v.size = rs.v.buf_size;
832 		} else if (rs.u.max == offsetof(struct rte_mbuf, data_off)) {
833 			eval_fill_imm(rd, msk, RTE_PKTMBUF_HEADROOM);
834 			rd->v.type = RTE_BPF_ARG_RAW;
835 		} else {
836 			eval_max_load(rd, msk);
837 			rd->v.type = RTE_BPF_ARG_RAW;
838 		}
839 
840 	/* pointer to raw data */
841 	} else {
842 		eval_max_load(rd, msk);
843 		rd->v.type = RTE_BPF_ARG_RAW;
844 	}
845 
846 	return NULL;
847 }
848 
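/*
 * Reject stores that overlap any of the read-only rte_mbuf fields
 * listed in mbuf_ro_fields[] below; each entry records the field's
 * offset and size, so [off, off + sz) can be tested against the
 * range touched by the store.
 */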
849 static const char *
850 eval_mbuf_store(const struct bpf_reg_val *rv, uint32_t opsz)
851 {
852 	uint32_t i;
853 
854 	static const struct {
855 		size_t off;
856 		size_t sz;
857 	} mbuf_ro_fields[] = {
858 		{ .off = offsetof(struct rte_mbuf, buf_addr), .sz = sizeof(((struct rte_mbuf *)0)->buf_addr), },
859 		{ .off = offsetof(struct rte_mbuf, refcnt), .sz = sizeof(((struct rte_mbuf *)0)->refcnt), },
860 		{ .off = offsetof(struct rte_mbuf, nb_segs), .sz = sizeof(((struct rte_mbuf *)0)->nb_segs), },
861 		{ .off = offsetof(struct rte_mbuf, buf_len), .sz = sizeof(((struct rte_mbuf *)0)->buf_len), },
862 		{ .off = offsetof(struct rte_mbuf, pool), .sz = sizeof(((struct rte_mbuf *)0)->pool), },
863 		{ .off = offsetof(struct rte_mbuf, next), .sz = sizeof(((struct rte_mbuf *)0)->next), },
864 		{ .off = offsetof(struct rte_mbuf, priv_size), .sz = sizeof(((struct rte_mbuf *)0)->priv_size), },
865 	};
866 
867 	for (i = 0; i != RTE_DIM(mbuf_ro_fields) &&
868 			(mbuf_ro_fields[i].off + mbuf_ro_fields[i].sz <=
869 			rv->u.max || rv->u.max + opsz <= mbuf_ro_fields[i].off);
870 			i++)
871 		;
872 
873 	if (i != RTE_DIM(mbuf_ro_fields))
874 		return "store to the read-only mbuf field";
875 
876 	return NULL;
878 }
879 
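/*
 * Evaluate BPF_ST/BPF_STX: verify the destination pointer, then either
 * record the stored value in the corresponding stack slot or, for
 * mbuf stores, make sure no read-only metadata field is overwritten.
 */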
880 static const char *
881 eval_store(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
882 {
883 	uint32_t opsz;
884 	uint64_t msk;
885 	const char *err;
886 	struct bpf_eval_state *st;
887 	struct bpf_reg_val rd, rs, *sv;
888 
889 	opsz = bpf_size(BPF_SIZE(ins->code));
890 	msk = RTE_LEN2MASK(opsz * CHAR_BIT, uint64_t);
891 
892 	st = bvf->evst;
893 	rd = st->rv[ins->dst_reg];
894 
895 	if (BPF_CLASS(ins->code) == BPF_STX) {
896 		rs = st->rv[ins->src_reg];
897 		eval_apply_mask(&rs, msk);
898 	} else
899 		eval_fill_imm(&rs, msk, ins->imm);
900 
901 	err = eval_defined(NULL, &rs);
902 	if (err != NULL)
903 		return err;
904 
905 	err = eval_ptr(bvf, &rd, opsz, 1, ins->off);
906 	if (err != NULL)
907 		return err;
908 
909 	if (rd.v.type == BPF_ARG_PTR_STACK) {
910 
911 		sv = st->sv + rd.u.max / sizeof(uint64_t);
912 		if (BPF_CLASS(ins->code) == BPF_STX &&
913 				BPF_MODE(ins->code) == EBPF_XADD)
914 			eval_max_bound(sv, msk);
915 		else
916 			*sv = rs;
917 
918 	/* pointer to mbuf */
919 	} else if (rd.v.type == RTE_BPF_ARG_PTR_MBUF) {
920 		err = eval_mbuf_store(&rd, opsz);
921 		if (err != NULL)
922 			return err;
923 	}
924 
925 	return NULL;
926 }
927 
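/*
 * Check one argument of an external function call: the value must be
 * defined and of a compatible type, and pointer arguments must
 * reference an accessible memory region of at least arg->size bytes
 * (see eval_ptr() above).
 */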
928 static const char *
929 eval_func_arg(struct bpf_verifier *bvf, const struct rte_bpf_arg *arg,
930 	struct bpf_reg_val *rv)
931 {
932 	uint32_t i, n;
933 	struct bpf_eval_state *st;
934 	const char *err;
935 
936 	st = bvf->evst;
937 
938 	if (rv->v.type == RTE_BPF_ARG_UNDEF)
939 		return "Undefined argument type";
940 
941 	if (arg->type != rv->v.type &&
942 			arg->type != RTE_BPF_ARG_RAW &&
943 			(arg->type != RTE_BPF_ARG_PTR ||
944 			RTE_BPF_ARG_PTR_TYPE(rv->v.type) == 0))
945 		return "Invalid argument type";
946 
947 	err = NULL;
948 
949 	/* argument is a pointer */
950 	if (RTE_BPF_ARG_PTR_TYPE(arg->type) != 0) {
951 
952 		err = eval_ptr(bvf, rv, arg->size, 1, 0);
953 
954 		/*
955 		 * pointer to the variable on the stack is passed
956 		 * as an argument, mark stack space it occupies as initialized.
957 		 */
958 		if (err == NULL && rv->v.type == BPF_ARG_PTR_STACK) {
959 
960 			i = rv->u.max / sizeof(uint64_t);
961 			n = i + arg->size / sizeof(uint64_t);
962 			while (i != n) {
963 				eval_fill_max_bound(st->sv + i, UINT64_MAX);
964 				i++;
965 			}
966 		}
967 	}
968 
969 	return err;
970 }
971 
972 static const char *
973 eval_call(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
974 {
975 	uint32_t i, idx;
976 	struct bpf_reg_val *rv;
977 	const struct rte_bpf_xsym *xsym;
978 	const char *err;
979 
980 	idx = ins->imm;
981 
982 	if (idx >= bvf->prm->nb_xsym ||
983 			bvf->prm->xsym[idx].type != RTE_BPF_XTYPE_FUNC)
984 		return "invalid external function index";
985 
986 	/* for now don't support function calls on 32 bit platform */
987 	if (sizeof(uint64_t) != sizeof(uintptr_t))
988 		return "function calls are supported only for 64 bit apps";
989 
990 	xsym = bvf->prm->xsym + idx;
991 
992 	/* evaluate function arguments */
993 	err = NULL;
994 	for (i = 0; i != xsym->func.nb_args && err == NULL; i++) {
995 		err = eval_func_arg(bvf, xsym->func.args + i,
996 			bvf->evst->rv + EBPF_REG_1 + i);
997 	}
998 
999 	/* R1-R5 argument/scratch registers */
1000 	for (i = EBPF_REG_1; i != EBPF_REG_6; i++)
1001 		bvf->evst->rv[i].v.type = RTE_BPF_ARG_UNDEF;
1002 
1003 	/* update return value */
1004 
1005 	rv = bvf->evst->rv + EBPF_REG_0;
1006 	rv->v = xsym->func.ret;
1007 	if (rv->v.type == RTE_BPF_ARG_RAW)
1008 		eval_fill_max_bound(rv,
1009 			RTE_LEN2MASK(rv->v.size * CHAR_BIT, uint64_t));
1010 	else if (RTE_BPF_ARG_PTR_TYPE(rv->v.type) != 0)
1011 		eval_fill_imm64(rv, UINTPTR_MAX, 0);
1012 
1013 	return err;
1014 }
1015 
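/*
 * The branch evaluators below tighten register estimates for both
 * outcomes of a conditional jump: 't*' values describe the state
 * where the condition holds (branch taken), 'f*' values the
 * fall-through state. E.g. after 'jgt r1, 5' the taken path knows
 * r1.u.min >= 6, while the fall-through path knows r1.u.max <= 5.
 */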
1016 static void
1017 eval_jeq_jne(struct bpf_reg_val *trd, struct bpf_reg_val *trs)
1018 {
1019 	/* sreg is constant */
1020 	if (trs->u.min == trs->u.max) {
1021 		trd->u = trs->u;
1022 	/* dreg is constant */
1023 	} else if (trd->u.min == trd->u.max) {
1024 		trs->u = trd->u;
1025 	} else {
1026 		trd->u.max = RTE_MIN(trd->u.max, trs->u.max);
1027 		trd->u.min = RTE_MAX(trd->u.min, trs->u.min);
1028 		trs->u = trd->u;
1029 	}
1030 
1031 	/* sreg is constant */
1032 	if (trs->s.min == trs->s.max) {
1033 		trd->s = trs->s;
1034 	/* dreg is constant */
1035 	} else if (trd->s.min == trd->s.max) {
1036 		trs->s = trd->s;
1037 	} else {
1038 		trd->s.max = RTE_MIN(trd->s.max, trs->s.max);
1039 		trd->s.min = RTE_MAX(trd->s.min, trs->s.min);
1040 		trs->s = trd->s;
1041 	}
1042 }
1043 
1044 static void
1045 eval_jgt_jle(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
1046 	struct bpf_reg_val *frd, struct bpf_reg_val *frs)
1047 {
1048 	frd->u.max = RTE_MIN(frd->u.max, frs->u.min);
1049 	trd->u.min = RTE_MAX(trd->u.min, trs->u.min + 1);
1050 }
1051 
1052 static void
1053 eval_jlt_jge(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
1054 	struct bpf_reg_val *frd, struct bpf_reg_val *frs)
1055 {
1056 	frd->u.min = RTE_MAX(frd->u.min, frs->u.min);
1057 	trd->u.max = RTE_MIN(trd->u.max, trs->u.max - 1);
1058 }
1059 
1060 static void
1061 eval_jsgt_jsle(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
1062 	struct bpf_reg_val *frd, struct bpf_reg_val *frs)
1063 {
1064 	frd->s.max = RTE_MIN(frd->s.max, frs->s.min);
1065 	trd->s.min = RTE_MAX(trd->s.min, trs->s.min + 1);
1066 }
1067 
1068 static void
1069 eval_jslt_jsge(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
1070 	struct bpf_reg_val *frd, struct bpf_reg_val *frs)
1071 {
1072 	frd->s.min = RTE_MAX(frd->s.min, frs->s.min);
1073 	trd->s.max = RTE_MIN(trd->s.max, trs->s.max - 1);
1074 }
1075 
1076 static const char *
1077 eval_jcc(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
1078 {
1079 	uint32_t op;
1080 	const char *err;
1081 	struct bpf_eval_state *fst, *tst;
1082 	struct bpf_reg_val *frd, *frs, *trd, *trs;
1083 	struct bpf_reg_val rvf, rvt;
1084 
1085 	tst = bvf->evst;
1086 	fst = bvf->evin->evst;
1087 
1088 	frd = fst->rv + ins->dst_reg;
1089 	trd = tst->rv + ins->dst_reg;
1090 
1091 	if (BPF_SRC(ins->code) == BPF_X) {
1092 		frs = fst->rv + ins->src_reg;
1093 		trs = tst->rv + ins->src_reg;
1094 	} else {
1095 		frs = &rvf;
1096 		trs = &rvt;
1097 		eval_fill_imm(frs, UINT64_MAX, ins->imm);
1098 		eval_fill_imm(trs, UINT64_MAX, ins->imm);
1099 	}
1100 
1101 	err = eval_defined(trd, trs);
1102 	if (err != NULL)
1103 		return err;
1104 
1105 	op = BPF_OP(ins->code);
1106 
1107 	if (op == BPF_JEQ)
1108 		eval_jeq_jne(trd, trs);
1109 	else if (op == EBPF_JNE)
1110 		eval_jeq_jne(frd, frs);
1111 	else if (op == BPF_JGT)
1112 		eval_jgt_jle(trd, trs, frd, frs);
1113 	else if (op == EBPF_JLE)
1114 		eval_jgt_jle(frd, frs, trd, trs);
1115 	else if (op == EBPF_JLT)
1116 		eval_jlt_jge(trd, trs, frd, frs);
1117 	else if (op == BPF_JGE)
1118 		eval_jlt_jge(frd, frs, trd, trs);
1119 	else if (op == EBPF_JSGT)
1120 		eval_jsgt_jsle(trd, trs, frd, frs);
1121 	else if (op == EBPF_JSLE)
1122 		eval_jsgt_jsle(frd, frs, trd, trs);
1123 	else if (op == EBPF_JSLT)
1124 		eval_jslt_jsge(trd, trs, frd, frs);
1125 	else if (op == EBPF_JSGE)
1126 		eval_jslt_jsge(frd, frs, trd, trs);
1127 
1128 	return NULL;
1129 }
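/*
 * The table below is indexed directly by the 8-bit opcode, e.g.
 * ins_chk[BPF_ALU | BPF_ADD | BPF_K] holds the constraints for 32-bit
 * 'add dst, imm'. Entries with an all-zero dreg mask denote invalid
 * opcodes (see check_syntax() below).
 */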
1130 
1131 /*
1132  * validate parameters for each instruction type.
1133  */
1134 static const struct bpf_ins_check ins_chk[UINT8_MAX + 1] = {
1135 	/* ALU IMM 32-bit instructions */
1136 	[(BPF_ALU | BPF_ADD | BPF_K)] = {
1137 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1138 		.off = { .min = 0, .max = 0},
1139 		.imm = { .min = 0, .max = UINT32_MAX,},
1140 		.eval = eval_alu,
1141 	},
1142 	[(BPF_ALU | BPF_SUB | BPF_K)] = {
1143 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1144 		.off = { .min = 0, .max = 0},
1145 		.imm = { .min = 0, .max = UINT32_MAX,},
1146 		.eval = eval_alu,
1147 	},
1148 	[(BPF_ALU | BPF_AND | BPF_K)] = {
1149 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1150 		.off = { .min = 0, .max = 0},
1151 		.imm = { .min = 0, .max = UINT32_MAX,},
1152 		.eval = eval_alu,
1153 	},
1154 	[(BPF_ALU | BPF_OR | BPF_K)] = {
1155 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1156 		.off = { .min = 0, .max = 0},
1157 		.imm = { .min = 0, .max = UINT32_MAX,},
1158 		.eval = eval_alu,
1159 	},
1160 	[(BPF_ALU | BPF_LSH | BPF_K)] = {
1161 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1162 		.off = { .min = 0, .max = 0},
1163 		.imm = { .min = 0, .max = UINT32_MAX,},
1164 		.eval = eval_alu,
1165 	},
1166 	[(BPF_ALU | BPF_RSH | BPF_K)] = {
1167 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1168 		.off = { .min = 0, .max = 0},
1169 		.imm = { .min = 0, .max = UINT32_MAX,},
1170 		.eval = eval_alu,
1171 	},
1172 	[(BPF_ALU | BPF_XOR | BPF_K)] = {
1173 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1174 		.off = { .min = 0, .max = 0},
1175 		.imm = { .min = 0, .max = UINT32_MAX,},
1176 		.eval = eval_alu,
1177 	},
1178 	[(BPF_ALU | BPF_MUL | BPF_K)] = {
1179 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1180 		.off = { .min = 0, .max = 0},
1181 		.imm = { .min = 0, .max = UINT32_MAX,},
1182 		.eval = eval_alu,
1183 	},
1184 	[(BPF_ALU | EBPF_MOV | BPF_K)] = {
1185 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1186 		.off = { .min = 0, .max = 0},
1187 		.imm = { .min = 0, .max = UINT32_MAX,},
1188 		.eval = eval_alu,
1189 	},
1190 	[(BPF_ALU | BPF_DIV | BPF_K)] = {
1191 		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1192 		.off = { .min = 0, .max = 0},
1193 		.imm = { .min = 1, .max = UINT32_MAX},
1194 		.eval = eval_alu,
1195 	},
1196 	[(BPF_ALU | BPF_MOD | BPF_K)] = {
1197 		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1198 		.off = { .min = 0, .max = 0},
1199 		.imm = { .min = 1, .max = UINT32_MAX},
1200 		.eval = eval_alu,
1201 	},
1202 	/* ALU IMM 64-bit instructions */
1203 	[(EBPF_ALU64 | BPF_ADD | BPF_K)] = {
1204 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1205 		.off = { .min = 0, .max = 0},
1206 		.imm = { .min = 0, .max = UINT32_MAX,},
1207 		.eval = eval_alu,
1208 	},
1209 	[(EBPF_ALU64 | BPF_SUB | BPF_K)] = {
1210 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1211 		.off = { .min = 0, .max = 0},
1212 		.imm = { .min = 0, .max = UINT32_MAX,},
1213 		.eval = eval_alu,
1214 	},
1215 	[(EBPF_ALU64 | BPF_AND | BPF_K)] = {
1216 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1217 		.off = { .min = 0, .max = 0},
1218 		.imm = { .min = 0, .max = UINT32_MAX,},
1219 		.eval = eval_alu,
1220 	},
1221 	[(EBPF_ALU64 | BPF_OR | BPF_K)] = {
1222 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1223 		.off = { .min = 0, .max = 0},
1224 		.imm = { .min = 0, .max = UINT32_MAX,},
1225 		.eval = eval_alu,
1226 	},
1227 	[(EBPF_ALU64 | BPF_LSH | BPF_K)] = {
1228 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1229 		.off = { .min = 0, .max = 0},
1230 		.imm = { .min = 0, .max = UINT32_MAX,},
1231 		.eval = eval_alu,
1232 	},
1233 	[(EBPF_ALU64 | BPF_RSH | BPF_K)] = {
1234 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1235 		.off = { .min = 0, .max = 0},
1236 		.imm = { .min = 0, .max = UINT32_MAX,},
1237 		.eval = eval_alu,
1238 	},
1239 	[(EBPF_ALU64 | EBPF_ARSH | BPF_K)] = {
1240 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1241 		.off = { .min = 0, .max = 0},
1242 		.imm = { .min = 0, .max = UINT32_MAX,},
1243 		.eval = eval_alu,
1244 	},
1245 	[(EBPF_ALU64 | BPF_XOR | BPF_K)] = {
1246 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1247 		.off = { .min = 0, .max = 0},
1248 		.imm = { .min = 0, .max = UINT32_MAX,},
1249 		.eval = eval_alu,
1250 	},
1251 	[(EBPF_ALU64 | BPF_MUL | BPF_K)] = {
1252 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1253 		.off = { .min = 0, .max = 0},
1254 		.imm = { .min = 0, .max = UINT32_MAX,},
1255 		.eval = eval_alu,
1256 	},
1257 	[(EBPF_ALU64 | EBPF_MOV | BPF_K)] = {
1258 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1259 		.off = { .min = 0, .max = 0},
1260 		.imm = { .min = 0, .max = UINT32_MAX,},
1261 		.eval = eval_alu,
1262 	},
1263 	[(EBPF_ALU64 | BPF_DIV | BPF_K)] = {
1264 		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1265 		.off = { .min = 0, .max = 0},
1266 		.imm = { .min = 1, .max = UINT32_MAX},
1267 		.eval = eval_alu,
1268 	},
1269 	[(EBPF_ALU64 | BPF_MOD | BPF_K)] = {
1270 		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1271 		.off = { .min = 0, .max = 0},
1272 		.imm = { .min = 1, .max = UINT32_MAX},
1273 		.eval = eval_alu,
1274 	},
1275 	/* ALU REG 32-bit instructions */
1276 	[(BPF_ALU | BPF_ADD | BPF_X)] = {
1277 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1278 		.off = { .min = 0, .max = 0},
1279 		.imm = { .min = 0, .max = 0},
1280 		.eval = eval_alu,
1281 	},
1282 	[(BPF_ALU | BPF_SUB | BPF_X)] = {
1283 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1284 		.off = { .min = 0, .max = 0},
1285 		.imm = { .min = 0, .max = 0},
1286 		.eval = eval_alu,
1287 	},
1288 	[(BPF_ALU | BPF_AND | BPF_X)] = {
1289 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1290 		.off = { .min = 0, .max = 0},
1291 		.imm = { .min = 0, .max = 0},
1292 		.eval = eval_alu,
1293 	},
1294 	[(BPF_ALU | BPF_OR | BPF_X)] = {
1295 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1296 		.off = { .min = 0, .max = 0},
1297 		.imm = { .min = 0, .max = 0},
1298 		.eval = eval_alu,
1299 	},
1300 	[(BPF_ALU | BPF_LSH | BPF_X)] = {
1301 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1302 		.off = { .min = 0, .max = 0},
1303 		.imm = { .min = 0, .max = 0},
1304 		.eval = eval_alu,
1305 	},
1306 	[(BPF_ALU | BPF_RSH | BPF_X)] = {
1307 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1308 		.off = { .min = 0, .max = 0},
1309 		.imm = { .min = 0, .max = 0},
1310 		.eval = eval_alu,
1311 	},
1312 	[(BPF_ALU | BPF_XOR | BPF_X)] = {
1313 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1314 		.off = { .min = 0, .max = 0},
1315 		.imm = { .min = 0, .max = 0},
1316 		.eval = eval_alu,
1317 	},
1318 	[(BPF_ALU | BPF_MUL | BPF_X)] = {
1319 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1320 		.off = { .min = 0, .max = 0},
1321 		.imm = { .min = 0, .max = 0},
1322 		.eval = eval_alu,
1323 	},
1324 	[(BPF_ALU | BPF_DIV | BPF_X)] = {
1325 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1326 		.off = { .min = 0, .max = 0},
1327 		.imm = { .min = 0, .max = 0},
1328 		.eval = eval_alu,
1329 	},
1330 	[(BPF_ALU | BPF_MOD | BPF_X)] = {
1331 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1332 		.off = { .min = 0, .max = 0},
1333 		.imm = { .min = 0, .max = 0},
1334 		.eval = eval_alu,
1335 	},
1336 	[(BPF_ALU | EBPF_MOV | BPF_X)] = {
1337 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1338 		.off = { .min = 0, .max = 0},
1339 		.imm = { .min = 0, .max = 0},
1340 		.eval = eval_alu,
1341 	},
1342 	[(BPF_ALU | BPF_NEG)] = {
1343 		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1344 		.off = { .min = 0, .max = 0},
1345 		.imm = { .min = 0, .max = 0},
1346 		.eval = eval_alu,
1347 	},
1348 	[(BPF_ALU | EBPF_END | EBPF_TO_BE)] = {
1349 		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1350 		.off = { .min = 0, .max = 0},
1351 		.imm = { .min = 16, .max = 64},
1352 		.check = check_alu_bele,
1353 		.eval = eval_bele,
1354 	},
1355 	[(BPF_ALU | EBPF_END | EBPF_TO_LE)] = {
1356 		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1357 		.off = { .min = 0, .max = 0},
1358 		.imm = { .min = 16, .max = 64},
1359 		.check = check_alu_bele,
1360 		.eval = eval_bele,
1361 	},
1362 	/* ALU REG 64-bit instructions */
1363 	[(EBPF_ALU64 | BPF_ADD | BPF_X)] = {
1364 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1365 		.off = { .min = 0, .max = 0},
1366 		.imm = { .min = 0, .max = 0},
1367 		.eval = eval_alu,
1368 	},
1369 	[(EBPF_ALU64 | BPF_SUB | BPF_X)] = {
1370 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1371 		.off = { .min = 0, .max = 0},
1372 		.imm = { .min = 0, .max = 0},
1373 		.eval = eval_alu,
1374 	},
1375 	[(EBPF_ALU64 | BPF_AND | BPF_X)] = {
1376 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1377 		.off = { .min = 0, .max = 0},
1378 		.imm = { .min = 0, .max = 0},
1379 		.eval = eval_alu,
1380 	},
1381 	[(EBPF_ALU64 | BPF_OR | BPF_X)] = {
1382 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1383 		.off = { .min = 0, .max = 0},
1384 		.imm = { .min = 0, .max = 0},
1385 		.eval = eval_alu,
1386 	},
1387 	[(EBPF_ALU64 | BPF_LSH | BPF_X)] = {
1388 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1389 		.off = { .min = 0, .max = 0},
1390 		.imm = { .min = 0, .max = 0},
1391 		.eval = eval_alu,
1392 	},
1393 	[(EBPF_ALU64 | BPF_RSH | BPF_X)] = {
1394 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1395 		.off = { .min = 0, .max = 0},
1396 		.imm = { .min = 0, .max = 0},
1397 		.eval = eval_alu,
1398 	},
1399 	[(EBPF_ALU64 | EBPF_ARSH | BPF_X)] = {
1400 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1401 		.off = { .min = 0, .max = 0},
1402 		.imm = { .min = 0, .max = 0},
1403 		.eval = eval_alu,
1404 	},
1405 	[(EBPF_ALU64 | BPF_XOR | BPF_X)] = {
1406 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1407 		.off = { .min = 0, .max = 0},
1408 		.imm = { .min = 0, .max = 0},
1409 		.eval = eval_alu,
1410 	},
1411 	[(EBPF_ALU64 | BPF_MUL | BPF_X)] = {
1412 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1413 		.off = { .min = 0, .max = 0},
1414 		.imm = { .min = 0, .max = 0},
1415 		.eval = eval_alu,
1416 	},
1417 	[(EBPF_ALU64 | BPF_DIV | BPF_X)] = {
1418 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1419 		.off = { .min = 0, .max = 0},
1420 		.imm = { .min = 0, .max = 0},
1421 		.eval = eval_alu,
1422 	},
1423 	[(EBPF_ALU64 | BPF_MOD | BPF_X)] = {
1424 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1425 		.off = { .min = 0, .max = 0},
1426 		.imm = { .min = 0, .max = 0},
1427 		.eval = eval_alu,
1428 	},
1429 	[(EBPF_ALU64 | EBPF_MOV | BPF_X)] = {
1430 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1431 		.off = { .min = 0, .max = 0},
1432 		.imm = { .min = 0, .max = 0},
1433 		.eval = eval_alu,
1434 	},
1435 	[(EBPF_ALU64 | BPF_NEG)] = {
1436 		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1437 		.off = { .min = 0, .max = 0},
1438 		.imm = { .min = 0, .max = 0},
1439 		.eval = eval_alu,
1440 	},
1441 	/* load instructions */
1442 	[(BPF_LDX | BPF_MEM | BPF_B)] = {
1443 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1444 		.off = { .min = 0, .max = UINT16_MAX},
1445 		.imm = { .min = 0, .max = 0},
1446 		.eval = eval_load,
1447 	},
1448 	[(BPF_LDX | BPF_MEM | BPF_H)] = {
1449 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1450 		.off = { .min = 0, .max = UINT16_MAX},
1451 		.imm = { .min = 0, .max = 0},
1452 		.eval = eval_load,
1453 	},
1454 	[(BPF_LDX | BPF_MEM | BPF_W)] = {
1455 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1456 		.off = { .min = 0, .max = UINT16_MAX},
1457 		.imm = { .min = 0, .max = 0},
1458 		.eval = eval_load,
1459 	},
1460 	[(BPF_LDX | BPF_MEM | EBPF_DW)] = {
1461 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1462 		.off = { .min = 0, .max = UINT16_MAX},
1463 		.imm = { .min = 0, .max = 0},
1464 		.eval = eval_load,
1465 	},
1466 	/* load 64 bit immediate value */
1467 	[(BPF_LD | BPF_IMM | EBPF_DW)] = {
1468 		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1469 		.off = { .min = 0, .max = 0},
1470 		.imm = { .min = 0, .max = UINT32_MAX},
1471 		.eval = eval_ld_imm64,
1472 	},
1473 	/* load absolute instructions */
1474 	[(BPF_LD | BPF_ABS | BPF_B)] = {
1475 		.mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
1476 		.off = { .min = 0, .max = 0},
1477 		.imm = { .min = 0, .max = INT32_MAX},
1478 		.eval = eval_ld_mbuf,
1479 	},
1480 	[(BPF_LD | BPF_ABS | BPF_H)] = {
1481 		.mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
1482 		.off = { .min = 0, .max = 0},
1483 		.imm = { .min = 0, .max = INT32_MAX},
1484 		.eval = eval_ld_mbuf,
1485 	},
1486 	[(BPF_LD | BPF_ABS | BPF_W)] = {
1487 		.mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
1488 		.off = { .min = 0, .max = 0},
1489 		.imm = { .min = 0, .max = INT32_MAX},
1490 		.eval = eval_ld_mbuf,
1491 	},
1492 	/* load indirect instructions */
1493 	[(BPF_LD | BPF_IND | BPF_B)] = {
1494 		.mask = { .dreg = ZERO_REG, .sreg = IND_SRC_REGS},
1495 		.off = { .min = 0, .max = 0},
1496 		.imm = { .min = 0, .max = UINT32_MAX},
1497 		.eval = eval_ld_mbuf,
1498 	},
1499 	[(BPF_LD | BPF_IND | BPF_H)] = {
1500 		.mask = { .dreg = ZERO_REG, .sreg = IND_SRC_REGS},
1501 		.off = { .min = 0, .max = 0},
1502 		.imm = { .min = 0, .max = UINT32_MAX},
1503 		.eval = eval_ld_mbuf,
1504 	},
1505 	[(BPF_LD | BPF_IND | BPF_W)] = {
1506 		.mask = { .dreg = ZERO_REG, .sreg = IND_SRC_REGS},
1507 		.off = { .min = 0, .max = 0},
1508 		.imm = { .min = 0, .max = UINT32_MAX},
1509 		.eval = eval_ld_mbuf,
1510 	},
1511 	/* store REG instructions */
1512 	[(BPF_STX | BPF_MEM | BPF_B)] = {
1513 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1514 		.off = { .min = 0, .max = UINT16_MAX},
1515 		.imm = { .min = 0, .max = 0},
1516 		.eval = eval_store,
1517 	},
1518 	[(BPF_STX | BPF_MEM | BPF_H)] = {
1519 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1520 		.off = { .min = 0, .max = UINT16_MAX},
1521 		.imm = { .min = 0, .max = 0},
1522 		.eval = eval_store,
1523 	},
1524 	[(BPF_STX | BPF_MEM | BPF_W)] = {
1525 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1526 		.off = { .min = 0, .max = UINT16_MAX},
1527 		.imm = { .min = 0, .max = 0},
1528 		.eval = eval_store,
1529 	},
1530 	[(BPF_STX | BPF_MEM | EBPF_DW)] = {
1531 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1532 		.off = { .min = 0, .max = UINT16_MAX},
1533 		.imm = { .min = 0, .max = 0},
1534 		.eval = eval_store,
1535 	},
1536 	/* atomic add instructions */
1537 	[(BPF_STX | EBPF_XADD | BPF_W)] = {
1538 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1539 		.off = { .min = 0, .max = UINT16_MAX},
1540 		.imm = { .min = 0, .max = 0},
1541 		.eval = eval_store,
1542 	},
1543 	[(BPF_STX | EBPF_XADD | EBPF_DW)] = {
1544 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1545 		.off = { .min = 0, .max = UINT16_MAX},
1546 		.imm = { .min = 0, .max = 0},
1547 		.eval = eval_store,
1548 	},
1549 	/* store IMM instructions */
1550 	[(BPF_ST | BPF_MEM | BPF_B)] = {
1551 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1552 		.off = { .min = 0, .max = UINT16_MAX},
1553 		.imm = { .min = 0, .max = UINT32_MAX},
1554 		.eval = eval_store,
1555 	},
1556 	[(BPF_ST | BPF_MEM | BPF_H)] = {
1557 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1558 		.off = { .min = 0, .max = UINT16_MAX},
1559 		.imm = { .min = 0, .max = UINT32_MAX},
1560 		.eval = eval_store,
1561 	},
1562 	[(BPF_ST | BPF_MEM | BPF_W)] = {
1563 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1564 		.off = { .min = 0, .max = UINT16_MAX},
1565 		.imm = { .min = 0, .max = UINT32_MAX},
1566 		.eval = eval_store,
1567 	},
1568 	[(BPF_ST | BPF_MEM | EBPF_DW)] = {
1569 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1570 		.off = { .min = 0, .max = UINT16_MAX},
1571 		.imm = { .min = 0, .max = UINT32_MAX},
1572 		.eval = eval_store,
1573 	},
1574 	/* jump instruction */
1575 	[(BPF_JMP | BPF_JA)] = {
1576 		.mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
1577 		.off = { .min = 0, .max = UINT16_MAX},
1578 		.imm = { .min = 0, .max = 0},
1579 	},
1580 	/* jcc IMM instructions */
1581 	[(BPF_JMP | BPF_JEQ | BPF_K)] = {
1582 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1583 		.off = { .min = 0, .max = UINT16_MAX},
1584 		.imm = { .min = 0, .max = UINT32_MAX},
1585 		.eval = eval_jcc,
1586 	},
1587 	[(BPF_JMP | EBPF_JNE | BPF_K)] = {
1588 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1589 		.off = { .min = 0, .max = UINT16_MAX},
1590 		.imm = { .min = 0, .max = UINT32_MAX},
1591 		.eval = eval_jcc,
1592 	},
1593 	[(BPF_JMP | BPF_JGT | BPF_K)] = {
1594 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1595 		.off = { .min = 0, .max = UINT16_MAX},
1596 		.imm = { .min = 0, .max = UINT32_MAX},
1597 		.eval = eval_jcc,
1598 	},
1599 	[(BPF_JMP | EBPF_JLT | BPF_K)] = {
1600 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1601 		.off = { .min = 0, .max = UINT16_MAX},
1602 		.imm = { .min = 0, .max = UINT32_MAX},
1603 		.eval = eval_jcc,
1604 	},
1605 	[(BPF_JMP | BPF_JGE | BPF_K)] = {
1606 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1607 		.off = { .min = 0, .max = UINT16_MAX},
1608 		.imm = { .min = 0, .max = UINT32_MAX},
1609 		.eval = eval_jcc,
1610 	},
1611 	[(BPF_JMP | EBPF_JLE | BPF_K)] = {
1612 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1613 		.off = { .min = 0, .max = UINT16_MAX},
1614 		.imm = { .min = 0, .max = UINT32_MAX},
1615 		.eval = eval_jcc,
1616 	},
1617 	[(BPF_JMP | EBPF_JSGT | BPF_K)] = {
1618 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1619 		.off = { .min = 0, .max = UINT16_MAX},
1620 		.imm = { .min = 0, .max = UINT32_MAX},
1621 		.eval = eval_jcc,
1622 	},
1623 	[(BPF_JMP | EBPF_JSLT | BPF_K)] = {
1624 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1625 		.off = { .min = 0, .max = UINT16_MAX},
1626 		.imm = { .min = 0, .max = UINT32_MAX},
1627 		.eval = eval_jcc,
1628 	},
1629 	[(BPF_JMP | EBPF_JSGE | BPF_K)] = {
1630 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1631 		.off = { .min = 0, .max = UINT16_MAX},
1632 		.imm = { .min = 0, .max = UINT32_MAX},
1633 		.eval = eval_jcc,
1634 	},
1635 	[(BPF_JMP | EBPF_JSLE | BPF_K)] = {
1636 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1637 		.off = { .min = 0, .max = UINT16_MAX},
1638 		.imm = { .min = 0, .max = UINT32_MAX},
1639 		.eval = eval_jcc,
1640 	},
1641 	[(BPF_JMP | BPF_JSET | BPF_K)] = {
1642 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1643 		.off = { .min = 0, .max = UINT16_MAX},
1644 		.imm = { .min = 0, .max = UINT32_MAX},
1645 		.eval = eval_jcc,
1646 	},
1647 	/* jcc REG instructions */
1648 	[(BPF_JMP | BPF_JEQ | BPF_X)] = {
1649 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1650 		.off = { .min = 0, .max = UINT16_MAX},
1651 		.imm = { .min = 0, .max = 0},
1652 		.eval = eval_jcc,
1653 	},
1654 	[(BPF_JMP | EBPF_JNE | BPF_X)] = {
1655 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1656 		.off = { .min = 0, .max = UINT16_MAX},
1657 		.imm = { .min = 0, .max = 0},
1658 		.eval = eval_jcc,
1659 	},
1660 	[(BPF_JMP | BPF_JGT | BPF_X)] = {
1661 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1662 		.off = { .min = 0, .max = UINT16_MAX},
1663 		.imm = { .min = 0, .max = 0},
1664 		.eval = eval_jcc,
1665 	},
1666 	[(BPF_JMP | EBPF_JLT | BPF_X)] = {
1667 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1668 		.off = { .min = 0, .max = UINT16_MAX},
1669 		.imm = { .min = 0, .max = 0},
1670 		.eval = eval_jcc,
1671 	},
1672 	[(BPF_JMP | BPF_JGE | BPF_X)] = {
1673 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1674 		.off = { .min = 0, .max = UINT16_MAX},
1675 		.imm = { .min = 0, .max = 0},
1676 		.eval = eval_jcc,
1677 	},
1678 	[(BPF_JMP | EBPF_JLE | BPF_X)] = {
1679 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1680 		.off = { .min = 0, .max = UINT16_MAX},
1681 		.imm = { .min = 0, .max = 0},
1682 		.eval = eval_jcc,
1683 	},
1684 	[(BPF_JMP | EBPF_JSGT | BPF_X)] = {
1685 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1686 		.off = { .min = 0, .max = UINT16_MAX},
1687 		.imm = { .min = 0, .max = 0},
1688 		.eval = eval_jcc,
1689 	},
1690 	[(BPF_JMP | EBPF_JSLT | BPF_X)] = {
1691 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1692 		.off = { .min = 0, .max = UINT16_MAX},
1693 		.imm = { .min = 0, .max = 0},
		.eval = eval_jcc,
1694 	},
1695 	[(BPF_JMP | EBPF_JSGE | BPF_X)] = {
1696 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1697 		.off = { .min = 0, .max = UINT16_MAX},
1698 		.imm = { .min = 0, .max = 0},
1699 		.eval = eval_jcc,
1700 	},
1701 	[(BPF_JMP | EBPF_JSLE | BPF_X)] = {
1702 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1703 		.off = { .min = 0, .max = UINT16_MAX},
1704 		.imm = { .min = 0, .max = 0},
1705 		.eval = eval_jcc,
1706 	},
1707 	[(BPF_JMP | BPF_JSET | BPF_X)] = {
1708 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1709 		.off = { .min = 0, .max = UINT16_MAX},
1710 		.imm = { .min = 0, .max = 0},
1711 		.eval = eval_jcc,
1712 	},
1713 	/* call instruction */
1714 	[(BPF_JMP | EBPF_CALL)] = {
1715 		.mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
1716 		.off = { .min = 0, .max = 0},
1717 		.imm = { .min = 0, .max = UINT32_MAX},
1718 		.eval = eval_call,
1719 	},
1720 	/* ret instruction */
1721 	[(BPF_JMP | EBPF_EXIT)] = {
1722 		.mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
1723 		.off = { .min = 0, .max = 0},
1724 		.imm = { .min = 0, .max = 0},
1725 		.eval = eval_exit,
1726 	},
1727 };
1728 
1729 /*
1730  * make sure that instruction syntax is valid,
1731  * and its fields don't violate particular instruction type restrictions.
1732  */
1733 static const char *
1734 check_syntax(const struct ebpf_insn *ins)
1735 {
1736 
1737 	uint8_t op;
1738 	uint16_t off;
1739 	uint32_t imm;
1740 
1741 	op = ins->code;
1742 
1743 	if (ins_chk[op].mask.dreg == 0)
1744 		return "invalid opcode";
1745 
1746 	if ((ins_chk[op].mask.dreg & 1 << ins->dst_reg) == 0)
1747 		return "invalid dst-reg field";
1748 
1749 	if ((ins_chk[op].mask.sreg & 1 << ins->src_reg) == 0)
1750 		return "invalid src-reg field";
1751 
1752 	off = ins->off;
1753 	if (ins_chk[op].off.min > off || ins_chk[op].off.max < off)
1754 		return "invalid off field";
1755 
1756 	imm = ins->imm;
1757 	if (ins_chk[op].imm.min > imm || ins_chk[op].imm.max < imm)
1758 		return "invalid imm field";
1759 
1760 	if (ins_chk[op].check != NULL)
1761 		return ins_chk[op].check(ins);
1762 
1763 	return NULL;
1764 }
1765 
1766 /*
1767  * helper function, return instruction index for the given node.
1768  */
1769 static uint32_t
1770 get_node_idx(const struct bpf_verifier *bvf, const struct inst_node *node)
1771 {
1772 	return node - bvf->in;
1773 }
1774 
1775 /*
1776  * helper function, used to walk through constructed CFG.
1777  */
1778 static struct inst_node *
1779 get_next_node(struct bpf_verifier *bvf, struct inst_node *node)
1780 {
1781 	uint32_t ce, ne, dst;
1782 
1783 	ne = node->nb_edge;
1784 	ce = node->cur_edge;
1785 	if (ce == ne)
1786 		return NULL;
1787 
1788 	node->cur_edge++;
1789 	dst = node->edge_dest[ce];
1790 	return bvf->in + dst;
1791 }
1792 
1793 static void
1794 set_node_colour(struct bpf_verifier *bvf, struct inst_node *node,
1795 	uint32_t new)
1796 {
1797 	uint32_t prev;
1798 
1799 	prev = node->colour;
1800 	node->colour = new;
1801 
1802 	bvf->node_colour[prev]--;
1803 	bvf->node_colour[new]++;
1804 }
1805 
1806 /*
1807  * helper function, add new edge between two nodes.
1808  */
1809 static int
1810 add_edge(struct bpf_verifier *bvf, struct inst_node *node, uint32_t nidx)
1811 {
1812 	uint32_t ne;
1813 
1814 	if (nidx > bvf->prm->nb_ins) {
1815 		RTE_BPF_LOG_LINE(ERR, "%s: program boundary violation at pc: %u, "
1816 			"next pc: %u",
1817 			__func__, get_node_idx(bvf, node), nidx);
1818 		return -EINVAL;
1819 	}
1820 
1821 	ne = node->nb_edge;
1822 	if (ne >= RTE_DIM(node->edge_dest)) {
1823 		RTE_BPF_LOG_LINE(ERR, "%s: internal error at pc: %u",
1824 			__func__, get_node_idx(bvf, node));
1825 		return -EINVAL;
1826 	}
1827 
1828 	node->edge_dest[ne] = nidx;
1829 	node->nb_edge = ne + 1;
1830 	return 0;
1831 }
1832 
1833 /*
1834  * helper function, determine type of edge between two nodes.
1835  */
1836 static void
1837 set_edge_type(struct bpf_verifier *bvf, struct inst_node *node,
1838 	const struct inst_node *next)
1839 {
1840 	uint32_t ce, clr, type;
1841 
1842 	ce = node->cur_edge - 1;
1843 	clr = next->colour;
1844 
1845 	type = UNKNOWN_EDGE;
1846 
1847 	if (clr == WHITE)
1848 		type = TREE_EDGE;
1849 	else if (clr == GREY)
1850 		type = BACK_EDGE;
1851 	else if (clr == BLACK)
1852 		/*
1853 		 * in fact it could be either direct or cross edge,
1854 		 * but for now, we don't need to distinguish between them.
1855 		 */
1856 		type = CROSS_EDGE;
1857 
1858 	node->edge_type[ce] = type;
1859 	bvf->edge_type[type]++;
1860 }
1861 
1862 static struct inst_node *
1863 get_prev_node(struct bpf_verifier *bvf, struct inst_node *node)
1864 {
1865 	return  bvf->in + node->prev_node;
1866 }
1867 
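/*
 * Node colours track DFS progress: WHITE - not visited yet,
 * GREY - on the current DFS path, BLACK - fully processed.
 * An edge leading into a GREY node is therefore a back edge,
 * i.e. a loop in the program.
 */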
1868 /*
1869  * Depth-First Search (DFS) through previously constructed
1870  * Control Flow Graph (CFG).
1871  * Information collected during this walk is used later
1872  * to determine whether there are any loops and/or unreachable instructions.
1873  */
1874 static void
1875 dfs(struct bpf_verifier *bvf)
1876 {
1877 	struct inst_node *next, *node;
1878 
1879 	node = bvf->in;
1880 	while (node != NULL) {
1881 
1882 		if (node->colour == WHITE)
1883 			set_node_colour(bvf, node, GREY);
1884 
1885 		if (node->colour == GREY) {
1886 
1887 			/* find next unprocessed child node */
1888 			do {
1889 				next = get_next_node(bvf, node);
1890 				if (next == NULL)
1891 					break;
1892 				set_edge_type(bvf, node, next);
1893 			} while (next->colour != WHITE);
1894 
1895 			if (next != NULL) {
1896 				/* proceed with next child */
1897 				next->prev_node = get_node_idx(bvf, node);
1898 				node = next;
1899 			} else {
1900 				/*
1901 				 * finished with current node and all its kids,
1902 				 * proceed with parent
1903 				 */
1904 				set_node_colour(bvf, node, BLACK);
1905 				node->cur_edge = 0;
1906 				node = get_prev_node(bvf, node);
1907 			}
1908 		} else
1909 			node = NULL;
1910 	}
1911 }
1912 
1913 /*
1914  * report unreachable instructions.
1915  */
1916 static void
1917 log_unreachable(const struct bpf_verifier *bvf)
1918 {
1919 	uint32_t i;
1920 	struct inst_node *node;
1921 	const struct ebpf_insn *ins;
1922 
1923 	for (i = 0; i != bvf->prm->nb_ins; i++) {
1924 
1925 		node = bvf->in + i;
1926 		ins = bvf->prm->ins + i;
1927 
1928 		if (node->colour == WHITE &&
1929 				ins->code != (BPF_LD | BPF_IMM | EBPF_DW))
1930 			RTE_BPF_LOG_LINE(ERR, "unreachable code at pc: %u;", i);
1931 	}
1932 }
1933 
1934 /*
1935  * report loops detected.
1936  */
1937 static void
1938 log_loop(const struct bpf_verifier *bvf)
1939 {
1940 	uint32_t i, j;
1941 	struct inst_node *node;
1942 
1943 	for (i = 0; i != bvf->prm->nb_ins; i++) {
1944 
1945 		node = bvf->in + i;
1946 		if (node->colour != BLACK)
1947 			continue;
1948 
1949 		for (j = 0; j != node->nb_edge; j++) {
1950 			if (node->edge_type[j] == BACK_EDGE)
1951 				RTE_BPF_LOG_LINE(ERR,
1952 					"loop at pc:%u --> pc:%u;",
1953 					i, node->edge_dest[j]);
1954 		}
1955 	}
1956 }
1957 
1958 /*
1959  * The first pass goes through all instructions in the set, checks that each
1960  * instruction is a valid one (correct syntax, valid field values, etc.)
1961  * and constructs the control flow graph (CFG).
1962  * Then depth-first search is performed over the constructed graph.
1963  * Programs with unreachable instructions and/or loops will be rejected.
1964  */
1965 static int
1966 validate(struct bpf_verifier *bvf)
1967 {
1968 	int32_t rc;
1969 	uint32_t i;
1970 	struct inst_node *node;
1971 	const struct ebpf_insn *ins;
1972 	const char *err;
1973 
1974 	rc = 0;
1975 	for (i = 0; i < bvf->prm->nb_ins; i++) {
1976 
1977 		ins = bvf->prm->ins + i;
1978 		node = bvf->in + i;
1979 
1980 		err = check_syntax(ins);
1981 		if (err != NULL) {
1982 			RTE_BPF_LOG_LINE(ERR, "%s: %s at pc: %u",
1983 				__func__, err, i);
1984 			rc |= -EINVAL;
1985 		}
1986 
1987 		/*
1988 		 * construct CFG: jcc nodes have two outgoing edges,
1989 		 * 'exit' nodes have none, all other nodes have exactly one
1990 		 * outgoing edge.
1991 		 */
1992 		switch (ins->code) {
1993 		case (BPF_JMP | EBPF_EXIT):
1994 			break;
1995 		case (BPF_JMP | BPF_JEQ | BPF_K):
1996 		case (BPF_JMP | EBPF_JNE | BPF_K):
1997 		case (BPF_JMP | BPF_JGT | BPF_K):
1998 		case (BPF_JMP | EBPF_JLT | BPF_K):
1999 		case (BPF_JMP | BPF_JGE | BPF_K):
2000 		case (BPF_JMP | EBPF_JLE | BPF_K):
2001 		case (BPF_JMP | EBPF_JSGT | BPF_K):
2002 		case (BPF_JMP | EBPF_JSLT | BPF_K):
2003 		case (BPF_JMP | EBPF_JSGE | BPF_K):
2004 		case (BPF_JMP | EBPF_JSLE | BPF_K):
2005 		case (BPF_JMP | BPF_JSET | BPF_K):
2006 		case (BPF_JMP | BPF_JEQ | BPF_X):
2007 		case (BPF_JMP | EBPF_JNE | BPF_X):
2008 		case (BPF_JMP | BPF_JGT | BPF_X):
2009 		case (BPF_JMP | EBPF_JLT | BPF_X):
2010 		case (BPF_JMP | BPF_JGE | BPF_X):
2011 		case (BPF_JMP | EBPF_JLE | BPF_X):
2012 		case (BPF_JMP | EBPF_JSGT | BPF_X):
2013 		case (BPF_JMP | EBPF_JSLT | BPF_X):
2014 		case (BPF_JMP | EBPF_JSGE | BPF_X):
2015 		case (BPF_JMP | EBPF_JSLE | BPF_X):
2016 		case (BPF_JMP | BPF_JSET | BPF_X):
2017 			rc |= add_edge(bvf, node, i + ins->off + 1);
2018 			rc |= add_edge(bvf, node, i + 1);
2019 			bvf->nb_jcc_nodes++;
2020 			break;
2021 		case (BPF_JMP | BPF_JA):
2022 			rc |= add_edge(bvf, node, i + ins->off + 1);
2023 			break;
2024 		/* load 64 bit immediate value */
2025 		case (BPF_LD | BPF_IMM | EBPF_DW):
2026 			rc |= add_edge(bvf, node, i + 2);
2027 			i++;
2028 			break;
2029 		case (BPF_LD | BPF_ABS | BPF_B):
2030 		case (BPF_LD | BPF_ABS | BPF_H):
2031 		case (BPF_LD | BPF_ABS | BPF_W):
2032 		case (BPF_LD | BPF_IND | BPF_B):
2033 		case (BPF_LD | BPF_IND | BPF_H):
2034 		case (BPF_LD | BPF_IND | BPF_W):
2035 			bvf->nb_ldmb_nodes++;
2036 			/* fallthrough */
2037 		default:
2038 			rc |= add_edge(bvf, node, i + 1);
2039 			break;
2040 		}
2041 
2042 		bvf->nb_nodes++;
2043 		bvf->node_colour[WHITE]++;
2044 	}
2045 
2046 	if (rc != 0)
2047 		return rc;
2048 
2049 	dfs(bvf);
2050 
2051 	RTE_LOG(DEBUG, BPF, "%s(%p) stats:\n"
2052 		"nb_nodes=%u;\n"
2053 		"nb_jcc_nodes=%u;\n"
2054 		"node_colour={[WHITE]=%u, [GREY]=%u, [BLACK]=%u};\n"
2055 		"edge_type={[UNKNOWN]=%u, [TREE]=%u, [BACK]=%u, [CROSS]=%u};\n",
2056 		__func__, bvf,
2057 		bvf->nb_nodes,
2058 		bvf->nb_jcc_nodes,
2059 		bvf->node_colour[WHITE], bvf->node_colour[GREY],
2060 			bvf->node_colour[BLACK],
2061 		bvf->edge_type[UNKNOWN_EDGE], bvf->edge_type[TREE_EDGE],
2062 		bvf->edge_type[BACK_EDGE], bvf->edge_type[CROSS_EDGE]);
2063 
2064 	if (bvf->node_colour[BLACK] != bvf->nb_nodes) {
2065 		RTE_BPF_LOG_LINE(ERR, "%s(%p) unreachable instructions;",
2066 			__func__, bvf);
2067 		log_unreachable(bvf);
2068 		return -EINVAL;
2069 	}
2070 
2071 	if (bvf->node_colour[GREY] != 0 || bvf->node_colour[WHITE] != 0 ||
2072 			bvf->edge_type[UNKNOWN_EDGE] != 0) {
2073 		RTE_BPF_LOG_LINE(ERR, "%s(%p) DFS internal error;",
2074 			__func__, bvf);
2075 		return -EINVAL;
2076 	}
2077 
2078 	if (bvf->edge_type[BACK_EDGE] != 0) {
2079 		RTE_BPF_LOG_LINE(ERR, "%s(%p) loops detected;",
2080 			__func__, bvf);
2081 		log_loop(bvf);
2082 		return -EINVAL;
2083 	}
2084 
2085 	return 0;
2086 }
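/*
 * A minimal sketch of a program that validate() accepts: every
 * instruction is reachable, the CFG is acyclic and execution always
 * reaches EXIT. The array below is purely illustrative and is not
 * used by the verifier itself.
 */
static const struct ebpf_insn example_valid_prog[] __rte_unused = {
	/* pc 0: r0 = 0 */
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 0,
	},
	/* pc 1: if (r1 == 0) goto pc 3; edges to pc 3 and pc 2 */
	{
		.code = (BPF_JMP | BPF_JEQ | BPF_K),
		.dst_reg = EBPF_REG_1,
		.off = 1,
		.imm = 0,
	},
	/* pc 2: r0 = 1 */
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 1,
	},
	/* pc 3: return r0 */
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};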
2087 
2088 /*
2089  * helper functions to get/free eval states.
2090  */
2091 static struct bpf_eval_state *
2092 pull_eval_state(struct bpf_verifier *bvf)
2093 {
2094 	uint32_t n;
2095 
2096 	n = bvf->evst_pool.cur;
2097 	if (n == bvf->evst_pool.num)
2098 		return NULL;
2099 
2100 	bvf->evst_pool.cur = n + 1;
2101 	return bvf->evst_pool.ent + n;
2102 }
2103 
2104 static void
2105 push_eval_state(struct bpf_verifier *bvf)
2106 {
2107 	bvf->evst_pool.cur--;
2108 }
2109 
2110 static void
2111 evst_pool_fini(struct bpf_verifier *bvf)
2112 {
2113 	bvf->evst = NULL;
2114 	free(bvf->evst_pool.ent);
2115 	memset(&bvf->evst_pool, 0, sizeof(bvf->evst_pool));
2116 }
2117 
2118 static int
2119 evst_pool_init(struct bpf_verifier *bvf)
2120 {
2121 	uint32_t n;
2122 
2123 	n = bvf->nb_jcc_nodes + 1;
2124 
2125 	bvf->evst_pool.ent = calloc(n, sizeof(bvf->evst_pool.ent[0]));
2126 	if (bvf->evst_pool.ent == NULL)
2127 		return -ENOMEM;
2128 
2129 	bvf->evst_pool.num = n;
2130 	bvf->evst_pool.cur = 0;
2131 
2132 	bvf->evst = pull_eval_state(bvf);
2133 	return 0;
2134 }
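/*
 * Pool sizing rationale: a state is parked only at jcc nodes (the ones
 * with two outgoing edges), so the number of states alive at any time
 * is bounded by the number of jcc nodes on the current DFS path plus
 * the working state, hence nb_jcc_nodes + 1 entries always suffice.
 */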
2135 
2136 /*
2137  * Save current eval state.
2138  */
2139 static int
2140 save_eval_state(struct bpf_verifier *bvf, struct inst_node *node)
2141 {
2142 	struct bpf_eval_state *st;
2143 
2144 	/* get new eval_state for this node */
2145 	st = pull_eval_state(bvf);
2146 	if (st == NULL) {
2147 		RTE_BPF_LOG_LINE(ERR,
2148 			"%s: internal error (out of space) at pc: %u",
2149 			__func__, get_node_idx(bvf, node));
2150 		return -ENOMEM;
2151 	}
2152 
2153 	/* make a copy of current state */
2154 	memcpy(st, bvf->evst, sizeof(*st));
2155 
2156 	/* swap current state with new one */
2157 	node->evst = bvf->evst;
2158 	bvf->evst = st;
2159 
2160 	RTE_BPF_LOG_LINE(DEBUG, "%s(bvf=%p,node=%u) old/new states: %p/%p;",
2161 		__func__, bvf, get_node_idx(bvf, node), node->evst, bvf->evst);
2162 
2163 	return 0;
2164 }
2165 
2166 /*
2167  * Restore previous eval state and mark current eval state as free.
2168  */
2169 static void
2170 restore_eval_state(struct bpf_verifier *bvf, struct inst_node *node)
2171 {
2172 	RTE_BPF_LOG_LINE(DEBUG, "%s(bvf=%p,node=%u) old/new states: %p/%p;",
2173 		__func__, bvf, get_node_idx(bvf, node), bvf->evst, node->evst);
2174 
2175 	bvf->evst = node->evst;
2176 	node->evst = NULL;
2177 	push_eval_state(bvf);
2178 }
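/*
 * Pairing of the two calls during evaluate() (illustrative): at a jcc
 * node, save_eval_state() parks the pre-branch state in node->evst and
 * continues with a scratch copy; each outgoing edge is explored from
 * that point, and just before the last edge is taken,
 * restore_eval_state() brings the pre-branch state back and returns
 * the scratch entry to the pool.
 */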
2179 
2180 static void
2181 log_dbg_eval_state(const struct bpf_verifier *bvf, const struct ebpf_insn *ins,
2182 	uint32_t pc)
2183 {
2184 	const struct bpf_eval_state *st;
2185 	const struct bpf_reg_val *rv;
2186 
2187 	RTE_BPF_LOG_LINE(DEBUG, "%s(pc=%u):", __func__, pc);
2188 
2189 	st = bvf->evst;
2190 	rv = st->rv + ins->dst_reg;
2191 
2192 	RTE_LOG(DEBUG, BPF,
2193 		"r%u={\n"
2194 		"\tv={type=%u, size=%zu},\n"
2195 		"\tmask=0x%" PRIx64 ",\n"
2196 		"\tu={min=0x%" PRIx64 ", max=0x%" PRIx64 "},\n"
2197 		"\ts={min=%" PRId64 ", max=%" PRId64 "},\n"
2198 		"};\n",
2199 		ins->dst_reg,
2200 		rv->v.type, rv->v.size,
2201 		rv->mask,
2202 		rv->u.min, rv->u.max,
2203 		rv->s.min, rv->s.max);
2204 }
2205 
2206 /*
2207  * Do a second pass through the CFG and try to evaluate instructions
2208  * via each possible path.
2209  * Right now the evaluation functionality is quite limited.
2210  * Extra checks still need to be added for:
2211  * - use/return of uninitialized registers;
2212  * - use of uninitialized data from the stack;
2213  * - memory boundary violations.
2214  */
2215 static int
2216 evaluate(struct bpf_verifier *bvf)
2217 {
2218 	int32_t rc;
2219 	uint32_t idx, op;
2220 	const char *err;
2221 	const struct ebpf_insn *ins;
2222 	struct inst_node *next, *node;
2223 
2224 	/* initial state of frame pointer */
2225 	static const struct bpf_reg_val rvfp = {
2226 		.v = {
2227 			.type = BPF_ARG_PTR_STACK,
2228 			.size = MAX_BPF_STACK_SIZE,
2229 		},
2230 		.mask = UINT64_MAX,
2231 		.u = {.min = MAX_BPF_STACK_SIZE, .max = MAX_BPF_STACK_SIZE},
2232 		.s = {.min = MAX_BPF_STACK_SIZE, .max = MAX_BPF_STACK_SIZE},
2233 	};
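	/*
	 * R10 is the read-only frame pointer: it always points to the
	 * top of a MAX_BPF_STACK_SIZE byte stack, so both its signed
	 * and unsigned bounds collapse to that single value and stack
	 * accesses use negative offsets from it.
	 */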
2234 
2235 	bvf->evst->rv[EBPF_REG_1].v = bvf->prm->prog_arg;
2236 	bvf->evst->rv[EBPF_REG_1].mask = UINT64_MAX;
2237 	if (bvf->prm->prog_arg.type == RTE_BPF_ARG_RAW)
2238 		eval_max_bound(bvf->evst->rv + EBPF_REG_1, UINT64_MAX);
2239 
2240 	bvf->evst->rv[EBPF_REG_10] = rvfp;
2241 
2242 	ins = bvf->prm->ins;
2243 	node = bvf->in;
2244 	next = node;
2245 	rc = 0;
2246 
2247 	while (node != NULL && rc == 0) {
2248 
2249 		/*
2250 		 * current node evaluation, make sure we evaluate
2251 		 * each node only once.
2252 		 */
2253 		if (next != NULL) {
2254 
2255 			bvf->evin = node;
2256 			idx = get_node_idx(bvf, node);
2257 			op = ins[idx].code;
2258 
2259 			/* for a jcc node, make a copy of the evaluation state */
2260 			if (node->nb_edge > 1)
2261 				rc |= save_eval_state(bvf, node);
2262 
2263 			if (ins_chk[op].eval != NULL && rc == 0) {
2264 				err = ins_chk[op].eval(bvf, ins + idx);
2265 				if (err != NULL) {
2266 					RTE_BPF_LOG_LINE(ERR, "%s: %s at pc: %u",
2267 						__func__, err, idx);
2268 					rc = -EINVAL;
2269 				}
2270 			}
2271 
2272 			log_dbg_eval_state(bvf, ins + idx, idx);
2273 			bvf->evin = NULL;
2274 		}
2275 
2276 		/* proceed through CFG */
2277 		next = get_next_node(bvf, node);
2278 		if (next != NULL) {
2279 
2280 			/* proceed with next child */
2281 			if (node->cur_edge == node->nb_edge &&
2282 					node->evst != NULL)
2283 				restore_eval_state(bvf, node);
2284 
2285 			next->prev_node = get_node_idx(bvf, node);
2286 			node = next;
2287 		} else {
2288 			/*
2289 			 * finished with the current node and all its children,
2290 			 * proceed with the parent
2291 			 */
2292 			node->cur_edge = 0;
2293 			node = get_prev_node(bvf, node);
2294 
2295 			/* finished */
2296 			if (node == bvf->in)
2297 				node = NULL;
2298 		}
2299 	}
2300 
2301 	return rc;
2302 }
2303 
2304 int
2305 __rte_bpf_validate(struct rte_bpf *bpf)
2306 {
2307 	int32_t rc;
2308 	struct bpf_verifier bvf;
2309 
2310 	/* check input argument type, don't allow mbuf ptr on 32-bit */
2311 	if (bpf->prm.prog_arg.type != RTE_BPF_ARG_RAW &&
2312 			bpf->prm.prog_arg.type != RTE_BPF_ARG_PTR &&
2313 			(sizeof(uint64_t) != sizeof(uintptr_t) ||
2314 			bpf->prm.prog_arg.type != RTE_BPF_ARG_PTR_MBUF)) {
2315 		RTE_BPF_LOG_LINE(ERR, "%s: unsupported argument type", __func__);
2316 		return -ENOTSUP;
2317 	}
2318 
2319 	memset(&bvf, 0, sizeof(bvf));
2320 	bvf.prm = &bpf->prm;
2321 	bvf.in = calloc(bpf->prm.nb_ins, sizeof(bvf.in[0]));
2322 	if (bvf.in == NULL)
2323 		return -ENOMEM;
2324 
2325 	rc = validate(&bvf);
2326 
2327 	if (rc == 0) {
2328 		rc = evst_pool_init(&bvf);
2329 		if (rc == 0)
2330 			rc = evaluate(&bvf);
2331 		evst_pool_fini(&bvf);
2332 	}
2333 
2334 	free(bvf.in);
2335 
2336 	/* copy collected info */
2337 	if (rc == 0) {
2338 		bpf->stack_sz = bvf.stack_sz;
2339 
2340 		/* for LD_ABS/LD_IND, we'll need extra space on the stack */
2341 		if (bvf.nb_ldmb_nodes != 0)
2342 			bpf->stack_sz = RTE_ALIGN_CEIL(bpf->stack_sz +
2343 				sizeof(uint64_t), sizeof(uint64_t));
2344 	}
2345 
2346 	return rc;
2347 }
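/*
 * Usage sketch: __rte_bpf_validate() is not called by applications
 * directly, it runs as part of program load. A minimal caller, assuming
 * rte_bpf_load() invokes the validator internally, could look like the
 * (purely illustrative) helper below.
 */
static __rte_unused struct rte_bpf *
example_load(const struct ebpf_insn *ins, uint32_t nb_ins)
{
	struct rte_bpf_prm prm;

	memset(&prm, 0, sizeof(prm));
	prm.ins = ins;
	prm.nb_ins = nb_ins;
	/* single raw 64-bit input argument */
	prm.prog_arg.type = RTE_BPF_ARG_RAW;
	prm.prog_arg.size = sizeof(uint64_t);

	/* returns NULL and sets rte_errno on validation failure */
	return rte_bpf_load(&prm);
}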
2348