xref: /dpdk/lib/bpf/bpf_validate.c (revision cbb44143be74e7dd19853f4f360b51104e02380f)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4 
5 #include <stdarg.h>
6 #include <stdio.h>
7 #include <string.h>
8 #include <errno.h>
9 #include <stdint.h>
10 #include <inttypes.h>
11 
12 #include <rte_common.h>
13 #include <rte_eal.h>
14 #include <rte_byteorder.h>
15 
16 #include "bpf_impl.h"
17 
18 #define BPF_ARG_PTR_STACK RTE_BPF_ARG_RESERVED
19 
20 struct bpf_reg_val {
21 	struct rte_bpf_arg v;
22 	uint64_t mask;
23 	struct {
24 		int64_t min;
25 		int64_t max;
26 	} s;
27 	struct {
28 		uint64_t min;
29 		uint64_t max;
30 	} u;
31 };
32 
33 struct bpf_eval_state {
34 	struct bpf_reg_val rv[EBPF_REG_NUM];
35 	struct bpf_reg_val sv[MAX_BPF_STACK_SIZE / sizeof(uint64_t)];
36 };
37 
38 /* possible instruction node colour */
39 enum {
40 	WHITE,
41 	GREY,
42 	BLACK,
43 	MAX_NODE_COLOUR
44 };
45 
46 /* possible edge types */
47 enum {
48 	UNKNOWN_EDGE,
49 	TREE_EDGE,
50 	BACK_EDGE,
51 	CROSS_EDGE,
52 	MAX_EDGE_TYPE
53 };
54 
55 #define	MAX_EDGES	2
56 
57 struct inst_node {
58 	uint8_t colour;
59 	uint8_t nb_edge:4;
60 	uint8_t cur_edge:4;
61 	uint8_t edge_type[MAX_EDGES];
62 	uint32_t edge_dest[MAX_EDGES];
63 	uint32_t prev_node;
64 	struct bpf_eval_state *evst;
65 };
66 
67 struct bpf_verifier {
68 	const struct rte_bpf_prm *prm;
69 	struct inst_node *in;
70 	uint64_t stack_sz;
71 	uint32_t nb_nodes;
72 	uint32_t nb_jcc_nodes;
73 	uint32_t nb_ldmb_nodes;
74 	uint32_t node_colour[MAX_NODE_COLOUR];
75 	uint32_t edge_type[MAX_EDGE_TYPE];
76 	struct bpf_eval_state *evst;
77 	struct inst_node *evin;
78 	struct {
79 		uint32_t num;
80 		uint32_t cur;
81 		struct bpf_eval_state *ent;
82 	} evst_pool;
83 };
84 
85 struct bpf_ins_check {
86 	struct {
87 		uint16_t dreg;
88 		uint16_t sreg;
89 	} mask;
90 	struct {
91 		uint16_t min;
92 		uint16_t max;
93 	} off;
94 	struct {
95 		uint32_t min;
96 		uint32_t max;
97 	} imm;
98 	const char * (*check)(const struct ebpf_insn *);
99 	const char * (*eval)(struct bpf_verifier *, const struct ebpf_insn *);
100 };
101 
102 #define	ALL_REGS	RTE_LEN2MASK(EBPF_REG_NUM, uint16_t)
103 #define	WRT_REGS	RTE_LEN2MASK(EBPF_REG_10, uint16_t)
104 #define	ZERO_REG	RTE_LEN2MASK(EBPF_REG_1, uint16_t)
105 
106 /* For LD_IND R6 is an implicit CTX register. */
107 #define	IND_SRC_REGS	(WRT_REGS ^ 1 << EBPF_REG_6)
108 
109 /*
110  * check and evaluate functions for particular instruction types.
111  */
112 
113 static const char *
114 check_alu_bele(const struct ebpf_insn *ins)
115 {
116 	if (ins->imm != 16 && ins->imm != 32 && ins->imm != 64)
117 		return "invalid imm field";
118 	return NULL;
119 }
120 
121 static const char *
122 eval_exit(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
123 {
124 	RTE_SET_USED(ins);
125 	if (bvf->evst->rv[EBPF_REG_0].v.type == RTE_BPF_ARG_UNDEF)
126 		return "undefined return value";
127 	return NULL;
128 }
129 
130 /* set up the max possible bounds for the given mask */
131 static void
132 eval_umax_bound(struct bpf_reg_val *rv, uint64_t mask)
133 {
134 	rv->u.max = mask;
135 	rv->u.min = 0;
136 }
137 
138 static void
139 eval_smax_bound(struct bpf_reg_val *rv, uint64_t mask)
140 {
141 	rv->s.max = mask >> 1;
142 	rv->s.min = rv->s.max ^ UINT64_MAX;
143 }
144 
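/*
 * Editorial illustration for the two helpers above: for
 * mask == UINT8_MAX (0xff), eval_umax_bound() gives u = [0, 0xff],
 * while eval_smax_bound() gives s.max = 0xff >> 1 = 0x7f and
 * s.min = 0x7f ^ UINT64_MAX = -0x80, i.e. the widest unsigned and
 * signed ranges representable under that mask.
 */
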
145 static void
146 eval_max_bound(struct bpf_reg_val *rv, uint64_t mask)
147 {
148 	eval_umax_bound(rv, mask);
149 	eval_smax_bound(rv, mask);
150 }
151 
152 static void
153 eval_fill_max_bound(struct bpf_reg_val *rv, uint64_t mask)
154 {
155 	eval_max_bound(rv, mask);
156 	rv->v.type = RTE_BPF_ARG_RAW;
157 	rv->mask = mask;
158 }
159 
160 static void
161 eval_fill_imm64(struct bpf_reg_val *rv, uint64_t mask, uint64_t val)
162 {
163 	rv->mask = mask;
164 	rv->s.min = val;
165 	rv->s.max = val;
166 	rv->u.min = val;
167 	rv->u.max = val;
168 }
169 
170 static void
171 eval_fill_imm(struct bpf_reg_val *rv, uint64_t mask, int32_t imm)
172 {
173 	uint64_t v;
174 
175 	v = (uint64_t)imm & mask;
176 
177 	rv->v.type = RTE_BPF_ARG_RAW;
178 	eval_fill_imm64(rv, mask, v);
179 }
180 
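/*
 * Editorial illustration: eval_fill_imm() truncates the sign-extended
 * immediate to the operation mask, so imm = -1 under a 32-bit mask
 * becomes v = 0xffffffff, and the register is then tracked as the
 * constant [0xffffffff, 0xffffffff] in both value domains.
 */
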
181 static const char *
182 eval_ld_imm64(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
183 {
184 	uint32_t i;
185 	uint64_t val;
186 	struct bpf_reg_val *rd;
187 
188 	val = (uint32_t)ins[0].imm | (uint64_t)(uint32_t)ins[1].imm << 32;
189 
190 	rd = bvf->evst->rv + ins->dst_reg;
191 	rd->v.type = RTE_BPF_ARG_RAW;
192 	eval_fill_imm64(rd, UINT64_MAX, val);
193 
194 	for (i = 0; i != bvf->prm->nb_xsym; i++) {
195 
196 		/* load of external variable */
197 		if (bvf->prm->xsym[i].type == RTE_BPF_XTYPE_VAR &&
198 				(uintptr_t)bvf->prm->xsym[i].var.val == val) {
199 			rd->v = bvf->prm->xsym[i].var.desc;
200 			eval_fill_imm64(rd, UINT64_MAX, 0);
201 			break;
202 		}
203 	}
204 
205 	return NULL;
206 }
207 
208 static void
209 eval_apply_mask(struct bpf_reg_val *rv, uint64_t mask)
210 {
211 	struct bpf_reg_val rt;
212 
213 	rt.u.min = rv->u.min & mask;
214 	rt.u.max = rv->u.max & mask;
215 	if (rt.u.min != rv->u.min || rt.u.max != rv->u.max) {
216 		rv->u.max = RTE_MAX(rt.u.max, mask);
217 		rv->u.min = 0;
218 	}
219 
220 	eval_smax_bound(&rt, mask);
221 	rv->s.max = RTE_MIN(rt.s.max, rv->s.max);
222 	rv->s.min = RTE_MAX(rt.s.min, rv->s.min);
223 
224 	rv->mask = mask;
225 }
226 
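/*
 * Editorial illustration for eval_apply_mask(): a register known to be
 * in u = [0x100, 0x1ff] masked with 0xff yields rt = [0x00, 0xff],
 * which differs from the original bounds, so the unsigned range is
 * conservatively widened to [0, 0xff].
 */
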
227 static void
228 eval_add(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, uint64_t msk)
229 {
230 	struct bpf_reg_val rv;
231 
232 	rv.u.min = (rd->u.min + rs->u.min) & msk;
233 	rv.u.max = (rd->u.max + rs->u.max) & msk;
234 	rv.s.min = (rd->s.min + rs->s.min) & msk;
235 	rv.s.max = (rd->s.max + rs->s.max) & msk;
236 
237 	/*
238 	 * if at least one of the operands is not constant,
239 	 * then check for overflow
240 	 */
241 	if ((rd->u.min != rd->u.max || rs->u.min != rs->u.max) &&
242 			(rv.u.min < rd->u.min || rv.u.max < rd->u.max))
243 		eval_umax_bound(&rv, msk);
244 
245 	if ((rd->s.min != rd->s.max || rs->s.min != rs->s.max) &&
246 			(((rs->s.min < 0 && rv.s.min > rd->s.min) ||
247 			rv.s.min < rd->s.min) ||
248 			((rs->s.max < 0 && rv.s.max > rd->s.max) ||
249 				rv.s.max < rd->s.max)))
250 		eval_smax_bound(&rv, msk);
251 
252 	rd->s = rv.s;
253 	rd->u = rv.u;
254 }
255 
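/*
 * Editorial illustration of the overflow check in eval_add(): with
 * msk == UINT64_MAX, rd.u = [0, UINT64_MAX] and rs.u = [1, 1], the sum
 * rv.u.max wraps to 0 (< rd.u.max) while rd is not a constant, so the
 * result is conservatively reset to the full [0, UINT64_MAX] range.
 */
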
256 static void
257 eval_sub(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, uint64_t msk)
258 {
259 	struct bpf_reg_val rv;
260 
261 	rv.u.min = (rd->u.min - rs->u.max) & msk;
262 	rv.u.max = (rd->u.max - rs->u.min) & msk;
263 	rv.s.min = (rd->s.min - rs->s.max) & msk;
264 	rv.s.max = (rd->s.max - rs->s.min) & msk;
265 
266 	/*
267 	 * if at least one of the operands is not constant,
268 	 * then check for overflow
269 	 */
270 	if ((rd->u.min != rd->u.max || rs->u.min != rs->u.max) &&
271 			(rv.u.min > rd->u.min || rv.u.max > rd->u.max))
272 		eval_umax_bound(&rv, msk);
273 
274 	if ((rd->s.min != rd->s.max || rs->s.min != rs->s.max) &&
275 			(((rs->s.min < 0 && rv.s.min < rd->s.min) ||
276 			rv.s.min > rd->s.min) ||
277 			((rs->s.max < 0 && rv.s.max < rd->s.max) ||
278 			rv.s.max > rd->s.max)))
279 		eval_smax_bound(&rv, msk);
280 
281 	rd->s = rv.s;
282 	rd->u = rv.u;
283 }
284 
285 static void
286 eval_lsh(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
287 	uint64_t msk)
288 {
289 	/* check if shift value is less than max result bits */
290 	if (rs->u.max >= opsz) {
291 		eval_max_bound(rd, msk);
292 		return;
293 	}
294 
295 	/* check for overflow */
296 	if (rd->u.max > RTE_LEN2MASK(opsz - rs->u.max, uint64_t))
297 		eval_umax_bound(rd, msk);
298 	else {
299 		rd->u.max <<= rs->u.max;
300 		rd->u.min <<= rs->u.min;
301 	}
302 
303 	/* check that dreg values are and would remain always positive */
304 	if ((uint64_t)rd->s.min >> (opsz - 1) != 0 || rd->s.max >=
305 			RTE_LEN2MASK(opsz - rs->u.max - 1, int64_t))
306 		eval_smax_bound(rd, msk);
307 	else {
308 		rd->s.max <<= rs->u.max;
309 		rd->s.min <<= rs->u.min;
310 	}
311 }
312 
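/*
 * Editorial illustration for eval_lsh(): with opsz == 64,
 * rd.u = [1, 3] and rs.u = [4, 8], the shift cannot overflow since
 * 3 <= RTE_LEN2MASK(64 - 8), so the unsigned range becomes
 * [1 << 4, 3 << 8] = [16, 768].
 */
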
313 static void
314 eval_rsh(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
315 	uint64_t msk)
316 {
317 	/* check if shift value is less than max result bits */
318 	if (rs->u.max >= opsz) {
319 		eval_max_bound(rd, msk);
320 		return;
321 	}
322 
323 	rd->u.max >>= rs->u.min;
324 	rd->u.min >>= rs->u.max;
325 
326 	/* check that dreg values are always positive */
327 	if ((uint64_t)rd->s.min >> (opsz - 1) != 0)
328 		eval_smax_bound(rd, msk);
329 	else {
330 		rd->s.max >>= rs->u.min;
331 		rd->s.min >>= rs->u.max;
332 	}
333 }
334 
335 static void
336 eval_arsh(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
337 	uint64_t msk)
338 {
339 	uint32_t shv;
340 
341 	/* check if shift value is less than max result bits */
342 	if (rs->u.max >= opsz) {
343 		eval_max_bound(rd, msk);
344 		return;
345 	}
346 
347 	rd->u.max = (int64_t)rd->u.max >> rs->u.min;
348 	rd->u.min = (int64_t)rd->u.min >> rs->u.max;
349 
350 	/* if we have 32-bit values - extend them to 64-bit */
351 	if (opsz == sizeof(uint32_t) * CHAR_BIT) {
352 		rd->s.min <<= opsz;
353 		rd->s.max <<= opsz;
354 		shv = opsz;
355 	} else
356 		shv = 0;
357 
358 	if (rd->s.min < 0)
359 		rd->s.min = (rd->s.min >> (rs->u.min + shv)) & msk;
360 	else
361 		rd->s.min = (rd->s.min >> (rs->u.max + shv)) & msk;
362 
363 	if (rd->s.max < 0)
364 		rd->s.max = (rd->s.max >> (rs->u.max + shv)) & msk;
365 	else
366 		rd->s.max = (rd->s.max >> (rs->u.min + shv)) & msk;
367 }
368 
369 static uint64_t
370 eval_umax_bits(uint64_t v, size_t opsz)
371 {
372 	if (v == 0)
373 		return 0;
374 
375 	v = __builtin_clzll(v);
376 	return RTE_LEN2MASK(opsz - v, uint64_t);
377 }
378 
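/*
 * Editorial illustration: for v = 0x1234 and opsz == 64,
 * __builtin_clzll(v) == 51, so the value occupies 64 - 51 = 13 bits
 * and eval_umax_bits() returns RTE_LEN2MASK(13) == 0x1fff, the largest
 * value with the same number of significant bits.
 */
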
379 /* estimate max possible value for (v1 & v2) */
380 static uint64_t
381 eval_uand_max(uint64_t v1, uint64_t v2, size_t opsz)
382 {
383 	v1 = eval_umax_bits(v1, opsz);
384 	v2 = eval_umax_bits(v2, opsz);
385 	return (v1 & v2);
386 }
387 
388 /* estimate max possible value for (v1 | v2) */
389 static uint64_t
390 eval_uor_max(uint64_t v1, uint64_t v2, size_t opsz)
391 {
392 	v1 = eval_umax_bits(v1, opsz);
393 	v2 = eval_umax_bits(v2, opsz);
394 	return (v1 | v2);
395 }
396 
397 static void
398 eval_and(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
399 	uint64_t msk)
400 {
401 	/* both operands are constants */
402 	if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
403 		rd->u.min &= rs->u.min;
404 		rd->u.max &= rs->u.max;
405 	} else {
406 		rd->u.max = eval_uand_max(rd->u.max, rs->u.max, opsz);
407 		rd->u.min &= rs->u.min;
408 	}
409 
410 	/* both operands are constants */
411 	if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
412 		rd->s.min &= rs->s.min;
413 		rd->s.max &= rs->s.max;
414 	/* at least one of the operands is non-negative */
415 	} else if (rd->s.min >= 0 || rs->s.min >= 0) {
416 		rd->s.max = eval_uand_max(rd->s.max & (msk >> 1),
417 			rs->s.max & (msk >> 1), opsz);
418 		rd->s.min &= rs->s.min;
419 	} else
420 		eval_smax_bound(rd, msk);
421 }
422 
423 static void
424 eval_or(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
425 	uint64_t msk)
426 {
427 	/* both operands are constants */
428 	if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
429 		rd->u.min |= rs->u.min;
430 		rd->u.max |= rs->u.max;
431 	} else {
432 		rd->u.max = eval_uor_max(rd->u.max, rs->u.max, opsz);
433 		rd->u.min |= rs->u.min;
434 	}
435 
436 	/* both operands are constants */
437 	if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
438 		rd->s.min |= rs->s.min;
439 		rd->s.max |= rs->s.max;
440 
441 	/* both operands are non-negative */
442 	} else if (rd->s.min >= 0 && rs->s.min >= 0) {
443 		rd->s.max = eval_uor_max(rd->s.max, rs->s.max, opsz);
444 		rd->s.min |= rs->s.min;
445 	} else
446 		eval_smax_bound(rd, msk);
447 }
448 
449 static void
450 eval_xor(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
451 	uint64_t msk)
452 {
453 	/* both operands are constants */
454 	if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
455 		rd->u.min ^= rs->u.min;
456 		rd->u.max ^= rs->u.max;
457 	} else {
458 		rd->u.max = eval_uor_max(rd->u.max, rs->u.max, opsz);
459 		rd->u.min = 0;
460 	}
461 
462 	/* both operands are constants */
463 	if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
464 		rd->s.min ^= rs->s.min;
465 		rd->s.max ^= rs->s.max;
466 
467 	/* both operands are non-negative */
468 	} else if (rd->s.min >= 0 && rs->s.min >= 0) {
469 		rd->s.max = eval_uor_max(rd->s.max, rs->s.max, opsz);
470 		rd->s.min = 0;
471 	} else
472 		eval_smax_bound(rd, msk);
473 }
474 
475 static void
476 eval_mul(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
477 	uint64_t msk)
478 {
479 	/* both operands are constants */
480 	if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
481 		rd->u.min = (rd->u.min * rs->u.min) & msk;
482 		rd->u.max = (rd->u.max * rs->u.max) & msk;
483 	/* check for overflow */
484 	} else if (rd->u.max <= msk >> opsz / 2 && rs->u.max <= msk >> opsz / 2) {
485 		rd->u.max *= rs->u.max;
486 		rd->u.min *= rs->u.min;
487 	} else
488 		eval_umax_bound(rd, msk);
489 
490 	/* both operands are constants */
491 	if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
492 		rd->s.min = (rd->s.min * rs->s.min) & msk;
493 		rd->s.max = (rd->s.max * rs->s.max) & msk;
494 	/* check that both operands are non-negative and no overflow */
495 	} else if (rd->s.min >= 0 && rs->s.min >= 0) {
496 		rd->s.max *= rs->s.max;
497 		rd->s.min *= rs->s.min;
498 	} else
499 		eval_smax_bound(rd, msk);
500 }
501 
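/*
 * Editorial illustration of the eval_mul() overflow check: for a
 * 64-bit operation msk >> opsz / 2 equals UINT32_MAX, so when both
 * u.max bounds fit in 32 bits the products of the interval endpoints
 * cannot exceed 64 bits and are used directly.
 */
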
502 static const char *
503 eval_divmod(uint32_t op, struct bpf_reg_val *rd, struct bpf_reg_val *rs,
504 	size_t opsz, uint64_t msk)
505 {
506 	/* both operands are constants */
507 	if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
508 		if (rs->u.max == 0)
509 			return "division by 0";
510 		if (op == BPF_DIV) {
511 			rd->u.min /= rs->u.min;
512 			rd->u.max /= rs->u.max;
513 		} else {
514 			rd->u.min %= rs->u.min;
515 			rd->u.max %= rs->u.max;
516 		}
517 	} else {
518 		if (op == BPF_MOD)
519 			rd->u.max = RTE_MIN(rd->u.max, rs->u.max - 1);
520 		else
521 			rd->u.max = rd->u.max;
522 		rd->u.min = 0;
523 	}
524 
525 	/* if we have 32-bit values - extend them to 64-bit */
526 	if (opsz == sizeof(uint32_t) * CHAR_BIT) {
527 		rd->s.min = (int32_t)rd->s.min;
528 		rd->s.max = (int32_t)rd->s.max;
529 		rs->s.min = (int32_t)rs->s.min;
530 		rs->s.max = (int32_t)rs->s.max;
531 	}
532 
533 	/* both operands are constants */
534 	if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
535 		if (rs->s.max == 0)
536 			return "division by 0";
537 		if (op == BPF_DIV) {
538 			rd->s.min /= rs->s.min;
539 			rd->s.max /= rs->s.max;
540 		} else {
541 			rd->s.min %= rs->s.min;
542 			rd->s.max %= rs->s.max;
543 		}
544 	} else if (op == BPF_MOD) {
545 		rd->s.max = RTE_MAX(rd->s.max, 0);
546 		rd->s.min = RTE_MIN(rd->s.min, 0);
547 	} else
548 		eval_smax_bound(rd, msk);
549 
550 	rd->s.max &= msk;
551 	rd->s.min &= msk;
552 
553 	return NULL;
554 }
555 
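/*
 * Editorial illustration for eval_divmod(): for BPF_MOD with a
 * non-constant dividend and a divisor bounded by u = [1, 10], the code
 * above clamps the result to u = [0, rs.u.max - 1] = [0, 9], matching
 * the semantics of x % 10 on unsigned values.
 */
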
556 static void
557 eval_neg(struct bpf_reg_val *rd, size_t opsz, uint64_t msk)
558 {
559 	uint64_t ux, uy;
560 	int64_t sx, sy;
561 
562 	/* if we have 32-bit values - extend them to 64-bit */
563 	if (opsz == sizeof(uint32_t) * CHAR_BIT) {
564 		rd->u.min = (int32_t)rd->u.min;
565 		rd->u.max = (int32_t)rd->u.max;
566 	}
567 
568 	ux = -(int64_t)rd->u.min & msk;
569 	uy = -(int64_t)rd->u.max & msk;
570 
571 	rd->u.max = RTE_MAX(ux, uy);
572 	rd->u.min = RTE_MIN(ux, uy);
573 
574 	/* if we have 32-bit values - extend them to 64-bit */
575 	if (opsz == sizeof(uint32_t) * CHAR_BIT) {
576 		rd->s.min = (int32_t)rd->s.min;
577 		rd->s.max = (int32_t)rd->s.max;
578 	}
579 
580 	sx = -rd->s.min & msk;
581 	sy = -rd->s.max & msk;
582 
583 	rd->s.max = RTE_MAX(sx, sy);
584 	rd->s.min = RTE_MIN(sx, sy);
585 }
586 
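/*
 * Editorial illustration for eval_neg(): with opsz == 64 and
 * u = [1, 5], negation gives -1 == UINT64_MAX and -5 == UINT64_MAX - 4,
 * so the new unsigned range is [UINT64_MAX - 4, UINT64_MAX].
 */
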
587 static const char *
588 eval_ld_mbuf(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
589 {
590 	uint32_t i, mode;
591 	struct bpf_reg_val *rv, ri, rs;
592 
593 	mode = BPF_MODE(ins->code);
594 
595 	/* R6 is an implicit input that must contain pointer to mbuf */
596 	if (bvf->evst->rv[EBPF_REG_6].v.type != RTE_BPF_ARG_PTR_MBUF)
597 		return "invalid type for implicit ctx register";
598 
599 	if (mode == BPF_IND) {
600 		rs = bvf->evst->rv[ins->src_reg];
601 		if (rs.v.type != RTE_BPF_ARG_RAW)
602 			return "unexpected type for src register";
603 
604 		eval_fill_imm(&ri, UINT64_MAX, ins->imm);
605 		eval_add(&rs, &ri, UINT64_MAX);
606 
607 		if (rs.s.max < 0 || rs.u.min > UINT32_MAX)
608 			return "mbuf boundary violation";
609 	}
610 
611 	/* R1-R5 scratch registers */
612 	for (i = EBPF_REG_1; i != EBPF_REG_6; i++)
613 		bvf->evst->rv[i].v.type = RTE_BPF_ARG_UNDEF;
614 
615 	/* R0 is an implicit output, contains data fetched from the packet */
616 	rv = bvf->evst->rv + EBPF_REG_0;
617 	rv->v.size = bpf_size(BPF_SIZE(ins->code));
618 	eval_fill_max_bound(rv, RTE_LEN2MASK(rv->v.size * CHAR_BIT, uint64_t));
619 
620 	return NULL;
621 }
622 
623 /*
624  * check that destination and source operands are in a defined state.
625  */
626 static const char *
627 eval_defined(const struct bpf_reg_val *dst, const struct bpf_reg_val *src)
628 {
629 	if (dst != NULL && dst->v.type == RTE_BPF_ARG_UNDEF)
630 		return "dest reg value is undefined";
631 	if (src != NULL && src->v.type == RTE_BPF_ARG_UNDEF)
632 		return "src reg value is undefined";
633 	return NULL;
634 }
635 
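/*
 * Editorial note: eval_alu() is the common evaluator for ALU/ALU64
 * instructions: it masks both operands to the operation width, rejects
 * reads of undefined registers, then dispatches to the per-operation
 * bound-tracking helpers above.
 */
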
636 static const char *
637 eval_alu(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
638 {
639 	uint64_t msk;
640 	uint32_t op;
641 	size_t opsz;
642 	const char *err;
643 	struct bpf_eval_state *st;
644 	struct bpf_reg_val *rd, rs;
645 
646 	opsz = (BPF_CLASS(ins->code) == BPF_ALU) ?
647 		sizeof(uint32_t) : sizeof(uint64_t);
648 	opsz = opsz * CHAR_BIT;
649 	msk = RTE_LEN2MASK(opsz, uint64_t);
650 
651 	st = bvf->evst;
652 	rd = st->rv + ins->dst_reg;
653 
654 	if (BPF_SRC(ins->code) == BPF_X) {
655 		rs = st->rv[ins->src_reg];
656 		eval_apply_mask(&rs, msk);
657 	} else
658 		eval_fill_imm(&rs, msk, ins->imm);
659 
660 	eval_apply_mask(rd, msk);
661 
662 	op = BPF_OP(ins->code);
663 
664 	/* Allow self-xor as way to zero register */
665 	if (op == BPF_XOR && BPF_SRC(ins->code) == BPF_X &&
666 	    ins->src_reg == ins->dst_reg) {
667 		eval_fill_imm(&rs, UINT64_MAX, 0);
668 		eval_fill_imm(rd, UINT64_MAX, 0);
669 	}
670 
671 	err = eval_defined((op != EBPF_MOV) ? rd : NULL,
672 			   (op != BPF_NEG) ? &rs : NULL);
673 	if (err != NULL)
674 		return err;
675 
676 	if (op == BPF_ADD)
677 		eval_add(rd, &rs, msk);
678 	else if (op == BPF_SUB)
679 		eval_sub(rd, &rs, msk);
680 	else if (op == BPF_LSH)
681 		eval_lsh(rd, &rs, opsz, msk);
682 	else if (op == BPF_RSH)
683 		eval_rsh(rd, &rs, opsz, msk);
684 	else if (op == EBPF_ARSH)
685 		eval_arsh(rd, &rs, opsz, msk);
686 	else if (op == BPF_AND)
687 		eval_and(rd, &rs, opsz, msk);
688 	else if (op == BPF_OR)
689 		eval_or(rd, &rs, opsz, msk);
690 	else if (op == BPF_XOR)
691 		eval_xor(rd, &rs, opsz, msk);
692 	else if (op == BPF_MUL)
693 		eval_mul(rd, &rs, opsz, msk);
694 	else if (op == BPF_DIV || op == BPF_MOD)
695 		err = eval_divmod(op, rd, &rs, opsz, msk);
696 	else if (op == BPF_NEG)
697 		eval_neg(rd, opsz, msk);
698 	else if (op == EBPF_MOV)
699 		*rd = rs;
700 	else
701 		eval_max_bound(rd, msk);
702 
703 	return err;
704 }
705 
706 static const char *
707 eval_bele(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
708 {
709 	uint64_t msk;
710 	struct bpf_eval_state *st;
711 	struct bpf_reg_val *rd;
712 	const char *err;
713 
714 	msk = RTE_LEN2MASK(ins->imm, uint64_t);
715 
716 	st = bvf->evst;
717 	rd = st->rv + ins->dst_reg;
718 
719 	err = eval_defined(rd, NULL);
720 	if (err != NULL)
721 		return err;
722 
723 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
724 	if (ins->code == (BPF_ALU | EBPF_END | EBPF_TO_BE))
725 		eval_max_bound(rd, msk);
726 	else
727 		eval_apply_mask(rd, msk);
728 #else
729 	if (ins->code == (BPF_ALU | EBPF_END | EBPF_TO_LE))
730 		eval_max_bound(rd, msk);
731 	else
732 		eval_apply_mask(rd, msk);
733 #endif
734 
735 	return NULL;
736 }
737 
738 static const char *
739 eval_ptr(struct bpf_verifier *bvf, struct bpf_reg_val *rm, uint32_t opsz,
740 	uint32_t align, int16_t off)
741 {
742 	struct bpf_reg_val rv;
743 
744 	/* calculate reg + offset */
745 	eval_fill_imm(&rv, rm->mask, off);
746 	eval_add(rm, &rv, rm->mask);
747 
748 	if (RTE_BPF_ARG_PTR_TYPE(rm->v.type) == 0)
749 		return "destination is not a pointer";
750 
751 	if (rm->mask != UINT64_MAX)
752 		return "pointer truncation";
753 
754 	if (rm->u.max + opsz > rm->v.size ||
755 			(uint64_t)rm->s.max + opsz > rm->v.size ||
756 			rm->s.min < 0)
757 		return "memory boundary violation";
758 
759 	if (rm->u.max % align != 0)
760 		return "unaligned memory access";
761 
762 	if (rm->v.type == BPF_ARG_PTR_STACK) {
763 
764 		if (rm->u.max != rm->u.min || rm->s.max != rm->s.min ||
765 				rm->u.max != (uint64_t)rm->s.max)
766 			return "stack access with variable offset";
767 
768 		bvf->stack_sz = RTE_MAX(bvf->stack_sz, rm->v.size - rm->u.max);
769 
770 	/* pointer to mbuf */
771 	} else if (rm->v.type == RTE_BPF_ARG_PTR_MBUF) {
772 
773 		if (rm->u.max != rm->u.min || rm->s.max != rm->s.min ||
774 				rm->u.max != (uint64_t)rm->s.max)
775 			return "mbuf access with variable offset";
776 	}
777 
778 	return NULL;
779 }
780 
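/*
 * Editorial illustration for eval_ptr(): a BPF_ARG_PTR_STACK pointer
 * of size 16 dereferenced with align == 1, a constant offset of 8 and
 * opsz == 8 passes every check above, and bvf->stack_sz grows to at
 * least size - offset == 8 bytes.
 */
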
781 static void
782 eval_max_load(struct bpf_reg_val *rv, uint64_t mask)
783 {
784 	eval_umax_bound(rv, mask);
785 
786 	/* full 64-bit load */
787 	if (mask == UINT64_MAX) {
788 		eval_smax_bound(rv, mask);
789 		return;
790 	}
791 	/* zero-extend load */
792 	rv->s.min = rv->u.min;
793 	rv->s.max = rv->u.max;
793 }
794 
795 
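/*
 * Editorial note: eval_load() handles LDX MEM: it validates the source
 * pointer via eval_ptr(), then derives the destination register bounds
 * from what is known about the pointed-to object (an initialized stack
 * slot, a recognized mbuf field, or raw data).
 */
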
796 static const char *
797 eval_load(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
798 {
799 	uint32_t opsz;
800 	uint64_t msk;
801 	const char *err;
802 	struct bpf_eval_state *st;
803 	struct bpf_reg_val *rd, rs;
804 	const struct bpf_reg_val *sv;
805 
806 	st = bvf->evst;
807 	rd = st->rv + ins->dst_reg;
808 	rs = st->rv[ins->src_reg];
809 	opsz = bpf_size(BPF_SIZE(ins->code));
810 	msk = RTE_LEN2MASK(opsz * CHAR_BIT, uint64_t);
811 
812 	err = eval_ptr(bvf, &rs, opsz, 1, ins->off);
813 	if (err != NULL)
814 		return err;
815 
816 	if (rs.v.type == BPF_ARG_PTR_STACK) {
817 
818 		sv = st->sv + rs.u.max / sizeof(uint64_t);
819 		if (sv->v.type == RTE_BPF_ARG_UNDEF || sv->mask < msk)
820 			return "undefined value on the stack";
821 
822 		*rd = *sv;
823 
824 	/* pointer to mbuf */
825 	} else if (rs.v.type == RTE_BPF_ARG_PTR_MBUF) {
826 
827 		if (rs.u.max == offsetof(struct rte_mbuf, next)) {
828 			eval_fill_imm(rd, msk, 0);
829 			rd->v = rs.v;
830 		} else if (rs.u.max == offsetof(struct rte_mbuf, buf_addr)) {
831 			eval_fill_imm(rd, msk, 0);
832 			rd->v.type = RTE_BPF_ARG_PTR;
833 			rd->v.size = rs.v.buf_size;
834 		} else if (rs.u.max == offsetof(struct rte_mbuf, data_off)) {
835 			eval_fill_imm(rd, msk, RTE_PKTMBUF_HEADROOM);
836 			rd->v.type = RTE_BPF_ARG_RAW;
837 		} else {
838 			eval_max_load(rd, msk);
839 			rd->v.type = RTE_BPF_ARG_RAW;
840 		}
841 
842 	/* pointer to raw data */
843 	} else {
844 		eval_max_load(rd, msk);
845 		rd->v.type = RTE_BPF_ARG_RAW;
846 	}
847 
848 	return NULL;
849 }
850 
851 static const char *
852 eval_mbuf_store(const struct bpf_reg_val *rv, uint32_t opsz)
853 {
854 	uint32_t i;
855 
856 	static const struct {
857 		size_t off;
858 		size_t sz;
859 	} mbuf_ro_fileds[] = {
860 		{ .off = offsetof(struct rte_mbuf, buf_addr), },
861 		{ .off = offsetof(struct rte_mbuf, refcnt), },
862 		{ .off = offsetof(struct rte_mbuf, nb_segs), },
863 		{ .off = offsetof(struct rte_mbuf, buf_len), },
864 		{ .off = offsetof(struct rte_mbuf, pool), },
865 		{ .off = offsetof(struct rte_mbuf, next), },
866 		{ .off = offsetof(struct rte_mbuf, priv_size), },
867 	};
868 
869 	for (i = 0; i != RTE_DIM(mbuf_ro_fileds) &&
870 			(mbuf_ro_fileds[i].off + mbuf_ro_fileds[i].sz <=
871 			rv->u.max || rv->u.max + opsz <= mbuf_ro_fileds[i].off);
872 			i++)
873 		;
874 
875 	if (i != RTE_DIM(mbuf_ro_fileds))
876 		return "store to the read-only mbuf field";
877 
878 	return NULL;
879 
880 }
881 
882 static const char *
883 eval_store(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
884 {
885 	uint32_t opsz;
886 	uint64_t msk;
887 	const char *err;
888 	struct bpf_eval_state *st;
889 	struct bpf_reg_val rd, rs, *sv;
890 
891 	opsz = bpf_size(BPF_SIZE(ins->code));
892 	msk = RTE_LEN2MASK(opsz * CHAR_BIT, uint64_t);
893 
894 	st = bvf->evst;
895 	rd = st->rv[ins->dst_reg];
896 
897 	if (BPF_CLASS(ins->code) == BPF_STX) {
898 		rs = st->rv[ins->src_reg];
899 		eval_apply_mask(&rs, msk);
900 	} else
901 		eval_fill_imm(&rs, msk, ins->imm);
902 
903 	err = eval_defined(NULL, &rs);
904 	if (err != NULL)
905 		return err;
906 
907 	err = eval_ptr(bvf, &rd, opsz, 1, ins->off);
908 	if (err != NULL)
909 		return err;
910 
911 	if (rd.v.type == BPF_ARG_PTR_STACK) {
912 
913 		sv = st->sv + rd.u.max / sizeof(uint64_t);
914 		if (BPF_CLASS(ins->code) == BPF_STX &&
915 				BPF_MODE(ins->code) == EBPF_XADD)
916 			eval_max_bound(sv, msk);
917 		else
918 			*sv = rs;
919 
920 	/* pointer to mbuf */
921 	} else if (rd.v.type == RTE_BPF_ARG_PTR_MBUF) {
922 		err = eval_mbuf_store(&rd, opsz);
923 		if (err != NULL)
924 			return err;
925 	}
926 
927 	return NULL;
928 }
929 
930 static const char *
931 eval_func_arg(struct bpf_verifier *bvf, const struct rte_bpf_arg *arg,
932 	struct bpf_reg_val *rv)
933 {
934 	uint32_t i, n;
935 	struct bpf_eval_state *st;
936 	const char *err;
937 
938 	st = bvf->evst;
939 
940 	if (rv->v.type == RTE_BPF_ARG_UNDEF)
941 		return "Undefined argument type";
942 
943 	if (arg->type != rv->v.type &&
944 			arg->type != RTE_BPF_ARG_RAW &&
945 			(arg->type != RTE_BPF_ARG_PTR ||
946 			RTE_BPF_ARG_PTR_TYPE(rv->v.type) == 0))
947 		return "Invalid argument type";
948 
949 	err = NULL;
950 
951 	/* argument is a pointer */
952 	if (RTE_BPF_ARG_PTR_TYPE(arg->type) != 0) {
953 
954 		err = eval_ptr(bvf, rv, arg->size, 1, 0);
955 
956 		/*
957 		 * pointer to the variable on the stack is passed
958 		 * as an argument, mark stack space it occupies as initialized.
959 		 */
960 		if (err == NULL && rv->v.type == BPF_ARG_PTR_STACK) {
961 
962 			i = rv->u.max / sizeof(uint64_t);
963 			n = i + arg->size / sizeof(uint64_t);
964 			while (i != n) {
965 				eval_fill_max_bound(st->sv + i, UINT64_MAX);
966 				i++;
967 			}
968 		}
969 	}
970 
971 	return err;
972 }
973 
974 static const char *
975 eval_call(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
976 {
977 	uint32_t i, idx;
978 	struct bpf_reg_val *rv;
979 	const struct rte_bpf_xsym *xsym;
980 	const char *err;
981 
982 	idx = ins->imm;
983 
984 	if (idx >= bvf->prm->nb_xsym ||
985 			bvf->prm->xsym[idx].type != RTE_BPF_XTYPE_FUNC)
986 		return "invalid external function index";
987 
988 	/* for now don't support function calls on 32 bit platform */
989 	if (sizeof(uint64_t) != sizeof(uintptr_t))
990 		return "function calls are supported only for 64 bit apps";
991 
992 	xsym = bvf->prm->xsym + idx;
993 
994 	/* evaluate function arguments */
995 	err = NULL;
996 	for (i = 0; i != xsym->func.nb_args && err == NULL; i++) {
997 		err = eval_func_arg(bvf, xsym->func.args + i,
998 			bvf->evst->rv + EBPF_REG_1 + i);
999 	}
1000 
1001 	/* R1-R5 argument/scratch registers */
1002 	for (i = EBPF_REG_1; i != EBPF_REG_6; i++)
1003 		bvf->evst->rv[i].v.type = RTE_BPF_ARG_UNDEF;
1004 
1005 	/* update return value */
1006 
1007 	rv = bvf->evst->rv + EBPF_REG_0;
1008 	rv->v = xsym->func.ret;
1009 	if (rv->v.type == RTE_BPF_ARG_RAW)
1010 		eval_fill_max_bound(rv,
1011 			RTE_LEN2MASK(rv->v.size * CHAR_BIT, uint64_t));
1012 	else if (RTE_BPF_ARG_PTR_TYPE(rv->v.type) != 0)
1013 		eval_fill_imm64(rv, UINTPTR_MAX, 0);
1014 
1015 	return err;
1016 }
1017 
1018 static void
1019 eval_jeq_jne(struct bpf_reg_val *trd, struct bpf_reg_val *trs)
1020 {
1021 	/* sreg is constant */
1022 	if (trs->u.min == trs->u.max) {
1023 		trd->u = trs->u;
1024 	/* dreg is constant */
1025 	} else if (trd->u.min == trd->u.max) {
1026 		trs->u = trd->u;
1027 	} else {
1028 		trd->u.max = RTE_MIN(trd->u.max, trs->u.max);
1029 		trd->u.min = RTE_MAX(trd->u.min, trs->u.min);
1030 		trs->u = trd->u;
1031 	}
1032 
1033 	/* sreg is constant */
1034 	if (trs->s.min == trs->s.max) {
1035 		trd->s = trs->s;
1036 	/* dreg is constant */
1037 	} else if (trd->s.min == trd->s.max) {
1038 		trs->s = trd->s;
1039 	} else {
1040 		trd->s.max = RTE_MIN(trd->s.max, trs->s.max);
1041 		trd->s.min = RTE_MAX(trd->s.min, trs->s.min);
1042 		trs->s = trd->s;
1043 	}
1044 }
1045 
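/*
 * Editorial illustration for eval_jeq_jne(): for 'if (r1 == 5)' with
 * r1.u = [0, 100], the helper intersects the ranges on the matching
 * branch, so both dreg and sreg collapse to the constant [5, 5] there.
 */
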
1046 static void
1047 eval_jgt_jle(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
1048 	struct bpf_reg_val *frd, struct bpf_reg_val *frs)
1049 {
1050 	frd->u.max = RTE_MIN(frd->u.max, frs->u.min);
1051 	trd->u.min = RTE_MAX(trd->u.min, trs->u.min + 1);
1052 }
1053 
1054 static void
1055 eval_jlt_jge(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
1056 	struct bpf_reg_val *frd, struct bpf_reg_val *frs)
1057 {
1058 	frd->u.min = RTE_MAX(frd->u.min, frs->u.min);
1059 	trd->u.max = RTE_MIN(trd->u.max, trs->u.max - 1);
1060 }
1061 
1062 static void
1063 eval_jsgt_jsle(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
1064 	struct bpf_reg_val *frd, struct bpf_reg_val *frs)
1065 {
1066 	frd->s.max = RTE_MIN(frd->s.max, frs->s.min);
1067 	trd->s.min = RTE_MAX(trd->s.min, trs->s.min + 1);
1068 }
1069 
1070 static void
1071 eval_jslt_jsge(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
1072 	struct bpf_reg_val *frd, struct bpf_reg_val *frs)
1073 {
1074 	frd->s.min = RTE_MAX(frd->s.min, frs->s.min);
1075 	trd->s.max = RTE_MIN(trd->s.max, trs->s.max - 1);
1076 }
1077 
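/*
 * Editorial illustration for the four helpers above: for 'if (r1 > 10)'
 * (BPF_JGT against a constant), eval_jgt_jle() narrows the taken branch
 * to r1.u.min = max(u.min, 11) and the fall-through branch to
 * r1.u.max = min(u.max, 10).
 */
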
1078 static const char *
1079 eval_jcc(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
1080 {
1081 	uint32_t op;
1082 	const char *err;
1083 	struct bpf_eval_state *fst, *tst;
1084 	struct bpf_reg_val *frd, *frs, *trd, *trs;
1085 	struct bpf_reg_val rvf, rvt;
1086 
1087 	tst = bvf->evst;
1088 	fst = bvf->evin->evst;
1089 
1090 	frd = fst->rv + ins->dst_reg;
1091 	trd = tst->rv + ins->dst_reg;
1092 
1093 	if (BPF_SRC(ins->code) == BPF_X) {
1094 		frs = fst->rv + ins->src_reg;
1095 		trs = tst->rv + ins->src_reg;
1096 	} else {
1097 		frs = &rvf;
1098 		trs = &rvt;
1099 		eval_fill_imm(frs, UINT64_MAX, ins->imm);
1100 		eval_fill_imm(trs, UINT64_MAX, ins->imm);
1101 	}
1102 
1103 	err = eval_defined(trd, trs);
1104 	if (err != NULL)
1105 		return err;
1106 
1107 	op = BPF_OP(ins->code);
1108 
1109 	if (op == BPF_JEQ)
1110 		eval_jeq_jne(trd, trs);
1111 	else if (op == EBPF_JNE)
1112 		eval_jeq_jne(frd, frs);
1113 	else if (op == BPF_JGT)
1114 		eval_jgt_jle(trd, trs, frd, frs);
1115 	else if (op == EBPF_JLE)
1116 		eval_jgt_jle(frd, frs, trd, trs);
1117 	else if (op == EBPF_JLT)
1118 		eval_jlt_jge(trd, trs, frd, frs);
1119 	else if (op == BPF_JGE)
1120 		eval_jlt_jge(frd, frs, trd, trs);
1121 	else if (op == EBPF_JSGT)
1122 		eval_jsgt_jsle(trd, trs, frd, frs);
1123 	else if (op == EBPF_JSLE)
1124 		eval_jsgt_jsle(frd, frs, trd, trs);
1125 	else if (op == EBPF_JSLT)
1126 		eval_jslt_jsge(trd, trs, frd, frs);
1127 	else if (op == EBPF_JSGE)
1128 		eval_jslt_jsge(frd, frs, trd, trs);
1129 
1130 	return NULL;
1131 }
1132 
1133 /*
1134  * validate parameters for each instruction type.
1135  */
1136 static const struct bpf_ins_check ins_chk[UINT8_MAX + 1] = {
1137 	/* ALU IMM 32-bit instructions */
1138 	[(BPF_ALU | BPF_ADD | BPF_K)] = {
1139 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1140 		.off = { .min = 0, .max = 0},
1141 		.imm = { .min = 0, .max = UINT32_MAX,},
1142 		.eval = eval_alu,
1143 	},
1144 	[(BPF_ALU | BPF_SUB | BPF_K)] = {
1145 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1146 		.off = { .min = 0, .max = 0},
1147 		.imm = { .min = 0, .max = UINT32_MAX,},
1148 		.eval = eval_alu,
1149 	},
1150 	[(BPF_ALU | BPF_AND | BPF_K)] = {
1151 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1152 		.off = { .min = 0, .max = 0},
1153 		.imm = { .min = 0, .max = UINT32_MAX,},
1154 		.eval = eval_alu,
1155 	},
1156 	[(BPF_ALU | BPF_OR | BPF_K)] = {
1157 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1158 		.off = { .min = 0, .max = 0},
1159 		.imm = { .min = 0, .max = UINT32_MAX,},
1160 		.eval = eval_alu,
1161 	},
1162 	[(BPF_ALU | BPF_LSH | BPF_K)] = {
1163 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1164 		.off = { .min = 0, .max = 0},
1165 		.imm = { .min = 0, .max = UINT32_MAX,},
1166 		.eval = eval_alu,
1167 	},
1168 	[(BPF_ALU | BPF_RSH | BPF_K)] = {
1169 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1170 		.off = { .min = 0, .max = 0},
1171 		.imm = { .min = 0, .max = UINT32_MAX,},
1172 		.eval = eval_alu,
1173 	},
1174 	[(BPF_ALU | BPF_XOR | BPF_K)] = {
1175 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1176 		.off = { .min = 0, .max = 0},
1177 		.imm = { .min = 0, .max = UINT32_MAX,},
1178 		.eval = eval_alu,
1179 	},
1180 	[(BPF_ALU | BPF_MUL | BPF_K)] = {
1181 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1182 		.off = { .min = 0, .max = 0},
1183 		.imm = { .min = 0, .max = UINT32_MAX,},
1184 		.eval = eval_alu,
1185 	},
1186 	[(BPF_ALU | EBPF_MOV | BPF_K)] = {
1187 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1188 		.off = { .min = 0, .max = 0},
1189 		.imm = { .min = 0, .max = UINT32_MAX,},
1190 		.eval = eval_alu,
1191 	},
1192 	[(BPF_ALU | BPF_DIV | BPF_K)] = {
1193 		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1194 		.off = { .min = 0, .max = 0},
1195 		.imm = { .min = 1, .max = UINT32_MAX},
1196 		.eval = eval_alu,
1197 	},
1198 	[(BPF_ALU | BPF_MOD | BPF_K)] = {
1199 		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1200 		.off = { .min = 0, .max = 0},
1201 		.imm = { .min = 1, .max = UINT32_MAX},
1202 		.eval = eval_alu,
1203 	},
1204 	/* ALU IMM 64-bit instructions */
1205 	[(EBPF_ALU64 | BPF_ADD | BPF_K)] = {
1206 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1207 		.off = { .min = 0, .max = 0},
1208 		.imm = { .min = 0, .max = UINT32_MAX,},
1209 		.eval = eval_alu,
1210 	},
1211 	[(EBPF_ALU64 | BPF_SUB | BPF_K)] = {
1212 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1213 		.off = { .min = 0, .max = 0},
1214 		.imm = { .min = 0, .max = UINT32_MAX,},
1215 		.eval = eval_alu,
1216 	},
1217 	[(EBPF_ALU64 | BPF_AND | BPF_K)] = {
1218 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1219 		.off = { .min = 0, .max = 0},
1220 		.imm = { .min = 0, .max = UINT32_MAX,},
1221 		.eval = eval_alu,
1222 	},
1223 	[(EBPF_ALU64 | BPF_OR | BPF_K)] = {
1224 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1225 		.off = { .min = 0, .max = 0},
1226 		.imm = { .min = 0, .max = UINT32_MAX,},
1227 		.eval = eval_alu,
1228 	},
1229 	[(EBPF_ALU64 | BPF_LSH | BPF_K)] = {
1230 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1231 		.off = { .min = 0, .max = 0},
1232 		.imm = { .min = 0, .max = UINT32_MAX,},
1233 		.eval = eval_alu,
1234 	},
1235 	[(EBPF_ALU64 | BPF_RSH | BPF_K)] = {
1236 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1237 		.off = { .min = 0, .max = 0},
1238 		.imm = { .min = 0, .max = UINT32_MAX,},
1239 		.eval = eval_alu,
1240 	},
1241 	[(EBPF_ALU64 | EBPF_ARSH | BPF_K)] = {
1242 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1243 		.off = { .min = 0, .max = 0},
1244 		.imm = { .min = 0, .max = UINT32_MAX,},
1245 		.eval = eval_alu,
1246 	},
1247 	[(EBPF_ALU64 | BPF_XOR | BPF_K)] = {
1248 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1249 		.off = { .min = 0, .max = 0},
1250 		.imm = { .min = 0, .max = UINT32_MAX,},
1251 		.eval = eval_alu,
1252 	},
1253 	[(EBPF_ALU64 | BPF_MUL | BPF_K)] = {
1254 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1255 		.off = { .min = 0, .max = 0},
1256 		.imm = { .min = 0, .max = UINT32_MAX,},
1257 		.eval = eval_alu,
1258 	},
1259 	[(EBPF_ALU64 | EBPF_MOV | BPF_K)] = {
1260 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1261 		.off = { .min = 0, .max = 0},
1262 		.imm = { .min = 0, .max = UINT32_MAX,},
1263 		.eval = eval_alu,
1264 	},
1265 	[(EBPF_ALU64 | BPF_DIV | BPF_K)] = {
1266 		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1267 		.off = { .min = 0, .max = 0},
1268 		.imm = { .min = 1, .max = UINT32_MAX},
1269 		.eval = eval_alu,
1270 	},
1271 	[(EBPF_ALU64 | BPF_MOD | BPF_K)] = {
1272 		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1273 		.off = { .min = 0, .max = 0},
1274 		.imm = { .min = 1, .max = UINT32_MAX},
1275 		.eval = eval_alu,
1276 	},
1277 	/* ALU REG 32-bit instructions */
1278 	[(BPF_ALU | BPF_ADD | BPF_X)] = {
1279 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1280 		.off = { .min = 0, .max = 0},
1281 		.imm = { .min = 0, .max = 0},
1282 		.eval = eval_alu,
1283 	},
1284 	[(BPF_ALU | BPF_SUB | BPF_X)] = {
1285 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1286 		.off = { .min = 0, .max = 0},
1287 		.imm = { .min = 0, .max = 0},
1288 		.eval = eval_alu,
1289 	},
1290 	[(BPF_ALU | BPF_AND | BPF_X)] = {
1291 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1292 		.off = { .min = 0, .max = 0},
1293 		.imm = { .min = 0, .max = 0},
1294 		.eval = eval_alu,
1295 	},
1296 	[(BPF_ALU | BPF_OR | BPF_X)] = {
1297 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1298 		.off = { .min = 0, .max = 0},
1299 		.imm = { .min = 0, .max = 0},
1300 		.eval = eval_alu,
1301 	},
1302 	[(BPF_ALU | BPF_LSH | BPF_X)] = {
1303 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1304 		.off = { .min = 0, .max = 0},
1305 		.imm = { .min = 0, .max = 0},
1306 		.eval = eval_alu,
1307 	},
1308 	[(BPF_ALU | BPF_RSH | BPF_X)] = {
1309 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1310 		.off = { .min = 0, .max = 0},
1311 		.imm = { .min = 0, .max = 0},
1312 		.eval = eval_alu,
1313 	},
1314 	[(BPF_ALU | BPF_XOR | BPF_X)] = {
1315 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1316 		.off = { .min = 0, .max = 0},
1317 		.imm = { .min = 0, .max = 0},
1318 		.eval = eval_alu,
1319 	},
1320 	[(BPF_ALU | BPF_MUL | BPF_X)] = {
1321 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1322 		.off = { .min = 0, .max = 0},
1323 		.imm = { .min = 0, .max = 0},
1324 		.eval = eval_alu,
1325 	},
1326 	[(BPF_ALU | BPF_DIV | BPF_X)] = {
1327 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1328 		.off = { .min = 0, .max = 0},
1329 		.imm = { .min = 0, .max = 0},
1330 		.eval = eval_alu,
1331 	},
1332 	[(BPF_ALU | BPF_MOD | BPF_X)] = {
1333 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1334 		.off = { .min = 0, .max = 0},
1335 		.imm = { .min = 0, .max = 0},
1336 		.eval = eval_alu,
1337 	},
1338 	[(BPF_ALU | EBPF_MOV | BPF_X)] = {
1339 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1340 		.off = { .min = 0, .max = 0},
1341 		.imm = { .min = 0, .max = 0},
1342 		.eval = eval_alu,
1343 	},
1344 	[(BPF_ALU | BPF_NEG)] = {
1345 		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1346 		.off = { .min = 0, .max = 0},
1347 		.imm = { .min = 0, .max = 0},
1348 		.eval = eval_alu,
1349 	},
1350 	[(BPF_ALU | EBPF_END | EBPF_TO_BE)] = {
1351 		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1352 		.off = { .min = 0, .max = 0},
1353 		.imm = { .min = 16, .max = 64},
1354 		.check = check_alu_bele,
1355 		.eval = eval_bele,
1356 	},
1357 	[(BPF_ALU | EBPF_END | EBPF_TO_LE)] = {
1358 		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1359 		.off = { .min = 0, .max = 0},
1360 		.imm = { .min = 16, .max = 64},
1361 		.check = check_alu_bele,
1362 		.eval = eval_bele,
1363 	},
1364 	/* ALU REG 64-bit instructions */
1365 	[(EBPF_ALU64 | BPF_ADD | BPF_X)] = {
1366 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1367 		.off = { .min = 0, .max = 0},
1368 		.imm = { .min = 0, .max = 0},
1369 		.eval = eval_alu,
1370 	},
1371 	[(EBPF_ALU64 | BPF_SUB | BPF_X)] = {
1372 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1373 		.off = { .min = 0, .max = 0},
1374 		.imm = { .min = 0, .max = 0},
1375 		.eval = eval_alu,
1376 	},
1377 	[(EBPF_ALU64 | BPF_AND | BPF_X)] = {
1378 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1379 		.off = { .min = 0, .max = 0},
1380 		.imm = { .min = 0, .max = 0},
1381 		.eval = eval_alu,
1382 	},
1383 	[(EBPF_ALU64 | BPF_OR | BPF_X)] = {
1384 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1385 		.off = { .min = 0, .max = 0},
1386 		.imm = { .min = 0, .max = 0},
1387 		.eval = eval_alu,
1388 	},
1389 	[(EBPF_ALU64 | BPF_LSH | BPF_X)] = {
1390 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1391 		.off = { .min = 0, .max = 0},
1392 		.imm = { .min = 0, .max = 0},
1393 		.eval = eval_alu,
1394 	},
1395 	[(EBPF_ALU64 | BPF_RSH | BPF_X)] = {
1396 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1397 		.off = { .min = 0, .max = 0},
1398 		.imm = { .min = 0, .max = 0},
1399 		.eval = eval_alu,
1400 	},
1401 	[(EBPF_ALU64 | EBPF_ARSH | BPF_X)] = {
1402 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1403 		.off = { .min = 0, .max = 0},
1404 		.imm = { .min = 0, .max = 0},
1405 		.eval = eval_alu,
1406 	},
1407 	[(EBPF_ALU64 | BPF_XOR | BPF_X)] = {
1408 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1409 		.off = { .min = 0, .max = 0},
1410 		.imm = { .min = 0, .max = 0},
1411 		.eval = eval_alu,
1412 	},
1413 	[(EBPF_ALU64 | BPF_MUL | BPF_X)] = {
1414 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1415 		.off = { .min = 0, .max = 0},
1416 		.imm = { .min = 0, .max = 0},
1417 		.eval = eval_alu,
1418 	},
1419 	[(EBPF_ALU64 | BPF_DIV | BPF_X)] = {
1420 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1421 		.off = { .min = 0, .max = 0},
1422 		.imm = { .min = 0, .max = 0},
1423 		.eval = eval_alu,
1424 	},
1425 	[(EBPF_ALU64 | BPF_MOD | BPF_X)] = {
1426 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1427 		.off = { .min = 0, .max = 0},
1428 		.imm = { .min = 0, .max = 0},
1429 		.eval = eval_alu,
1430 	},
1431 	[(EBPF_ALU64 | EBPF_MOV | BPF_X)] = {
1432 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1433 		.off = { .min = 0, .max = 0},
1434 		.imm = { .min = 0, .max = 0},
1435 		.eval = eval_alu,
1436 	},
1437 	[(EBPF_ALU64 | BPF_NEG)] = {
1438 		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1439 		.off = { .min = 0, .max = 0},
1440 		.imm = { .min = 0, .max = 0},
1441 		.eval = eval_alu,
1442 	},
1443 	/* load instructions */
1444 	[(BPF_LDX | BPF_MEM | BPF_B)] = {
1445 		.mask = {. dreg = WRT_REGS, .sreg = ALL_REGS},
1446 		.off = { .min = 0, .max = UINT16_MAX},
1447 		.imm = { .min = 0, .max = 0},
1448 		.eval = eval_load,
1449 	},
1450 	[(BPF_LDX | BPF_MEM | BPF_H)] = {
1451 		.mask = {. dreg = WRT_REGS, .sreg = ALL_REGS},
1452 		.off = { .min = 0, .max = UINT16_MAX},
1453 		.imm = { .min = 0, .max = 0},
1454 		.eval = eval_load,
1455 	},
1456 	[(BPF_LDX | BPF_MEM | BPF_W)] = {
1457 		.mask = {. dreg = WRT_REGS, .sreg = ALL_REGS},
1458 		.off = { .min = 0, .max = UINT16_MAX},
1459 		.imm = { .min = 0, .max = 0},
1460 		.eval = eval_load,
1461 	},
1462 	[(BPF_LDX | BPF_MEM | EBPF_DW)] = {
1463 		.mask = {. dreg = WRT_REGS, .sreg = ALL_REGS},
1464 		.off = { .min = 0, .max = UINT16_MAX},
1465 		.imm = { .min = 0, .max = 0},
1466 		.eval = eval_load,
1467 	},
1468 	/* load 64 bit immediate value */
1469 	[(BPF_LD | BPF_IMM | EBPF_DW)] = {
1470 		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1471 		.off = { .min = 0, .max = 0},
1472 		.imm = { .min = 0, .max = UINT32_MAX},
1473 		.eval = eval_ld_imm64,
1474 	},
1475 	/* load absolute instructions */
1476 	[(BPF_LD | BPF_ABS | BPF_B)] = {
1477 		.mask = {. dreg = ZERO_REG, .sreg = ZERO_REG},
1478 		.off = { .min = 0, .max = 0},
1479 		.imm = { .min = 0, .max = INT32_MAX},
1480 		.eval = eval_ld_mbuf,
1481 	},
1482 	[(BPF_LD | BPF_ABS | BPF_H)] = {
1483 		.mask = {. dreg = ZERO_REG, .sreg = ZERO_REG},
1484 		.off = { .min = 0, .max = 0},
1485 		.imm = { .min = 0, .max = INT32_MAX},
1486 		.eval = eval_ld_mbuf,
1487 	},
1488 	[(BPF_LD | BPF_ABS | BPF_W)] = {
1489 		.mask = {. dreg = ZERO_REG, .sreg = ZERO_REG},
1490 		.off = { .min = 0, .max = 0},
1491 		.imm = { .min = 0, .max = INT32_MAX},
1492 		.eval = eval_ld_mbuf,
1493 	},
1494 	/* load indirect instructions */
1495 	[(BPF_LD | BPF_IND | BPF_B)] = {
1496 		.mask = {. dreg = ZERO_REG, .sreg = IND_SRC_REGS},
1497 		.off = { .min = 0, .max = 0},
1498 		.imm = { .min = 0, .max = UINT32_MAX},
1499 		.eval = eval_ld_mbuf,
1500 	},
1501 	[(BPF_LD | BPF_IND | BPF_H)] = {
1502 		.mask = {. dreg = ZERO_REG, .sreg = IND_SRC_REGS},
1503 		.off = { .min = 0, .max = 0},
1504 		.imm = { .min = 0, .max = UINT32_MAX},
1505 		.eval = eval_ld_mbuf,
1506 	},
1507 	[(BPF_LD | BPF_IND | BPF_W)] = {
1508 		.mask = {. dreg = ZERO_REG, .sreg = IND_SRC_REGS},
1509 		.off = { .min = 0, .max = 0},
1510 		.imm = { .min = 0, .max = UINT32_MAX},
1511 		.eval = eval_ld_mbuf,
1512 	},
1513 	/* store REG instructions */
1514 	[(BPF_STX | BPF_MEM | BPF_B)] = {
1515 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1516 		.off = { .min = 0, .max = UINT16_MAX},
1517 		.imm = { .min = 0, .max = 0},
1518 		.eval = eval_store,
1519 	},
1520 	[(BPF_STX | BPF_MEM | BPF_H)] = {
1521 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1522 		.off = { .min = 0, .max = UINT16_MAX},
1523 		.imm = { .min = 0, .max = 0},
1524 		.eval = eval_store,
1525 	},
1526 	[(BPF_STX | BPF_MEM | BPF_W)] = {
1527 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1528 		.off = { .min = 0, .max = UINT16_MAX},
1529 		.imm = { .min = 0, .max = 0},
1530 		.eval = eval_store,
1531 	},
1532 	[(BPF_STX | BPF_MEM | EBPF_DW)] = {
1533 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1534 		.off = { .min = 0, .max = UINT16_MAX},
1535 		.imm = { .min = 0, .max = 0},
1536 		.eval = eval_store,
1537 	},
1538 	/* atomic add instructions */
1539 	[(BPF_STX | EBPF_XADD | BPF_W)] = {
1540 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1541 		.off = { .min = 0, .max = UINT16_MAX},
1542 		.imm = { .min = 0, .max = 0},
1543 		.eval = eval_store,
1544 	},
1545 	[(BPF_STX | EBPF_XADD | EBPF_DW)] = {
1546 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1547 		.off = { .min = 0, .max = UINT16_MAX},
1548 		.imm = { .min = 0, .max = 0},
1549 		.eval = eval_store,
1550 	},
1551 	/* store IMM instructions */
1552 	[(BPF_ST | BPF_MEM | BPF_B)] = {
1553 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1554 		.off = { .min = 0, .max = UINT16_MAX},
1555 		.imm = { .min = 0, .max = UINT32_MAX},
1556 		.eval = eval_store,
1557 	},
1558 	[(BPF_ST | BPF_MEM | BPF_H)] = {
1559 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1560 		.off = { .min = 0, .max = UINT16_MAX},
1561 		.imm = { .min = 0, .max = UINT32_MAX},
1562 		.eval = eval_store,
1563 	},
1564 	[(BPF_ST | BPF_MEM | BPF_W)] = {
1565 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1566 		.off = { .min = 0, .max = UINT16_MAX},
1567 		.imm = { .min = 0, .max = UINT32_MAX},
1568 		.eval = eval_store,
1569 	},
1570 	[(BPF_ST | BPF_MEM | EBPF_DW)] = {
1571 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1572 		.off = { .min = 0, .max = UINT16_MAX},
1573 		.imm = { .min = 0, .max = UINT32_MAX},
1574 		.eval = eval_store,
1575 	},
1576 	/* jump instruction */
1577 	[(BPF_JMP | BPF_JA)] = {
1578 		.mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
1579 		.off = { .min = 0, .max = UINT16_MAX},
1580 		.imm = { .min = 0, .max = 0},
1581 	},
1582 	/* jcc IMM instructions */
1583 	[(BPF_JMP | BPF_JEQ | BPF_K)] = {
1584 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1585 		.off = { .min = 0, .max = UINT16_MAX},
1586 		.imm = { .min = 0, .max = UINT32_MAX},
1587 		.eval = eval_jcc,
1588 	},
1589 	[(BPF_JMP | EBPF_JNE | BPF_K)] = {
1590 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1591 		.off = { .min = 0, .max = UINT16_MAX},
1592 		.imm = { .min = 0, .max = UINT32_MAX},
1593 		.eval = eval_jcc,
1594 	},
1595 	[(BPF_JMP | BPF_JGT | BPF_K)] = {
1596 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1597 		.off = { .min = 0, .max = UINT16_MAX},
1598 		.imm = { .min = 0, .max = UINT32_MAX},
1599 		.eval = eval_jcc,
1600 	},
1601 	[(BPF_JMP | EBPF_JLT | BPF_K)] = {
1602 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1603 		.off = { .min = 0, .max = UINT16_MAX},
1604 		.imm = { .min = 0, .max = UINT32_MAX},
1605 		.eval = eval_jcc,
1606 	},
1607 	[(BPF_JMP | BPF_JGE | BPF_K)] = {
1608 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1609 		.off = { .min = 0, .max = UINT16_MAX},
1610 		.imm = { .min = 0, .max = UINT32_MAX},
1611 		.eval = eval_jcc,
1612 	},
1613 	[(BPF_JMP | EBPF_JLE | BPF_K)] = {
1614 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1615 		.off = { .min = 0, .max = UINT16_MAX},
1616 		.imm = { .min = 0, .max = UINT32_MAX},
1617 		.eval = eval_jcc,
1618 	},
1619 	[(BPF_JMP | EBPF_JSGT | BPF_K)] = {
1620 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1621 		.off = { .min = 0, .max = UINT16_MAX},
1622 		.imm = { .min = 0, .max = UINT32_MAX},
1623 		.eval = eval_jcc,
1624 	},
1625 	[(BPF_JMP | EBPF_JSLT | BPF_K)] = {
1626 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1627 		.off = { .min = 0, .max = UINT16_MAX},
1628 		.imm = { .min = 0, .max = UINT32_MAX},
1629 		.eval = eval_jcc,
1630 	},
1631 	[(BPF_JMP | EBPF_JSGE | BPF_K)] = {
1632 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1633 		.off = { .min = 0, .max = UINT16_MAX},
1634 		.imm = { .min = 0, .max = UINT32_MAX},
1635 		.eval = eval_jcc,
1636 	},
1637 	[(BPF_JMP | EBPF_JSLE | BPF_K)] = {
1638 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1639 		.off = { .min = 0, .max = UINT16_MAX},
1640 		.imm = { .min = 0, .max = UINT32_MAX},
1641 		.eval = eval_jcc,
1642 	},
1643 	[(BPF_JMP | BPF_JSET | BPF_K)] = {
1644 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1645 		.off = { .min = 0, .max = UINT16_MAX},
1646 		.imm = { .min = 0, .max = UINT32_MAX},
1647 		.eval = eval_jcc,
1648 	},
1649 	/* jcc REG instructions */
1650 	[(BPF_JMP | BPF_JEQ | BPF_X)] = {
1651 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1652 		.off = { .min = 0, .max = UINT16_MAX},
1653 		.imm = { .min = 0, .max = 0},
1654 		.eval = eval_jcc,
1655 	},
1656 	[(BPF_JMP | EBPF_JNE | BPF_X)] = {
1657 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1658 		.off = { .min = 0, .max = UINT16_MAX},
1659 		.imm = { .min = 0, .max = 0},
1660 		.eval = eval_jcc,
1661 	},
1662 	[(BPF_JMP | BPF_JGT | BPF_X)] = {
1663 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1664 		.off = { .min = 0, .max = UINT16_MAX},
1665 		.imm = { .min = 0, .max = 0},
1666 		.eval = eval_jcc,
1667 	},
1668 	[(BPF_JMP | EBPF_JLT | BPF_X)] = {
1669 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1670 		.off = { .min = 0, .max = UINT16_MAX},
1671 		.imm = { .min = 0, .max = 0},
1672 		.eval = eval_jcc,
1673 	},
1674 	[(BPF_JMP | BPF_JGE | BPF_X)] = {
1675 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1676 		.off = { .min = 0, .max = UINT16_MAX},
1677 		.imm = { .min = 0, .max = 0},
1678 		.eval = eval_jcc,
1679 	},
1680 	[(BPF_JMP | EBPF_JLE | BPF_X)] = {
1681 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1682 		.off = { .min = 0, .max = UINT16_MAX},
1683 		.imm = { .min = 0, .max = 0},
1684 		.eval = eval_jcc,
1685 	},
1686 	[(BPF_JMP | EBPF_JSGT | BPF_X)] = {
1687 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1688 		.off = { .min = 0, .max = UINT16_MAX},
1689 		.imm = { .min = 0, .max = 0},
1690 		.eval = eval_jcc,
1691 	},
1692 	[(BPF_JMP | EBPF_JSLT | BPF_X)] = {
1693 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1694 		.off = { .min = 0, .max = UINT16_MAX},
1695 		.imm = { .min = 0, .max = 0},
1696 		.eval = eval_jcc,
1696 	},
1697 	[(BPF_JMP | EBPF_JSGE | BPF_X)] = {
1698 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1699 		.off = { .min = 0, .max = UINT16_MAX},
1700 		.imm = { .min = 0, .max = 0},
1701 		.eval = eval_jcc,
1702 	},
1703 	[(BPF_JMP | EBPF_JSLE | BPF_X)] = {
1704 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1705 		.off = { .min = 0, .max = UINT16_MAX},
1706 		.imm = { .min = 0, .max = 0},
1707 		.eval = eval_jcc,
1708 	},
1709 	[(BPF_JMP | BPF_JSET | BPF_X)] = {
1710 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1711 		.off = { .min = 0, .max = UINT16_MAX},
1712 		.imm = { .min = 0, .max = 0},
1713 		.eval = eval_jcc,
1714 	},
1715 	/* call instruction */
1716 	[(BPF_JMP | EBPF_CALL)] = {
1717 		.mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
1718 		.off = { .min = 0, .max = 0},
1719 		.imm = { .min = 0, .max = UINT32_MAX},
1720 		.eval = eval_call,
1721 	},
1722 	/* ret instruction */
1723 	[(BPF_JMP | EBPF_EXIT)] = {
1724 		.mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
1725 		.off = { .min = 0, .max = 0},
1726 		.imm = { .min = 0, .max = 0},
1727 		.eval = eval_exit,
1728 	},
1729 };
1730 
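/*
 * Editorial illustration of reading the table above: the entry for
 * (BPF_ALU | BPF_DIV | BPF_K) allows any writable dst_reg (WRT_REGS),
 * requires src_reg == 0 (ZERO_REG), off == 0 and imm in
 * [1, UINT32_MAX], so a literal zero divisor is rejected already at
 * syntax-check time.
 */
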
1731 /*
1732  * make sure that instruction syntax is valid,
1733  * and its fields don't violate particular instruction type restrictions.
1734  */
1735 static const char *
1736 check_syntax(const struct ebpf_insn *ins)
1737 {
1738 
1739 	uint8_t op;
1740 	uint16_t off;
1741 	uint32_t imm;
1742 
1743 	op = ins->code;
1744 
1745 	if (ins_chk[op].mask.dreg == 0)
1746 		return "invalid opcode";
1747 
1748 	if ((ins_chk[op].mask.dreg & 1 << ins->dst_reg) == 0)
1749 		return "invalid dst-reg field";
1750 
1751 	if ((ins_chk[op].mask.sreg & 1 << ins->src_reg) == 0)
1752 		return "invalid src-reg field";
1753 
1754 	off = ins->off;
1755 	if (ins_chk[op].off.min > off || ins_chk[op].off.max < off)
1756 		return "invalid off field";
1757 
1758 	imm = ins->imm;
1759 	if (ins_chk[op].imm.min > imm || ins_chk[op].imm.max < imm)
1760 		return "invalid imm field";
1761 
1762 	if (ins_chk[op].check != NULL)
1763 		return ins_chk[op].check(ins);
1764 
1765 	return NULL;
1766 }
1767 
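/*
 * Editorial illustration: an instruction (BPF_ALU | BPF_ADD | BPF_K)
 * with dst_reg == EBPF_REG_10 fails the dreg mask test above, since
 * R10 (the read-only frame pointer) is excluded from WRT_REGS.
 */
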
1768 /*
1769  * helper function, return instruction index for the given node.
1770  */
1771 static uint32_t
1772 get_node_idx(const struct bpf_verifier *bvf, const struct inst_node *node)
1773 {
1774 	return node - bvf->in;
1775 }
1776 
1777 /*
1778  * helper function, used to walk through constructed CFG.
1779  */
1780 static struct inst_node *
1781 get_next_node(struct bpf_verifier *bvf, struct inst_node *node)
1782 {
1783 	uint32_t ce, ne, dst;
1784 
1785 	ne = node->nb_edge;
1786 	ce = node->cur_edge;
1787 	if (ce == ne)
1788 		return NULL;
1789 
1790 	node->cur_edge++;
1791 	dst = node->edge_dest[ce];
1792 	return bvf->in + dst;
1793 }
1794 
1795 static void
1796 set_node_colour(struct bpf_verifier *bvf, struct inst_node *node,
1797 	uint32_t new)
1798 {
1799 	uint32_t prev;
1800 
1801 	prev = node->colour;
1802 	node->colour = new;
1803 
1804 	bvf->node_colour[prev]--;
1805 	bvf->node_colour[new]++;
1806 }
1807 
1808 /*
1809  * helper function, add new edge between two nodes.
1810  */
1811 static int
1812 add_edge(struct bpf_verifier *bvf, struct inst_node *node, uint32_t nidx)
1813 {
1814 	uint32_t ne;
1815 
1816 	if (nidx > bvf->prm->nb_ins) {
1817 		RTE_BPF_LOG(ERR, "%s: program boundary violation at pc: %u, "
1818 			"next pc: %u\n",
1819 			__func__, get_node_idx(bvf, node), nidx);
1820 		return -EINVAL;
1821 	}
1822 
1823 	ne = node->nb_edge;
1824 	if (ne >= RTE_DIM(node->edge_dest)) {
1825 		RTE_BPF_LOG(ERR, "%s: internal error at pc: %u\n",
1826 			__func__, get_node_idx(bvf, node));
1827 		return -EINVAL;
1828 	}
1829 
1830 	node->edge_dest[ne] = nidx;
1831 	node->nb_edge = ne + 1;
1832 	return 0;
1833 }
1834 
1835 /*
1836  * helper function, determine type of edge between two nodes.
1837  */
1838 static void
1839 set_edge_type(struct bpf_verifier *bvf, struct inst_node *node,
1840 	const struct inst_node *next)
1841 {
1842 	uint32_t ce, clr, type;
1843 
1844 	ce = node->cur_edge - 1;
1845 	clr = next->colour;
1846 
1847 	type = UNKNOWN_EDGE;
1848 
1849 	if (clr == WHITE)
1850 		type = TREE_EDGE;
1851 	else if (clr == GREY)
1852 		type = BACK_EDGE;
1853 	else if (clr == BLACK)
1854 		/*
1855 		 * in fact it could be either direct or cross edge,
1856 		 * but for now, we don't need to distinguish between them.
1857 		 */
1858 		type = CROSS_EDGE;
1859 
1860 	node->edge_type[ce] = type;
1861 	bvf->edge_type[type]++;
1862 }
1863 
1864 static struct inst_node *
1865 get_prev_node(struct bpf_verifier *bvf, struct inst_node *node)
1866 {
1867 	return  bvf->in + node->prev_node;
1868 }
1869 
1870 /*
1871  * Depth-First Search (DFS) through previously constructed
1872  * Control Flow Graph (CFG).
1873  * Information collected along this path is used later
1874  * to determine whether there are any loops and/or unreachable instructions.
1875  */
1876 static void
1877 dfs(struct bpf_verifier *bvf)
1878 {
1879 	struct inst_node *next, *node;
1880 
1881 	node = bvf->in;
1882 	while (node != NULL) {
1883 
1884 		if (node->colour == WHITE)
1885 			set_node_colour(bvf, node, GREY);
1886 
1887 		if (node->colour == GREY) {
1888 
1889 			/* find next unprocessed child node */
1890 			do {
1891 				next = get_next_node(bvf, node);
1892 				if (next == NULL)
1893 					break;
1894 				set_edge_type(bvf, node, next);
1895 			} while (next->colour != WHITE);
1896 
1897 			if (next != NULL) {
1898 				/* proceed with next child */
1899 				next->prev_node = get_node_idx(bvf, node);
1900 				node = next;
1901 			} else {
1902 				/*
1903 				 * finished with current node and all its kids,
1904 				 * proceed with parent
1905 				 */
1906 				set_node_colour(bvf, node, BLACK);
1907 				node->cur_edge = 0;
1908 				node = get_prev_node(bvf, node);
1909 			}
1910 		} else
1911 			node = NULL;
1912 	}
1913 }
1914 
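/*
 * Colour invariants maintained by dfs(): WHITE - not visited yet,
 * GREY - on the current DFS path, BLACK - the node and all of its
 * descendants are fully processed. An edge into a GREY node can thus
 * only close a cycle, which is why set_edge_type() marks it BACK_EDGE.
 */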
1915 /*
1916  * report unreachable instructions.
1917  */
1918 static void
1919 log_unreachable(const struct bpf_verifier *bvf)
1920 {
1921 	uint32_t i;
1922 	struct inst_node *node;
1923 	const struct ebpf_insn *ins;
1924 
1925 	for (i = 0; i != bvf->prm->nb_ins; i++) {
1926 
1927 		node = bvf->in + i;
1928 		ins = bvf->prm->ins + i;
1929 
1930 		if (node->colour == WHITE &&
1931 				ins->code != (BPF_LD | BPF_IMM | EBPF_DW))
1932 			RTE_BPF_LOG(ERR, "unreachable code at pc: %u;\n", i);
1933 	}
1934 }
1935 
1936 /*
1937  * report loops detected.
1938  */
1939 static void
1940 log_loop(const struct bpf_verifier *bvf)
1941 {
1942 	uint32_t i, j;
1943 	struct inst_node *node;
1944 
1945 	for (i = 0; i != bvf->prm->nb_ins; i++) {
1946 
1947 		node = bvf->in + i;
1948 		if (node->colour != BLACK)
1949 			continue;
1950 
1951 		for (j = 0; j != node->nb_edge; j++) {
1952 			if (node->edge_type[j] == BACK_EDGE)
1953 				RTE_BPF_LOG(ERR,
1954 					"loop at pc:%u --> pc:%u;\n",
1955 					i, node->edge_dest[j]);
1956 		}
1957 	}
1958 }
1959 
1960 /*
1961  * The first pass goes through all instructions in the set, checks that each
1962  * instruction is a valid one (correct syntax, valid field values, etc.)
1963  * and constructs the control flow graph (CFG).
1964  * Then a depth-first search is performed over the constructed graph.
1965  * Programs with unreachable instructions and/or loops will be rejected.
1966  */
1967 static int
1968 validate(struct bpf_verifier *bvf)
1969 {
1970 	int32_t rc;
1971 	uint32_t i;
1972 	struct inst_node *node;
1973 	const struct ebpf_insn *ins;
1974 	const char *err;
1975 
1976 	rc = 0;
1977 	for (i = 0; i < bvf->prm->nb_ins; i++) {
1978 
1979 		ins = bvf->prm->ins + i;
1980 		node = bvf->in + i;
1981 
1982 		err = check_syntax(ins);
1983 		if (err != NULL) {
1984 			RTE_BPF_LOG(ERR, "%s: %s at pc: %u\n",
1985 				__func__, err, i);
1986 			rc |= -EINVAL;
1987 		}
1988 
1989 		/*
1990 		 * construct CFG: jcc nodes have two outgoing edges,
1991 		 * 'exit' nodes have none, all other nodes have exactly one
1992 		 * outgoing edge.
1993 		 */
1994 		switch (ins->code) {
1995 		case (BPF_JMP | EBPF_EXIT):
1996 			break;
1997 		case (BPF_JMP | BPF_JEQ | BPF_K):
1998 		case (BPF_JMP | EBPF_JNE | BPF_K):
1999 		case (BPF_JMP | BPF_JGT | BPF_K):
2000 		case (BPF_JMP | EBPF_JLT | BPF_K):
2001 		case (BPF_JMP | BPF_JGE | BPF_K):
2002 		case (BPF_JMP | EBPF_JLE | BPF_K):
2003 		case (BPF_JMP | EBPF_JSGT | BPF_K):
2004 		case (BPF_JMP | EBPF_JSLT | BPF_K):
2005 		case (BPF_JMP | EBPF_JSGE | BPF_K):
2006 		case (BPF_JMP | EBPF_JSLE | BPF_K):
2007 		case (BPF_JMP | BPF_JSET | BPF_K):
2008 		case (BPF_JMP | BPF_JEQ | BPF_X):
2009 		case (BPF_JMP | EBPF_JNE | BPF_X):
2010 		case (BPF_JMP | BPF_JGT | BPF_X):
2011 		case (BPF_JMP | EBPF_JLT | BPF_X):
2012 		case (BPF_JMP | BPF_JGE | BPF_X):
2013 		case (BPF_JMP | EBPF_JLE | BPF_X):
2014 		case (BPF_JMP | EBPF_JSGT | BPF_X):
2015 		case (BPF_JMP | EBPF_JSLT | BPF_X):
2016 		case (BPF_JMP | EBPF_JSGE | BPF_X):
2017 		case (BPF_JMP | EBPF_JSLE | BPF_X):
2018 		case (BPF_JMP | BPF_JSET | BPF_X):
2019 			rc |= add_edge(bvf, node, i + ins->off + 1);
2020 			rc |= add_edge(bvf, node, i + 1);
2021 			bvf->nb_jcc_nodes++;
2022 			break;
2023 		case (BPF_JMP | BPF_JA):
2024 			rc |= add_edge(bvf, node, i + ins->off + 1);
2025 			break;
2026 		/* load 64 bit immediate value */
2027 		case (BPF_LD | BPF_IMM | EBPF_DW):
2028 			rc |= add_edge(bvf, node, i + 2);
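			/* skip second slot: it holds the upper 32 bits of imm */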
2029 			i++;
2030 			break;
2031 		case (BPF_LD | BPF_ABS | BPF_B):
2032 		case (BPF_LD | BPF_ABS | BPF_H):
2033 		case (BPF_LD | BPF_ABS | BPF_W):
2034 		case (BPF_LD | BPF_IND | BPF_B):
2035 		case (BPF_LD | BPF_IND | BPF_H):
2036 		case (BPF_LD | BPF_IND | BPF_W):
2037 			bvf->nb_ldmb_nodes++;
2038 			/* fallthrough */
2039 		default:
2040 			rc |= add_edge(bvf, node, i + 1);
2041 			break;
2042 		}
2043 
2044 		bvf->nb_nodes++;
2045 		bvf->node_colour[WHITE]++;
2046 	}
2047 
2048 	if (rc != 0)
2049 		return rc;
2050 
2051 	dfs(bvf);
2052 
2053 	RTE_BPF_LOG(DEBUG, "%s(%p) stats:\n"
2054 		"nb_nodes=%u;\n"
2055 		"nb_jcc_nodes=%u;\n"
2056 		"node_colour={[WHITE]=%u, [GREY]=%u, [BLACK]=%u};\n"
2057 		"edge_type={[UNKNOWN]=%u, [TREE]=%u, [BACK]=%u, [CROSS]=%u};\n",
2058 		__func__, bvf,
2059 		bvf->nb_nodes,
2060 		bvf->nb_jcc_nodes,
2061 		bvf->node_colour[WHITE], bvf->node_colour[GREY],
2062 			bvf->node_colour[BLACK],
2063 		bvf->edge_type[UNKNOWN_EDGE], bvf->edge_type[TREE_EDGE],
2064 		bvf->edge_type[BACK_EDGE], bvf->edge_type[CROSS_EDGE]);
2065 
2066 	if (bvf->node_colour[BLACK] != bvf->nb_nodes) {
2067 		RTE_BPF_LOG(ERR, "%s(%p) unreachable instructions;\n",
2068 			__func__, bvf);
2069 		log_unreachable(bvf);
2070 		return -EINVAL;
2071 	}
2072 
2073 	if (bvf->node_colour[GREY] != 0 || bvf->node_colour[WHITE] != 0 ||
2074 			bvf->edge_type[UNKNOWN_EDGE] != 0) {
2075 		RTE_BPF_LOG(ERR, "%s(%p) DFS internal error;\n",
2076 			__func__, bvf);
2077 		return -EINVAL;
2078 	}
2079 
2080 	if (bvf->edge_type[BACK_EDGE] != 0) {
2081 		RTE_BPF_LOG(ERR, "%s(%p) loops detected;\n",
2082 			__func__, bvf);
2083 		log_loop(bvf);
2084 		return -EINVAL;
2085 	}
2086 
2087 	return 0;
2088 }
2089 
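/*
 * For illustration: a lone '(BPF_JMP | EBPF_EXIT)' instruction passes
 * this pass (one node, zero edges, coloured BLACK), though evaluate()
 * below would still reject it for an undefined return value.
 * Conversely, a lone '(BPF_JMP | BPF_JA)' with off == -1 produces an
 * edge from pc 0 back to itself, which dfs() classifies as a BACK_EDGE,
 * so the program is rejected as a loop.
 */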
2090 /*
2091  * helper functions get/free eval states.
2092  */
2093 static struct bpf_eval_state *
2094 pull_eval_state(struct bpf_verifier *bvf)
2095 {
2096 	uint32_t n;
2097 
2098 	n = bvf->evst_pool.cur;
2099 	if (n == bvf->evst_pool.num)
2100 		return NULL;
2101 
2102 	bvf->evst_pool.cur = n + 1;
2103 	return bvf->evst_pool.ent + n;
2104 }
2105 
2106 static void
2107 push_eval_state(struct bpf_verifier *bvf)
2108 {
2109 	bvf->evst_pool.cur--;
2110 }
2111 
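/*
 * The pool behaves as a LIFO stack: pull_eval_state() hands out the
 * next free entry, push_eval_state() releases the most recently pulled
 * one. That matches the nesting of saved states along a DFS path, so
 * no per-entry bookkeeping is needed.
 */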
2112 static void
2113 evst_pool_fini(struct bpf_verifier *bvf)
2114 {
2115 	bvf->evst = NULL;
2116 	free(bvf->evst_pool.ent);
2117 	memset(&bvf->evst_pool, 0, sizeof(bvf->evst_pool));
2118 }
2119 
2120 static int
2121 evst_pool_init(struct bpf_verifier *bvf)
2122 {
2123 	uint32_t n;
2124 
2125 	n = bvf->nb_jcc_nodes + 1;
2126 
2127 	bvf->evst_pool.ent = calloc(n, sizeof(bvf->evst_pool.ent[0]));
2128 	if (bvf->evst_pool.ent == NULL)
2129 		return -ENOMEM;
2130 
2131 	bvf->evst_pool.num = n;
2132 	bvf->evst_pool.cur = 0;
2133 
2134 	bvf->evst = pull_eval_state(bvf);
2135 	return 0;
2136 }
2137 
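/*
 * Capacity rationale: one entry for the live state plus, in the worst
 * case, one saved copy per conditional-jump node along the evaluation
 * path - hence 'nb_jcc_nodes + 1' above.
 */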
2138 /*
2139  * Save current eval state.
2140  */
2141 static int
2142 save_eval_state(struct bpf_verifier *bvf, struct inst_node *node)
2143 {
2144 	struct bpf_eval_state *st;
2145 
2146 	/* get new eval_state for this node */
2147 	st = pull_eval_state(bvf);
2148 	if (st == NULL) {
2149 		RTE_BPF_LOG(ERR,
2150 			"%s: internal error (out of space) at pc: %u\n",
2151 			__func__, get_node_idx(bvf, node));
2152 		return -ENOMEM;
2153 	}
2154 
2155 	/* make a copy of current state */
2156 	memcpy(st, bvf->evst, sizeof(*st));
2157 
2158 	/* swap current state with new one */
2159 	node->evst = bvf->evst;
2160 	bvf->evst = st;
2161 
2162 	RTE_BPF_LOG(DEBUG, "%s(bvf=%p,node=%u) old/new states: %p/%p;\n",
2163 		__func__, bvf, get_node_idx(bvf, node), node->evst, bvf->evst);
2164 
2165 	return 0;
2166 }
2167 
2168 /*
2169  * Restore previous eval state and mark current eval state as free.
2170  */
2171 static void
2172 restore_eval_state(struct bpf_verifier *bvf, struct inst_node *node)
2173 {
2174 	RTE_BPF_LOG(DEBUG, "%s(bvf=%p,node=%u) old/new states: %p/%p;\n",
2175 		__func__, bvf, get_node_idx(bvf, node), bvf->evst, node->evst);
2176 
2177 	bvf->evst = node->evst;
2178 	node->evst = NULL;
2179 	push_eval_state(bvf);
2180 }
2181 
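/*
 * save_eval_state() and restore_eval_state() always come in pairs:
 * evaluate() saves the state on first entering a node with more than
 * one outgoing edge and restores it once that node's last edge is
 * taken, so each branch of a conditional is evaluated from the same
 * start state.
 */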
2182 static void
2183 log_eval_state(const struct bpf_verifier *bvf, const struct ebpf_insn *ins,
2184 	uint32_t pc, int32_t loglvl)
2185 {
2186 	const struct bpf_eval_state *st;
2187 	const struct bpf_reg_val *rv;
2188 
2189 	rte_log(loglvl, rte_bpf_logtype, "%s(pc=%u):\n", __func__, pc);
2190 
2191 	st = bvf->evst;
2192 	rv = st->rv + ins->dst_reg;
2193 
2194 	rte_log(loglvl, rte_bpf_logtype,
2195 		"r%u={\n"
2196 		"\tv={type=%u, size=%zu},\n"
2197 		"\tmask=0x%" PRIx64 ",\n"
2198 		"\tu={min=0x%" PRIx64 ", max=0x%" PRIx64 "},\n"
2199 		"\ts={min=%" PRId64 ", max=%" PRId64 "},\n"
2200 		"};\n",
2201 		ins->dst_reg,
2202 		rv->v.type, rv->v.size,
2203 		rv->mask,
2204 		rv->u.min, rv->u.max,
2205 		rv->s.min, rv->s.max);
2206 }
2207 
2208 /*
2209  * Do a second pass through the CFG and try to evaluate instructions
2210  * via each possible path.
2211  * Right now the evaluation functionality is quite limited.
2212  * Extra checks still need to be added for:
2213  * - use/return of uninitialized registers;
2214  * - use of uninitialized data from the stack;
2215  * - memory boundary violations.
2216  */
2217 static int
2218 evaluate(struct bpf_verifier *bvf)
2219 {
2220 	int32_t rc;
2221 	uint32_t idx, op;
2222 	const char *err;
2223 	const struct ebpf_insn *ins;
2224 	struct inst_node *next, *node;
2225 
2226 	/* initial state of frame pointer */
2227 	static const struct bpf_reg_val rvfp = {
2228 		.v = {
2229 			.type = BPF_ARG_PTR_STACK,
2230 			.size = MAX_BPF_STACK_SIZE,
2231 		},
2232 		.mask = UINT64_MAX,
2233 		.u = {.min = MAX_BPF_STACK_SIZE, .max = MAX_BPF_STACK_SIZE},
2234 		.s = {.min = MAX_BPF_STACK_SIZE, .max = MAX_BPF_STACK_SIZE},
2235 	};
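	/*
	 * R10 is the read-only frame pointer: it points at the top of a
	 * MAX_BPF_STACK_SIZE stack area, hence the exact (min == max)
	 * bounds above.
	 */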
2236 
2237 	bvf->evst->rv[EBPF_REG_1].v = bvf->prm->prog_arg;
2238 	bvf->evst->rv[EBPF_REG_1].mask = UINT64_MAX;
2239 	if (bvf->prm->prog_arg.type == RTE_BPF_ARG_RAW)
2240 		eval_max_bound(bvf->evst->rv + EBPF_REG_1, UINT64_MAX);
2241 
2242 	bvf->evst->rv[EBPF_REG_10] = rvfp;
2243 
2244 	ins = bvf->prm->ins;
2245 	node = bvf->in;
2246 	next = node;
2247 	rc = 0;
2248 
2249 	while (node != NULL && rc == 0) {
2250 
2251 		/*
2252 		 * current node evaluation, make sure we evaluate
2253 		 * each node only once.
2254 		 */
2255 		if (next != NULL) {
2256 
2257 			bvf->evin = node;
2258 			idx = get_node_idx(bvf, node);
2259 			op = ins[idx].code;
2260 
2261 			/* for a jcc node, make a copy of the evaluation state */
2262 			if (node->nb_edge > 1)
2263 				rc |= save_eval_state(bvf, node);
2264 
2265 			if (ins_chk[op].eval != NULL && rc == 0) {
2266 				err = ins_chk[op].eval(bvf, ins + idx);
2267 				if (err != NULL) {
2268 					RTE_BPF_LOG(ERR, "%s: %s at pc: %u\n",
2269 						__func__, err, idx);
2270 					rc = -EINVAL;
2271 				}
2272 			}
2273 
2274 			log_eval_state(bvf, ins + idx, idx, RTE_LOG_DEBUG);
2275 			bvf->evin = NULL;
2276 		}
2277 
2278 		/* proceed through CFG */
2279 		next = get_next_node(bvf, node);
2280 		if (next != NULL) {
2281 
2282 			/* proceed with next child */
2283 			if (node->cur_edge == node->nb_edge &&
2284 					node->evst != NULL)
2285 				restore_eval_state(bvf, node);
2286 
2287 			next->prev_node = get_node_idx(bvf, node);
2288 			node = next;
2289 		} else {
2290 			/*
2291 			 * finished with current node and all its kids,
2292 			 * proceed with parent
2293 			 */
2294 			node->cur_edge = 0;
2295 			node = get_prev_node(bvf, node);
2296 
2297 			/* finished */
2298 			if (node == bvf->in)
2299 				node = NULL;
2300 		}
2301 	}
2302 
2303 	return rc;
2304 }
2305 
2306 int
2307 bpf_validate(struct rte_bpf *bpf)
2308 {
2309 	int32_t rc;
2310 	struct bpf_verifier bvf;
2311 
2312 	/* check input argument type, don't allow mbuf ptr on 32-bit */
2313 	if (bpf->prm.prog_arg.type != RTE_BPF_ARG_RAW &&
2314 			bpf->prm.prog_arg.type != RTE_BPF_ARG_PTR &&
2315 			(sizeof(uint64_t) != sizeof(uintptr_t) ||
2316 			bpf->prm.prog_arg.type != RTE_BPF_ARG_PTR_MBUF)) {
2317 		RTE_BPF_LOG(ERR, "%s: unsupported argument type\n", __func__);
2318 		return -ENOTSUP;
2319 	}
2320 
2321 	memset(&bvf, 0, sizeof(bvf));
2322 	bvf.prm = &bpf->prm;
2323 	bvf.in = calloc(bpf->prm.nb_ins, sizeof(bvf.in[0]));
2324 	if (bvf.in == NULL)
2325 		return -ENOMEM;
2326 
2327 	rc = validate(&bvf);
2328 
2329 	if (rc == 0) {
2330 		rc = evst_pool_init(&bvf);
2331 		if (rc == 0)
2332 			rc = evaluate(&bvf);
2333 		evst_pool_fini(&bvf);
2334 	}
2335 
2336 	free(bvf.in);
2337 
2338 	/* copy collected info */
2339 	if (rc == 0) {
2340 		bpf->stack_sz = bvf.stack_sz;
2341 
2342 		/* for LD_ABS/LD_IND, we'll need extra space on the stack */
2343 		if (bvf.nb_ldmb_nodes != 0)
2344 			bpf->stack_sz = RTE_ALIGN_CEIL(bpf->stack_sz +
2345 				sizeof(uint64_t), sizeof(uint64_t));
2346 	}
2347 
2348 	return rc;
2349 }
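/*
 * Usage sketch (illustrative only, using the public rte_bpf API):
 * bpf_validate() runs as part of program load, so a validation failure
 * makes the load fail as well:
 *
 *	struct rte_bpf_prm prm = {
 *		.ins = ins,
 *		.nb_ins = RTE_DIM(ins),
 *		.prog_arg = {
 *			.type = RTE_BPF_ARG_RAW,
 *			.size = sizeof(uint64_t),
 *		},
 *	};
 *	struct rte_bpf *bpf = rte_bpf_load(&prm);
 *	if (bpf == NULL)
 *		<rte_errno holds the error code>;
 */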
2350