xref: /netbsd-src/external/bsd/libpcap/dist/optimize.c (revision ccd9df534e375a4366c5b55f23782053c7a98d82)
1 /*	$NetBSD: optimize.c,v 1.12 2023/08/17 15:18:12 christos Exp $	*/
2 
3 /*
4  * Copyright (c) 1988, 1989, 1990, 1991, 1993, 1994, 1995, 1996
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that: (1) source code distributions
9  * retain the above copyright notice and this paragraph in its entirety, (2)
10  * distributions including binary code include the above copyright notice and
11  * this paragraph in its entirety in the documentation or other materials
12  * provided with the distribution, and (3) all advertising materials mentioning
13  * features or use of this software display the following acknowledgement:
14  * ``This product includes software developed by the University of California,
15  * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
16  * the University nor the names of its contributors may be used to endorse
17  * or promote products derived from this software without specific prior
18  * written permission.
19  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
20  * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
21  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22  *
23  *  Optimization module for BPF code intermediate representation.
24  */
25 
26 #include <sys/cdefs.h>
27 __RCSID("$NetBSD: optimize.c,v 1.12 2023/08/17 15:18:12 christos Exp $");
28 
29 #ifdef HAVE_CONFIG_H
30 #include <config.h>
31 #endif
32 
33 #include <pcap-types.h>
34 
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <memory.h>
38 #include <setjmp.h>
39 #include <string.h>
40 #include <limits.h> /* for SIZE_MAX */
41 #include <errno.h>
42 
43 #include "pcap-int.h"
44 
45 #include "gencode.h"
46 #include "optimize.h"
47 #include "diag-control.h"
48 
49 #ifdef HAVE_OS_PROTO_H
50 #include "os-proto.h"
51 #endif
52 
53 #ifdef BDEBUG
54 /*
55  * The internal "debug printout" flag for the filter expression optimizer.
56  * The code to print that stuff is present only if BDEBUG is defined, so
57  * the flag, and the routine to set it, are defined only if BDEBUG is
58  * defined.
59  */
60 static int pcap_optimizer_debug;
61 
62 /*
63  * Routine to set that flag.
64  *
65  * This is intended for libpcap developers, not for general use.
66  * If you want to set these in a program, you'll have to declare this
67  * routine yourself, with the appropriate DLL import attribute on Windows;
68  * it's not declared in any header file, and won't be declared in any
69  * header file provided by libpcap.
70  */
71 PCAP_API void pcap_set_optimizer_debug(int value);
72 
73 PCAP_API_DEF void
74 pcap_set_optimizer_debug(int value)
75 {
76 	pcap_optimizer_debug = value;
77 }
78 
79 /*
80  * The internal "print dot graph" flag for the filter expression optimizer.
81  * The code to print that stuff is present only if BDEBUG is defined, so
82  * the flag, and the routine to set it, are defined only if BDEBUG is
83  * defined.
84  */
85 static int pcap_print_dot_graph;
86 
87 /*
88  * Routine to set that flag.
89  *
90  * This is intended for libpcap developers, not for general use.
91  * If you want to set these in a program, you'll have to declare this
92  * routine yourself, with the appropriate DLL import attribute on Windows;
93  * it's not declared in any header file, and won't be declared in any
94  * header file provided by libpcap.
95  */
96 PCAP_API void pcap_set_print_dot_graph(int value);
97 
98 PCAP_API_DEF void
99 pcap_set_print_dot_graph(int value)
100 {
101 	pcap_print_dot_graph = value;
102 }
103 
104 #endif
105 
106 /*
107  * lowest_set_bit().
108  *
109  * Takes a 32-bit integer as an argument.
110  *
111  * If handed a non-zero value, returns the index of the lowest set bit,
112  * counting upwards from zero.
113  *
114  * If handed zero, the results are platform- and compiler-dependent.
115  * Keep it out of the light, don't give it any water, don't feed it
116  * after midnight, and don't pass zero to it.
117  *
118  * This is the same as the count of trailing zeroes in the word.
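 *
 * For example, lowest_set_bit(0x28) is 3: 0x28 is 101000 in binary, so
 * there are 3 trailing zeroes.  In the De Bruijn table-lookup fallback
 * below, 0x28 & -0x28 leaves 0x8, and (0x8 * 0x077CB531U) >> 27 is 7,
 * whose table entry is 3.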
119  */
120 #if PCAP_IS_AT_LEAST_GNUC_VERSION(3,4)
121   /*
122    * GCC 3.4 and later; we have __builtin_ctz().
123    */
124   #define lowest_set_bit(mask) ((u_int)__builtin_ctz(mask))
125 #elif defined(_MSC_VER)
126   /*
127    * Visual Studio; we support only 2005 and later, so use
128    * _BitScanForward().
129    */
130 #include <intrin.h>
131 
132 #ifndef __clang__
133 #pragma intrinsic(_BitScanForward)
134 #endif
135 
136 static __forceinline u_int
137 lowest_set_bit(int mask)
138 {
139 	unsigned long bit;
140 
141 	/*
142 	 * Don't sign-extend mask if long is longer than int.
143 	 * (It's currently not, in MSVC, even on 64-bit platforms, but....)
144 	 */
145 	if (_BitScanForward(&bit, (unsigned int)mask) == 0)
146 		abort();	/* mask is zero */
147 	return (u_int)bit;
148 }
149 #elif defined(MSDOS) && defined(__DJGPP__)
150   /*
151    * MS-DOS with DJGPP, which declares ffs() in <string.h>, which
152    * we've already included.
153    */
154   #define lowest_set_bit(mask)	((u_int)(ffs((mask)) - 1))
155 #elif (defined(MSDOS) && defined(__WATCOMC__)) || defined(STRINGS_H_DECLARES_FFS)
156   /*
157    * MS-DOS with Watcom C, which has <strings.h> and declares ffs() there,
158    * or some other platform (UN*X conforming to a sufficiently recent version
159    * of the Single UNIX Specification).
160    */
161   #include <strings.h>
162   #define lowest_set_bit(mask)	(u_int)((ffs((mask)) - 1))
163 #else
164 /*
165  * None of the above.
166  * Use a perfect-hash-function-based function.
167  */
168 static u_int
169 lowest_set_bit(int mask)
170 {
171 	unsigned int v = (unsigned int)mask;
172 
173 	static const u_int MultiplyDeBruijnBitPosition[32] = {
174 		0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
175 		31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
176 	};
177 
178 	/*
179 	 * We strip off all but the lowermost set bit (v & -v),
180 	 * and perform a minimal perfect hash on it to look up the
181 	 * number of low-order zero bits in a table.
182 	 *
183 	 * See:
184 	 *
185 	 *	http://7ooo.mooo.com/text/ComputingTrailingZerosHOWTO.pdf
186 	 *
187 	 *	http://supertech.csail.mit.edu/papers/debruijn.pdf
188 	 */
189 	return (MultiplyDeBruijnBitPosition[((v & -v) * 0x077CB531U) >> 27]);
190 }
191 #endif
192 
193 /*
194  * Represents a deleted instruction.
195  */
196 #define NOP -1
197 
198 /*
199  * Register numbers for use-def values.
200  * 0 through BPF_MEMWORDS-1 represent the corresponding scratch memory
201  * location.  A_ATOM is the accumulator and X_ATOM is the index
202  * register.
203  */
204 #define A_ATOM BPF_MEMWORDS
205 #define X_ATOM (BPF_MEMWORDS+1)
206 
207 /*
208  * This define is used to represent *both* the accumulator and
209  * x register in use-def computations.
210  * Currently, the use-def code assumes only one definition per instruction.
211  */
212 #define AX_ATOM N_ATOMS
213 
214 /*
215  * These data structures are used in a Cocke and Schwartz style
216  * value numbering scheme.  Since the flowgraph is acyclic,
217  * exit values can be propagated from a node's predecessors
218  * provided it is uniquely defined.
219  */
220 struct valnode {
221 	int code;
222 	bpf_u_int32 v0, v1;
223 	int val;		/* the value number */
224 	struct valnode *next;
225 };
226 
227 /* Integer constants mapped with the load immediate opcode. */
228 #define K(i) F(opt_state, BPF_LD|BPF_IMM|BPF_W, i, 0U)
229 
230 struct vmapinfo {
231 	int is_const;
232 	bpf_u_int32 const_val;
233 };
234 
235 typedef struct {
236 	/*
237 	 * Place to longjmp to on an error.
238 	 */
239 	jmp_buf top_ctx;
240 
241 	/*
242 	 * The buffer into which to put the error message.
243 	 */
244 	char *errbuf;
245 
246 	/*
247 	 * A flag to indicate that further optimization is needed.
248 	 * Iterative passes are continued until a given pass yields no
249 	 * code simplification or branch movement.
250 	 */
251 	int done;
252 
253 	/*
254 	 * XXX - detect loops that do nothing but repeated AND/OR pullups
255 	 * and edge moves.
256 	 * If 100 passes in a row do nothing but that, treat that as a
257 	 * sign that we're stuck in a cycle in which each pass just
258 	 * shuffles the code around and we eventually get back to the
259 	 * original configuration.
260 	 *
261 	 * XXX - we need a non-heuristic way of detecting, or preventing,
262 	 * such a cycle.
263 	 */
264 	int non_branch_movement_performed;
265 
266 	u_int n_blocks;		/* number of blocks in the CFG; guaranteed to be > 0, as the code consists of at least a RET instruction */
267 	struct block **blocks;
268 	u_int n_edges;		/* twice n_blocks, so guaranteed to be > 0 */
269 	struct edge **edges;
270 
271 	/*
272 	 * A bit vector set representation of the dominators.
273 	 * We round up the set size to the next power of two.
274 	 */
275 	u_int nodewords;	/* number of 32-bit words for a bit vector of "number of nodes" bits; guaranteed to be > 0 */
276 	u_int edgewords;	/* number of 32-bit words for a bit vector of "number of edges" bits; guaranteed to be > 0 */
277 	struct block **levels;
278 	bpf_u_int32 *space;
279 
280 #define BITS_PER_WORD (8*sizeof(bpf_u_int32))
281 /*
282  * True if a is in uset {p}
283  */
284 #define SET_MEMBER(p, a) \
285 ((p)[(unsigned)(a) / BITS_PER_WORD] & ((bpf_u_int32)1 << ((unsigned)(a) % BITS_PER_WORD)))
286 
287 /*
288  * Add 'a' to uset p.
289  */
290 #define SET_INSERT(p, a) \
291 (p)[(unsigned)(a) / BITS_PER_WORD] |= ((bpf_u_int32)1 << ((unsigned)(a) % BITS_PER_WORD))
292 
293 /*
294  * Delete 'a' from uset p.
295  */
296 #define SET_DELETE(p, a) \
297 (p)[(unsigned)(a) / BITS_PER_WORD] &= ~((bpf_u_int32)1 << ((unsigned)(a) % BITS_PER_WORD))
298 
299 /*
300  * a := a intersect b
301  * n must be guaranteed to be > 0
302  */
303 #define SET_INTERSECT(a, b, n)\
304 {\
305 	register bpf_u_int32 *_x = a, *_y = b;\
306 	register u_int _n = n;\
307 	do *_x++ &= *_y++; while (--_n != 0);\
308 }
309 
310 /*
311  * a := a - b
312  * n must be guaranteed to be > 0
313  */
314 #define SET_SUBTRACT(a, b, n)\
315 {\
316 	register bpf_u_int32 *_x = a, *_y = b;\
317 	register u_int _n = n;\
318 	do *_x++ &=~ *_y++; while (--_n != 0);\
319 }
320 
321 /*
322  * a := a union b
323  * n must be guaranteed to be > 0
324  */
325 #define SET_UNION(a, b, n)\
326 {\
327 	register bpf_u_int32 *_x = a, *_y = b;\
328 	register u_int _n = n;\
329 	do *_x++ |= *_y++; while (--_n != 0);\
330 }
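/*
 * For example, with BITS_PER_WORD == 32, SET_INSERT(p, 40) sets bit 8
 * of p[1] and SET_MEMBER(p, 40) tests that same bit; the three looping
 * macros above combine two such bit vectors word by word, which is how
 * the dominator, closure and edge-dominator sets below are maintained.
 */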
331 
332 	uset all_dom_sets;
333 	uset all_closure_sets;
334 	uset all_edge_sets;
335 
336 #define MODULUS 213
337 	struct valnode *hashtbl[MODULUS];
338 	bpf_u_int32 curval;
339 	bpf_u_int32 maxval;
340 
341 	struct vmapinfo *vmap;
342 	struct valnode *vnode_base;
343 	struct valnode *next_vnode;
344 } opt_state_t;
345 
346 typedef struct {
347 	/*
348 	 * Place to longjmp to on an error.
349 	 */
350 	jmp_buf top_ctx;
351 
352 	/*
353 	 * The buffer into which to put the error message.
354 	 */
355 	char *errbuf;
356 
357 	/*
358 	 * Some pointers used to convert the basic block form of the code
359 	 * into the array form that BPF requires.  'fstart' will point to
360 	 * the malloc'd array while 'ftail' is used during the recursive
361 	 * traversal.
362 	 */
363 	struct bpf_insn *fstart;
364 	struct bpf_insn *ftail;
365 } conv_state_t;
366 
367 static void opt_init(opt_state_t *, struct icode *);
368 static void opt_cleanup(opt_state_t *);
369 static void PCAP_NORETURN opt_error(opt_state_t *, const char *, ...)
370     PCAP_PRINTFLIKE(2, 3);
371 
372 static void intern_blocks(opt_state_t *, struct icode *);
373 
374 static void find_inedges(opt_state_t *, struct block *);
375 #ifdef BDEBUG
376 static void opt_dump(opt_state_t *, struct icode *);
377 #endif
378 
379 #ifndef MAX
380 #define MAX(a,b) ((a)>(b)?(a):(b))
381 #endif
382 
383 static void
384 find_levels_r(opt_state_t *opt_state, struct icode *ic, struct block *b)
385 {
386 	int level;
387 
388 	if (isMarked(ic, b))
389 		return;
390 
391 	Mark(ic, b);
392 	b->link = 0;
393 
394 	if (JT(b)) {
395 		find_levels_r(opt_state, ic, JT(b));
396 		find_levels_r(opt_state, ic, JF(b));
397 		level = MAX(JT(b)->level, JF(b)->level) + 1;
398 	} else
399 		level = 0;
400 	b->level = level;
401 	b->link = opt_state->levels[level];
402 	opt_state->levels[level] = b;
403 }
404 
405 /*
406  * Level graph.  The levels go from 0 at the leaves to
407  * N_LEVELS at the root.  The opt_state->levels[] array points to the
408  * first node of the level list, whose elements are linked
409  * with the 'link' field of the struct block.
410  */
411 static void
412 find_levels(opt_state_t *opt_state, struct icode *ic)
413 {
414 	memset((char *)opt_state->levels, 0, opt_state->n_blocks * sizeof(*opt_state->levels));
415 	unMarkAll(ic);
416 	find_levels_r(opt_state, ic, ic->root);
417 }
418 
419 /*
420  * Find dominator relationships.
421  * Assumes graph has been leveled.
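 *
 * One pass suffices because every predecessor of a block sits at a
 * strictly higher level than the block itself, so by the time a block's
 * dom set is propagated to its successors, all of the block's own
 * predecessors have already been intersected into it.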
422  */
423 static void
424 find_dom(opt_state_t *opt_state, struct block *root)
425 {
426 	u_int i;
427 	int level;
428 	struct block *b;
429 	bpf_u_int32 *x;
430 
431 	/*
432 	 * Initialize sets to contain all nodes.
433 	 */
434 	x = opt_state->all_dom_sets;
435 	/*
436 	 * In opt_init(), we've made sure the product doesn't overflow.
437 	 */
438 	i = opt_state->n_blocks * opt_state->nodewords;
439 	while (i != 0) {
440 		--i;
441 		*x++ = 0xFFFFFFFFU;
442 	}
443 	/* Root starts off empty. */
444 	for (i = opt_state->nodewords; i != 0;) {
445 		--i;
446 		root->dom[i] = 0;
447 	}
448 
449 	/* root->level is the highest level number found. */
450 	for (level = root->level; level >= 0; --level) {
451 		for (b = opt_state->levels[level]; b; b = b->link) {
452 			SET_INSERT(b->dom, b->id);
453 			if (JT(b) == 0)
454 				continue;
455 			SET_INTERSECT(JT(b)->dom, b->dom, opt_state->nodewords);
456 			SET_INTERSECT(JF(b)->dom, b->dom, opt_state->nodewords);
457 		}
458 	}
459 }
460 
461 static void
462 propedom(opt_state_t *opt_state, struct edge *ep)
463 {
464 	SET_INSERT(ep->edom, ep->id);
465 	if (ep->succ) {
466 		SET_INTERSECT(ep->succ->et.edom, ep->edom, opt_state->edgewords);
467 		SET_INTERSECT(ep->succ->ef.edom, ep->edom, opt_state->edgewords);
468 	}
469 }
470 
471 /*
472  * Compute edge dominators.
473  * Assumes graph has been leveled and predecessors established.
474  */
475 static void
476 find_edom(opt_state_t *opt_state, struct block *root)
477 {
478 	u_int i;
479 	uset x;
480 	int level;
481 	struct block *b;
482 
483 	x = opt_state->all_edge_sets;
484 	/*
485 	 * In opt_init(), we've made sure the product doesn't overflow.
486 	 */
487 	for (i = opt_state->n_edges * opt_state->edgewords; i != 0; ) {
488 		--i;
489 		x[i] = 0xFFFFFFFFU;
490 	}
491 
492 	/* root->level is the highest level number found. */
493 	memset(root->et.edom, 0, opt_state->edgewords * sizeof(*(uset)0));
494 	memset(root->ef.edom, 0, opt_state->edgewords * sizeof(*(uset)0));
495 	for (level = root->level; level >= 0; --level) {
496 		for (b = opt_state->levels[level]; b != 0; b = b->link) {
497 			propedom(opt_state, &b->et);
498 			propedom(opt_state, &b->ef);
499 		}
500 	}
501 }
502 
503 /*
504  * Find the backwards transitive closure of the flow graph.  These sets
505  * are backwards in the sense that we find the set of nodes that reach
506  * a given node, not the set of nodes that can be reached by a node.
507  *
508  * Assumes graph has been leveled.
509  */
510 static void
511 find_closure(opt_state_t *opt_state, struct block *root)
512 {
513 	int level;
514 	struct block *b;
515 
516 	/*
517 	 * Initialize sets to contain no nodes.
518 	 */
519 	memset((char *)opt_state->all_closure_sets, 0,
520 	      opt_state->n_blocks * opt_state->nodewords * sizeof(*opt_state->all_closure_sets));
521 
522 	/* root->level is the highest level number found. */
523 	for (level = root->level; level >= 0; --level) {
524 		for (b = opt_state->levels[level]; b; b = b->link) {
525 			SET_INSERT(b->closure, b->id);
526 			if (JT(b) == 0)
527 				continue;
528 			SET_UNION(JT(b)->closure, b->closure, opt_state->nodewords);
529 			SET_UNION(JF(b)->closure, b->closure, opt_state->nodewords);
530 		}
531 	}
532 }
533 
534 /*
535  * Return the register number that is used by s.
536  *
537  * Returns A_ATOM if A is used, X_ATOM if X is used, AX_ATOM if both A and X
538  * are used, the scratch memory location's number if a scratch memory
539  * location is used (e.g., 0 for M[0]), or -1 if none of those are used.
540  *
541  * The implementation should probably change to an array access.
542  */
543 static int
544 atomuse(struct stmt *s)
545 {
546 	register int c = s->code;
547 
548 	if (c == NOP)
549 		return -1;
550 
551 	switch (BPF_CLASS(c)) {
552 
553 	case BPF_RET:
554 		return (BPF_RVAL(c) == BPF_A) ? A_ATOM :
555 			(BPF_RVAL(c) == BPF_X) ? X_ATOM : -1;
556 
557 	case BPF_LD:
558 	case BPF_LDX:
559 		/*
560 		 * As there are fewer than 2^31 memory locations,
561 		 * s->k should be convertible to int without problems.
562 		 */
563 		return (BPF_MODE(c) == BPF_IND) ? X_ATOM :
564 			(BPF_MODE(c) == BPF_MEM) ? (int)s->k : -1;
565 
566 	case BPF_ST:
567 		return A_ATOM;
568 
569 	case BPF_STX:
570 		return X_ATOM;
571 
572 	case BPF_JMP:
573 	case BPF_ALU:
574 		if (BPF_SRC(c) == BPF_X)
575 			return AX_ATOM;
576 		return A_ATOM;
577 
578 	case BPF_MISC:
579 		return BPF_MISCOP(c) == BPF_TXA ? X_ATOM : A_ATOM;
580 	}
581 	abort();
582 	/* NOTREACHED */
583 }
584 
585 /*
586  * Return the register number that is defined by 's'.  We assume that
587  * a single stmt cannot define more than one register.  If no register
588  * is defined, return -1.
589  *
590  * The implementation should probably change to an array access.
591  */
592 static int
593 atomdef(struct stmt *s)
594 {
595 	if (s->code == NOP)
596 		return -1;
597 
598 	switch (BPF_CLASS(s->code)) {
599 
600 	case BPF_LD:
601 	case BPF_ALU:
602 		return A_ATOM;
603 
604 	case BPF_LDX:
605 		return X_ATOM;
606 
607 	case BPF_ST:
608 	case BPF_STX:
609 		return s->k;
610 
611 	case BPF_MISC:
612 		return BPF_MISCOP(s->code) == BPF_TAX ? X_ATOM : A_ATOM;
613 	}
614 	return -1;
615 }
616 
617 /*
618  * Compute the sets of registers used, defined, and killed by 'b'.
619  *
620  * "Used" means that a statement in 'b' uses the register before any
621  * statement in 'b' defines it, i.e. it uses the value left in
622  * that register by a predecessor block of this block.
623  * "Defined" means that a statement in 'b' defines it.
624  * "Killed" means that a statement in 'b' defines it before any
625  * statement in 'b' uses it, i.e. it kills the value left in that
626  * register by a predecessor block of this block.
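 *
 * For example, the statement pair "ldx M[3]; st M[4]" uses M[3] and the
 * accumulator (the store reads A, which the block has not yet defined),
 * defines X and M[4], and kills X and M[4].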
627  */
628 static void
629 compute_local_ud(struct block *b)
630 {
631 	struct slist *s;
632 	atomset def = 0, use = 0, killed = 0;
633 	int atom;
634 
635 	for (s = b->stmts; s; s = s->next) {
636 		if (s->s.code == NOP)
637 			continue;
638 		atom = atomuse(&s->s);
639 		if (atom >= 0) {
640 			if (atom == AX_ATOM) {
641 				if (!ATOMELEM(def, X_ATOM))
642 					use |= ATOMMASK(X_ATOM);
643 				if (!ATOMELEM(def, A_ATOM))
644 					use |= ATOMMASK(A_ATOM);
645 			}
646 			else if (atom < N_ATOMS) {
647 				if (!ATOMELEM(def, atom))
648 					use |= ATOMMASK(atom);
649 			}
650 			else
651 				abort();
652 		}
653 		atom = atomdef(&s->s);
654 		if (atom >= 0) {
655 			if (!ATOMELEM(use, atom))
656 				killed |= ATOMMASK(atom);
657 			def |= ATOMMASK(atom);
658 		}
659 	}
660 	if (BPF_CLASS(b->s.code) == BPF_JMP) {
661 		/*
662 		 * XXX - what about RET?
663 		 */
664 		atom = atomuse(&b->s);
665 		if (atom >= 0) {
666 			if (atom == AX_ATOM) {
667 				if (!ATOMELEM(def, X_ATOM))
668 					use |= ATOMMASK(X_ATOM);
669 				if (!ATOMELEM(def, A_ATOM))
670 					use |= ATOMMASK(A_ATOM);
671 			}
672 			else if (atom < N_ATOMS) {
673 				if (!ATOMELEM(def, atom))
674 					use |= ATOMMASK(atom);
675 			}
676 			else
677 				abort();
678 		}
679 	}
680 
681 	b->def = def;
682 	b->kill = killed;
683 	b->in_use = use;
684 }
685 
686 /*
687  * Assume graph is already leveled.
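 *
 * A single bottom-up pass is enough: every block's level is strictly
 * greater than that of its successors, so when the second loop reaches
 * a block, the in_use sets of both of its successors are already final.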
688  */
689 static void
690 find_ud(opt_state_t *opt_state, struct block *root)
691 {
692 	int i, maxlevel;
693 	struct block *p;
694 
695 	/*
696 	 * root->level is the highest level number found;
697 	 * count down from there.
698 	 */
699 	maxlevel = root->level;
700 	for (i = maxlevel; i >= 0; --i)
701 		for (p = opt_state->levels[i]; p; p = p->link) {
702 			compute_local_ud(p);
703 			p->out_use = 0;
704 		}
705 
706 	for (i = 1; i <= maxlevel; ++i) {
707 		for (p = opt_state->levels[i]; p; p = p->link) {
708 			p->out_use |= JT(p)->in_use | JF(p)->in_use;
709 			p->in_use |= p->out_use &~ p->kill;
710 		}
711 	}
712 }
713 static void
714 init_val(opt_state_t *opt_state)
715 {
716 	opt_state->curval = 0;
717 	opt_state->next_vnode = opt_state->vnode_base;
718 	memset((char *)opt_state->vmap, 0, opt_state->maxval * sizeof(*opt_state->vmap));
719 	memset((char *)opt_state->hashtbl, 0, sizeof opt_state->hashtbl);
720 }
721 
722 /*
723  * Because we really don't have an IR, this stuff is a little messy.
724  *
725  * This routine looks in the table of existing value numbers for a value
726  * generated from an operation with the specified opcode and
727  * the specified values.  If it finds it, it returns its value number,
728  * otherwise it makes a new entry in the table and returns the
729  * value number of that entry.
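 *
 * For example, two "ldh [12]" statements are both entered as
 * (BPF_LD|BPF_ABS|BPF_H, 12, 0) and therefore share one value number,
 * and K(0x800) yields the value number shared by every load of the
 * constant 0x800; that sharing is what the rest of the pass keys on.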
730  */
731 static bpf_u_int32
732 F(opt_state_t *opt_state, int code, bpf_u_int32 v0, bpf_u_int32 v1)
733 {
734 	u_int hash;
735 	bpf_u_int32 val;
736 	struct valnode *p;
737 
738 	hash = (u_int)code ^ (v0 << 4) ^ (v1 << 8);
739 	hash %= MODULUS;
740 
741 	for (p = opt_state->hashtbl[hash]; p; p = p->next)
742 		if (p->code == code && p->v0 == v0 && p->v1 == v1)
743 			return p->val;
744 
745 	/*
746 	 * Not found.  Allocate a new value, and assign it a new
747 	 * value number.
748 	 *
749 	 * opt_state->curval starts out as 0, which means VAL_UNKNOWN; we
750 	 * increment it before using it as the new value number, which
751 	 * means we never assign VAL_UNKNOWN.
752 	 *
753 	 * XXX - unless we overflow, but we probably won't have 2^32-1
754 	 * values; we treat 32 bits as effectively infinite.
755 	 */
756 	val = ++opt_state->curval;
757 	if (BPF_MODE(code) == BPF_IMM &&
758 	    (BPF_CLASS(code) == BPF_LD || BPF_CLASS(code) == BPF_LDX)) {
759 		opt_state->vmap[val].const_val = v0;
760 		opt_state->vmap[val].is_const = 1;
761 	}
762 	p = opt_state->next_vnode++;
763 	p->val = val;
764 	p->code = code;
765 	p->v0 = v0;
766 	p->v1 = v1;
767 	p->next = opt_state->hashtbl[hash];
768 	opt_state->hashtbl[hash] = p;
769 
770 	return val;
771 }
772 
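/*
 * Record that the location pointed to by 'valp' now holds value number
 * 'newval'.  If an "alter" pass finds it already holding that same,
 * known value (e.g. a second "ld #1" while the accumulator already has
 * the value number of the constant 1), the statement is redundant and
 * is turned into a NOP.
 */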
773 static inline void
774 vstore(struct stmt *s, bpf_u_int32 *valp, bpf_u_int32 newval, int alter)
775 {
776 	if (alter && newval != VAL_UNKNOWN && *valp == newval)
777 		s->code = NOP;
778 	else
779 		*valp = newval;
780 }
781 
782 /*
783  * Do constant-folding on binary operators.
784  * (Unary operators are handled elsewhere.)
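 *
 * For example, if the accumulator is known to hold the constant 6 and
 * the statement is "mul #7", the statement is rewritten as "ld #42" and
 * the pass is marked as having changed something.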
785  */
786 static void
787 fold_op(opt_state_t *opt_state, struct stmt *s, bpf_u_int32 v0, bpf_u_int32 v1)
788 {
789 	bpf_u_int32 a, b;
790 
791 	a = opt_state->vmap[v0].const_val;
792 	b = opt_state->vmap[v1].const_val;
793 
794 	switch (BPF_OP(s->code)) {
795 	case BPF_ADD:
796 		a += b;
797 		break;
798 
799 	case BPF_SUB:
800 		a -= b;
801 		break;
802 
803 	case BPF_MUL:
804 		a *= b;
805 		break;
806 
807 	case BPF_DIV:
808 		if (b == 0)
809 			opt_error(opt_state, "division by zero");
810 		a /= b;
811 		break;
812 
813 	case BPF_MOD:
814 		if (b == 0)
815 			opt_error(opt_state, "modulus by zero");
816 		a %= b;
817 		break;
818 
819 	case BPF_AND:
820 		a &= b;
821 		break;
822 
823 	case BPF_OR:
824 		a |= b;
825 		break;
826 
827 	case BPF_XOR:
828 		a ^= b;
829 		break;
830 
831 	case BPF_LSH:
832 		/*
833 		 * A left shift of more than the width of the type
834 		 * is undefined in C; we'll just treat it as shifting
835 		 * all the bits out.
836 		 *
837 		 * XXX - the BPF interpreter doesn't check for this,
838 		 * so its behavior is dependent on the behavior of
839 		 * the processor on which it's running.  There are
840 		 * processors on which it shifts all the bits out
841 		 * and processors on which it does no shift.
842 		 */
843 		if (b < 32)
844 			a <<= b;
845 		else
846 			a = 0;
847 		break;
848 
849 	case BPF_RSH:
850 		/*
851 		 * A right shift of more than the width of the type
852 		 * is undefined in C; we'll just treat it as shifting
853 		 * all the bits out.
854 		 *
855 		 * XXX - the BPF interpreter doesn't check for this,
856 		 * so its behavior is dependent on the behavior of
857 		 * the processor on which it's running.  There are
858 		 * processors on which it shifts all the bits out
859 		 * and processors on which it does no shift.
860 		 */
861 		if (b < 32)
862 			a >>= b;
863 		else
864 			a = 0;
865 		break;
866 
867 	default:
868 		abort();
869 	}
870 	s->k = a;
871 	s->code = BPF_LD|BPF_IMM;
872 	/*
873 	 * XXX - optimizer loop detection.
874 	 */
875 	opt_state->non_branch_movement_performed = 1;
876 	opt_state->done = 0;
877 }
878 
879 static inline struct slist *
880 this_op(struct slist *s)
881 {
882 	while (s != 0 && s->s.code == NOP)
883 		s = s->next;
884 	return s;
885 }
886 
887 static void
888 opt_not(struct block *b)
889 {
890 	struct block *tmp = JT(b);
891 
892 	JT(b) = JF(b);
893 	JF(b) = tmp;
894 }
895 
896 static void
897 opt_peep(opt_state_t *opt_state, struct block *b)
898 {
899 	struct slist *s;
900 	struct slist *next, *last;
901 	bpf_u_int32 val;
902 
903 	s = b->stmts;
904 	if (s == 0)
905 		return;
906 
907 	last = s;
908 	for (/*empty*/; /*empty*/; s = next) {
909 		/*
910 		 * Skip over nops.
911 		 */
912 		s = this_op(s);
913 		if (s == 0)
914 			break;	/* nothing left in the block */
915 
916 		/*
917 		 * Find the next real instruction after that one
918 		 * (skipping nops).
919 		 */
920 		next = this_op(s->next);
921 		if (next == 0)
922 			break;	/* no next instruction */
923 		last = next;
924 
925 		/*
926 		 * st  M[k]	-->	st  M[k]
927 		 * ldx M[k]		tax
928 		 */
929 		if (s->s.code == BPF_ST &&
930 		    next->s.code == (BPF_LDX|BPF_MEM) &&
931 		    s->s.k == next->s.k) {
932 			/*
933 			 * XXX - optimizer loop detection.
934 			 */
935 			opt_state->non_branch_movement_performed = 1;
936 			opt_state->done = 0;
937 			next->s.code = BPF_MISC|BPF_TAX;
938 		}
939 		/*
940 		 * ld  #k	-->	ldx  #k
941 		 * tax			txa
942 		 */
943 		if (s->s.code == (BPF_LD|BPF_IMM) &&
944 		    next->s.code == (BPF_MISC|BPF_TAX)) {
945 			s->s.code = BPF_LDX|BPF_IMM;
946 			next->s.code = BPF_MISC|BPF_TXA;
947 			/*
948 			 * XXX - optimizer loop detection.
949 			 */
950 			opt_state->non_branch_movement_performed = 1;
951 			opt_state->done = 0;
952 		}
953 		/*
954 		 * This is an ugly special case, but it happens
955 		 * when you say tcp[k] or udp[k] where k is a constant.
956 		 */
957 		if (s->s.code == (BPF_LD|BPF_IMM)) {
958 			struct slist *add, *tax, *ild;
959 
960 			/*
961 			 * Check that X isn't used on exit from this
962 			 * block (which the optimizer might cause).
963 			 * We know the code generator won't generate
964 			 * any local dependencies.
965 			 */
966 			if (ATOMELEM(b->out_use, X_ATOM))
967 				continue;
968 
969 			/*
970 			 * Check that the instruction following the ldi
971 			 * is an addx, or it's an ldxms with an addx
972 			 * following it (with 0 or more nops between the
973 			 * ldxms and addx).
974 			 */
975 			if (next->s.code != (BPF_LDX|BPF_MSH|BPF_B))
976 				add = next;
977 			else
978 				add = this_op(next->next);
979 			if (add == 0 || add->s.code != (BPF_ALU|BPF_ADD|BPF_X))
980 				continue;
981 
982 			/*
983 			 * Check that a tax follows that (with 0 or more
984 			 * nops between them).
985 			 */
986 			tax = this_op(add->next);
987 			if (tax == 0 || tax->s.code != (BPF_MISC|BPF_TAX))
988 				continue;
989 
990 			/*
991 			 * Check that an ild follows that (with 0 or more
992 			 * nops between them).
993 			 */
994 			ild = this_op(tax->next);
995 			if (ild == 0 || BPF_CLASS(ild->s.code) != BPF_LD ||
996 			    BPF_MODE(ild->s.code) != BPF_IND)
997 				continue;
998 			/*
999 			 * We want to turn this sequence:
1000 			 *
1001 			 * (004) ldi     #0x2		{s}
1002 			 * (005) ldxms   [14]		{next}  -- optional
1003 			 * (006) addx			{add}
1004 			 * (007) tax			{tax}
1005 			 * (008) ild     [x+0]		{ild}
1006 			 *
1007 			 * into this sequence:
1008 			 *
1009 			 * (004) nop
1010 			 * (005) ldxms   [14]
1011 			 * (006) nop
1012 			 * (007) nop
1013 			 * (008) ild     [x+2]
1014 			 *
1015 			 * XXX We need to check that X is not
1016 			 * subsequently used, because we want to change
1017 			 * what'll be in it after this sequence.
1018 			 *
1019 			 * We know we can eliminate the accumulator
1020 			 * modifications earlier in the sequence since
1021 			 * it is defined by the last stmt of this sequence
1022 			 * (i.e., the last statement of the sequence loads
1023 			 * a value into the accumulator, so we can eliminate
1024 			 * earlier operations on the accumulator).
1025 			 */
1026 			ild->s.k += s->s.k;
1027 			s->s.code = NOP;
1028 			add->s.code = NOP;
1029 			tax->s.code = NOP;
1030 			/*
1031 			 * XXX - optimizer loop detection.
1032 			 */
1033 			opt_state->non_branch_movement_performed = 1;
1034 			opt_state->done = 0;
1035 		}
1036 	}
1037 	/*
1038 	 * If the comparison at the end of a block is an equality
1039 	 * comparison against a constant, and nobody uses the value
1040 	 * we leave in the A register at the end of a block, and
1041 	 * the operation preceding the comparison is an arithmetic
1042 	 * operation, we can sometimes optimize it away.
1043 	 */
1044 	if (b->s.code == (BPF_JMP|BPF_JEQ|BPF_K) &&
1045 	    !ATOMELEM(b->out_use, A_ATOM)) {
1046 		/*
1047 		 * We can optimize away certain subtractions of the
1048 		 * X register.
1049 		 */
1050 		if (last->s.code == (BPF_ALU|BPF_SUB|BPF_X)) {
1051 			val = b->val[X_ATOM];
1052 			if (opt_state->vmap[val].is_const) {
1053 				/*
1054 				 * If we have a subtract to do a comparison,
1055 				 * and the X register is a known constant,
1056 				 * we can merge this value into the
1057 				 * comparison:
1058 				 *
1059 				 * sub x  ->	nop
1060 				 * jeq #y	jeq #(x+y)
1061 				 */
1062 				b->s.k += opt_state->vmap[val].const_val;
1063 				last->s.code = NOP;
1064 				/*
1065 				 * XXX - optimizer loop detection.
1066 				 */
1067 				opt_state->non_branch_movement_performed = 1;
1068 				opt_state->done = 0;
1069 			} else if (b->s.k == 0) {
1070 				/*
1071 				 * If the X register isn't a constant,
1072 				 * and the comparison in the test is
1073 				 * against 0, we can compare with the
1074 				 * X register, instead:
1075 				 *
1076 				 * sub x  ->	nop
1077 				 * jeq #0	jeq x
1078 				 */
1079 				last->s.code = NOP;
1080 				b->s.code = BPF_JMP|BPF_JEQ|BPF_X;
1081 				/*
1082 				 * XXX - optimizer loop detection.
1083 				 */
1084 				opt_state->non_branch_movement_performed = 1;
1085 				opt_state->done = 0;
1086 			}
1087 		}
1088 		/*
1089 		 * Likewise, a constant subtract can be simplified:
1090 		 *
1091 		 * sub #x ->	nop
1092 		 * jeq #y ->	jeq #(x+y)
1093 		 */
1094 		else if (last->s.code == (BPF_ALU|BPF_SUB|BPF_K)) {
1095 			last->s.code = NOP;
1096 			b->s.k += last->s.k;
1097 			/*
1098 			 * XXX - optimizer loop detection.
1099 			 */
1100 			opt_state->non_branch_movement_performed = 1;
1101 			opt_state->done = 0;
1102 		}
1103 		/*
1104 		 * And, similarly, a constant AND can be simplified
1105 		 * if we're testing against 0, i.e.:
1106 		 *
1107 		 * and #k	nop
1108 		 * jeq #0  ->	jset #k
1109 		 */
1110 		else if (last->s.code == (BPF_ALU|BPF_AND|BPF_K) &&
1111 		    b->s.k == 0) {
1112 			b->s.k = last->s.k;
1113 			b->s.code = BPF_JMP|BPF_K|BPF_JSET;
1114 			last->s.code = NOP;
1115 			/*
1116 			 * XXX - optimizer loop detection.
1117 			 */
1118 			opt_state->non_branch_movement_performed = 1;
1119 			opt_state->done = 0;
1120 			opt_not(b);
1121 		}
1122 	}
1123 	/*
1124 	 * jset #0        ->   never
1125 	 * jset #ffffffff ->   always
1126 	 */
1127 	if (b->s.code == (BPF_JMP|BPF_K|BPF_JSET)) {
1128 		if (b->s.k == 0)
1129 			JT(b) = JF(b);
1130 		if (b->s.k == 0xffffffffU)
1131 			JF(b) = JT(b);
1132 	}
1133 	/*
1134 	 * If we're comparing against the index register, and the index
1135 	 * register is a known constant, we can just compare against that
1136 	 * constant.
1137 	 */
1138 	val = b->val[X_ATOM];
1139 	if (opt_state->vmap[val].is_const && BPF_SRC(b->s.code) == BPF_X) {
1140 		bpf_u_int32 v = opt_state->vmap[val].const_val;
1141 		b->s.code &= ~BPF_X;
1142 		b->s.k = v;
1143 	}
1144 	/*
1145 	 * If the accumulator is a known constant, we can compute the
1146 	 * comparison result.
1147 	 */
1148 	val = b->val[A_ATOM];
1149 	if (opt_state->vmap[val].is_const && BPF_SRC(b->s.code) == BPF_K) {
1150 		bpf_u_int32 v = opt_state->vmap[val].const_val;
1151 		switch (BPF_OP(b->s.code)) {
1152 
1153 		case BPF_JEQ:
1154 			v = v == b->s.k;
1155 			break;
1156 
1157 		case BPF_JGT:
1158 			v = v > b->s.k;
1159 			break;
1160 
1161 		case BPF_JGE:
1162 			v = v >= b->s.k;
1163 			break;
1164 
1165 		case BPF_JSET:
1166 			v &= b->s.k;
1167 			break;
1168 
1169 		default:
1170 			abort();
1171 		}
1172 		if (JF(b) != JT(b)) {
1173 			/*
1174 			 * XXX - optimizer loop detection.
1175 			 */
1176 			opt_state->non_branch_movement_performed = 1;
1177 			opt_state->done = 0;
1178 		}
1179 		if (v)
1180 			JF(b) = JT(b);
1181 		else
1182 			JT(b) = JF(b);
1183 	}
1184 }
1185 
1186 /*
1187  * Compute the symbolic value of expression of 's', and update
1188  * anything it defines in the value table 'val'.  If 'alter' is true,
1189  * do various optimizations.  This code would be cleaner if symbolic
1190  * evaluation and code transformations weren't folded together.
1191  */
1192 static void
1193 opt_stmt(opt_state_t *opt_state, struct stmt *s, bpf_u_int32 val[], int alter)
1194 {
1195 	int op;
1196 	bpf_u_int32 v;
1197 
1198 	switch (s->code) {
1199 
1200 	case BPF_LD|BPF_ABS|BPF_W:
1201 	case BPF_LD|BPF_ABS|BPF_H:
1202 	case BPF_LD|BPF_ABS|BPF_B:
1203 		v = F(opt_state, s->code, s->k, 0L);
1204 		vstore(s, &val[A_ATOM], v, alter);
1205 		break;
1206 
1207 	case BPF_LD|BPF_IND|BPF_W:
1208 	case BPF_LD|BPF_IND|BPF_H:
1209 	case BPF_LD|BPF_IND|BPF_B:
1210 		v = val[X_ATOM];
1211 		if (alter && opt_state->vmap[v].is_const) {
1212 			s->code = BPF_LD|BPF_ABS|BPF_SIZE(s->code);
1213 			s->k += opt_state->vmap[v].const_val;
1214 			v = F(opt_state, s->code, s->k, 0L);
1215 			/*
1216 			 * XXX - optimizer loop detection.
1217 			 */
1218 			opt_state->non_branch_movement_performed = 1;
1219 			opt_state->done = 0;
1220 		}
1221 		else
1222 			v = F(opt_state, s->code, s->k, v);
1223 		vstore(s, &val[A_ATOM], v, alter);
1224 		break;
1225 
1226 	case BPF_LD|BPF_LEN:
1227 		v = F(opt_state, s->code, 0L, 0L);
1228 		vstore(s, &val[A_ATOM], v, alter);
1229 		break;
1230 
1231 	case BPF_LD|BPF_IMM:
1232 		v = K(s->k);
1233 		vstore(s, &val[A_ATOM], v, alter);
1234 		break;
1235 
1236 	case BPF_LDX|BPF_IMM:
1237 		v = K(s->k);
1238 		vstore(s, &val[X_ATOM], v, alter);
1239 		break;
1240 
1241 	case BPF_LDX|BPF_MSH|BPF_B:
1242 		v = F(opt_state, s->code, s->k, 0L);
1243 		vstore(s, &val[X_ATOM], v, alter);
1244 		break;
1245 
1246 	case BPF_ALU|BPF_NEG:
1247 		if (alter && opt_state->vmap[val[A_ATOM]].is_const) {
1248 			s->code = BPF_LD|BPF_IMM;
1249 			/*
1250 			 * Do this negation as unsigned arithmetic; that's
1251 			 * what modern BPF engines do, and it guarantees
1252 			 * that all possible values can be negated.  (Yeah,
1253 			 * negating 0x80000000, the minimum signed 32-bit
1254 			 * two's-complement value, results in 0x80000000,
1255 			 * so it's still negative, but we *should* be doing
1256 			 * all unsigned arithmetic here, to match what
1257 			 * modern BPF engines do.)
1258 			 *
1259 			 * Express it as 0U - (unsigned value) so that we
1260 			 * don't get compiler warnings about negating an
1261 			 * unsigned value and don't get UBSan warnings
1262 			 * about the result of negating 0x80000000 being
1263 			 * undefined.
1264 			 */
1265 			s->k = 0U - opt_state->vmap[val[A_ATOM]].const_val;
1266 			val[A_ATOM] = K(s->k);
1267 		}
1268 		else
1269 			val[A_ATOM] = F(opt_state, s->code, val[A_ATOM], 0L);
1270 		break;
1271 
1272 	case BPF_ALU|BPF_ADD|BPF_K:
1273 	case BPF_ALU|BPF_SUB|BPF_K:
1274 	case BPF_ALU|BPF_MUL|BPF_K:
1275 	case BPF_ALU|BPF_DIV|BPF_K:
1276 	case BPF_ALU|BPF_MOD|BPF_K:
1277 	case BPF_ALU|BPF_AND|BPF_K:
1278 	case BPF_ALU|BPF_OR|BPF_K:
1279 	case BPF_ALU|BPF_XOR|BPF_K:
1280 	case BPF_ALU|BPF_LSH|BPF_K:
1281 	case BPF_ALU|BPF_RSH|BPF_K:
1282 		op = BPF_OP(s->code);
1283 		if (alter) {
1284 			if (s->k == 0) {
1285 				/*
1286 				 * Optimize operations where the constant
1287 				 * is zero.
1288 				 *
1289 				 * Don't optimize away "sub #0"
1290 				 * as it may be needed later to
1291 				 * fixup the generated math code.
1292 				 *
1293 				 * Fail if we're dividing by zero or taking
1294 				 * a modulus by zero.
1295 				 */
1296 				if (op == BPF_ADD ||
1297 				    op == BPF_LSH || op == BPF_RSH ||
1298 				    op == BPF_OR || op == BPF_XOR) {
1299 					s->code = NOP;
1300 					break;
1301 				}
1302 				if (op == BPF_MUL || op == BPF_AND) {
1303 					s->code = BPF_LD|BPF_IMM;
1304 					val[A_ATOM] = K(s->k);
1305 					break;
1306 				}
1307 				if (op == BPF_DIV)
1308 					opt_error(opt_state,
1309 					    "division by zero");
1310 				if (op == BPF_MOD)
1311 					opt_error(opt_state,
1312 					    "modulus by zero");
1313 			}
1314 			if (opt_state->vmap[val[A_ATOM]].is_const) {
1315 				fold_op(opt_state, s, val[A_ATOM], K(s->k));
1316 				val[A_ATOM] = K(s->k);
1317 				break;
1318 			}
1319 		}
1320 		val[A_ATOM] = F(opt_state, s->code, val[A_ATOM], K(s->k));
1321 		break;
1322 
1323 	case BPF_ALU|BPF_ADD|BPF_X:
1324 	case BPF_ALU|BPF_SUB|BPF_X:
1325 	case BPF_ALU|BPF_MUL|BPF_X:
1326 	case BPF_ALU|BPF_DIV|BPF_X:
1327 	case BPF_ALU|BPF_MOD|BPF_X:
1328 	case BPF_ALU|BPF_AND|BPF_X:
1329 	case BPF_ALU|BPF_OR|BPF_X:
1330 	case BPF_ALU|BPF_XOR|BPF_X:
1331 	case BPF_ALU|BPF_LSH|BPF_X:
1332 	case BPF_ALU|BPF_RSH|BPF_X:
1333 		op = BPF_OP(s->code);
1334 		if (alter && opt_state->vmap[val[X_ATOM]].is_const) {
1335 			if (opt_state->vmap[val[A_ATOM]].is_const) {
1336 				fold_op(opt_state, s, val[A_ATOM], val[X_ATOM]);
1337 				val[A_ATOM] = K(s->k);
1338 			}
1339 			else {
1340 				s->code = BPF_ALU|BPF_K|op;
1341 				s->k = opt_state->vmap[val[X_ATOM]].const_val;
1342 				if ((op == BPF_LSH || op == BPF_RSH) &&
1343 				    s->k > 31)
1344 					opt_error(opt_state,
1345 					    "shift by more than 31 bits");
1346 				/*
1347 				 * XXX - optimizer loop detection.
1348 				 */
1349 				opt_state->non_branch_movement_performed = 1;
1350 				opt_state->done = 0;
1351 				val[A_ATOM] =
1352 					F(opt_state, s->code, val[A_ATOM], K(s->k));
1353 			}
1354 			break;
1355 		}
1356 		/*
1357 		 * Check if we're doing something to an accumulator
1358 		 * that is 0, and simplify.  This may not seem like
1359 		 * much of a simplification but it could open up further
1360 		 * optimizations.
1361 		 * XXX We could also check for mul by 1, etc.
1362 		 */
1363 		if (alter && opt_state->vmap[val[A_ATOM]].is_const
1364 		    && opt_state->vmap[val[A_ATOM]].const_val == 0) {
1365 			if (op == BPF_ADD || op == BPF_OR || op == BPF_XOR) {
1366 				s->code = BPF_MISC|BPF_TXA;
1367 				vstore(s, &val[A_ATOM], val[X_ATOM], alter);
1368 				break;
1369 			}
1370 			else if (op == BPF_MUL || op == BPF_DIV || op == BPF_MOD ||
1371 				 op == BPF_AND || op == BPF_LSH || op == BPF_RSH) {
1372 				s->code = BPF_LD|BPF_IMM;
1373 				s->k = 0;
1374 				vstore(s, &val[A_ATOM], K(s->k), alter);
1375 				break;
1376 			}
1377 			else if (op == BPF_NEG) {
1378 				s->code = NOP;
1379 				break;
1380 			}
1381 		}
1382 		val[A_ATOM] = F(opt_state, s->code, val[A_ATOM], val[X_ATOM]);
1383 		break;
1384 
1385 	case BPF_MISC|BPF_TXA:
1386 		vstore(s, &val[A_ATOM], val[X_ATOM], alter);
1387 		break;
1388 
1389 	case BPF_LD|BPF_MEM:
1390 		v = val[s->k];
1391 		if (alter && opt_state->vmap[v].is_const) {
1392 			s->code = BPF_LD|BPF_IMM;
1393 			s->k = opt_state->vmap[v].const_val;
1394 			/*
1395 			 * XXX - optimizer loop detection.
1396 			 */
1397 			opt_state->non_branch_movement_performed = 1;
1398 			opt_state->done = 0;
1399 		}
1400 		vstore(s, &val[A_ATOM], v, alter);
1401 		break;
1402 
1403 	case BPF_MISC|BPF_TAX:
1404 		vstore(s, &val[X_ATOM], val[A_ATOM], alter);
1405 		break;
1406 
1407 	case BPF_LDX|BPF_MEM:
1408 		v = val[s->k];
1409 		if (alter && opt_state->vmap[v].is_const) {
1410 			s->code = BPF_LDX|BPF_IMM;
1411 			s->k = opt_state->vmap[v].const_val;
1412 			/*
1413 			 * XXX - optimizer loop detection.
1414 			 */
1415 			opt_state->non_branch_movement_performed = 1;
1416 			opt_state->done = 0;
1417 		}
1418 		vstore(s, &val[X_ATOM], v, alter);
1419 		break;
1420 
1421 	case BPF_ST:
1422 		vstore(s, &val[s->k], val[A_ATOM], alter);
1423 		break;
1424 
1425 	case BPF_STX:
1426 		vstore(s, &val[s->k], val[X_ATOM], alter);
1427 		break;
1428 	}
1429 }
1430 
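/*
 * Track, per atom, the most recent definition that has not yet been
 * read.  When a new definition of an atom arrives while an unread one
 * is still pending, the pending statement is dead and is turned into a
 * NOP; e.g. in "ld #1; ldh [12]" the first load dies here.
 */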
1431 static void
1432 deadstmt(opt_state_t *opt_state, register struct stmt *s, register struct stmt *last[])
1433 {
1434 	register int atom;
1435 
1436 	atom = atomuse(s);
1437 	if (atom >= 0) {
1438 		if (atom == AX_ATOM) {
1439 			last[X_ATOM] = 0;
1440 			last[A_ATOM] = 0;
1441 		}
1442 		else
1443 			last[atom] = 0;
1444 	}
1445 	atom = atomdef(s);
1446 	if (atom >= 0) {
1447 		if (last[atom]) {
1448 			/*
1449 			 * XXX - optimizer loop detection.
1450 			 */
1451 			opt_state->non_branch_movement_performed = 1;
1452 			opt_state->done = 0;
1453 			last[atom]->code = NOP;
1454 		}
1455 		last[atom] = s;
1456 	}
1457 }
1458 
1459 static void
1460 opt_deadstores(opt_state_t *opt_state, register struct block *b)
1461 {
1462 	register struct slist *s;
1463 	register int atom;
1464 	struct stmt *last[N_ATOMS];
1465 
1466 	memset((char *)last, 0, sizeof last);
1467 
1468 	for (s = b->stmts; s != 0; s = s->next)
1469 		deadstmt(opt_state, &s->s, last);
1470 	deadstmt(opt_state, &b->s, last);
1471 
1472 	for (atom = 0; atom < N_ATOMS; ++atom)
1473 		if (last[atom] && !ATOMELEM(b->out_use, atom)) {
1474 			last[atom]->code = NOP;
1475 			/*
1476 			 * XXX - optimizer loop detection.
1477 			 */
1478 			opt_state->non_branch_movement_performed = 1;
1479 			opt_state->done = 0;
1480 		}
1481 }
1482 
1483 static void
1484 opt_blk(opt_state_t *opt_state, struct block *b, int do_stmts)
1485 {
1486 	struct slist *s;
1487 	struct edge *p;
1488 	int i;
1489 	bpf_u_int32 aval, xval;
1490 
1491 #if 0
1492 	for (s = b->stmts; s && s->next; s = s->next)
1493 		if (BPF_CLASS(s->s.code) == BPF_JMP) {
1494 			do_stmts = 0;
1495 			break;
1496 		}
1497 #endif
1498 
1499 	/*
1500 	 * Initialize the atom values.
1501 	 */
1502 	p = b->in_edges;
1503 	if (p == 0) {
1504 		/*
1505 		 * We have no predecessors, so everything is undefined
1506 		 * upon entry to this block.
1507 		 */
1508 		memset((char *)b->val, 0, sizeof(b->val));
1509 	} else {
1510 		/*
1511 		 * Inherit values from our predecessors.
1512 		 *
1513 		 * First, get the values from the predecessor along the
1514 		 * first edge leading to this node.
1515 		 */
1516 		memcpy((char *)b->val, (char *)p->pred->val, sizeof(b->val));
1517 		/*
1518 		 * Now look at all the other nodes leading to this node.
1519 		 * If, for the predecessor along that edge, a register
1520 		 * has a different value from the one we have (i.e.,
1521 		 * control paths are merging, and the merging paths
1522 		 * assign different values to that register), give the
1523 		 * register the undefined value of 0.
1524 		 */
1525 		while ((p = p->next) != NULL) {
1526 			for (i = 0; i < N_ATOMS; ++i)
1527 				if (b->val[i] != p->pred->val[i])
1528 					b->val[i] = 0;
1529 		}
1530 	}
1531 	aval = b->val[A_ATOM];
1532 	xval = b->val[X_ATOM];
1533 	for (s = b->stmts; s; s = s->next)
1534 		opt_stmt(opt_state, &s->s, b->val, do_stmts);
1535 
1536 	/*
1537 	 * This is a special case: if we don't use anything from this
1538 	 * block, and we load the accumulator or index register with a
1539 	 * value that is already there, or if this block is a return,
1540 	 * eliminate all the statements.
1541 	 *
1542 	 * XXX - what if it does a store?  Presumably that falls under
1543 	 * the heading of "if we don't use anything from this block",
1544 	 * i.e., if we use any memory location set to a different
1545 	 * value by this block, then we use something from this block.
1546 	 *
1547 	 * XXX - why does it matter whether we use anything from this
1548 	 * block?  If the accumulator or index register doesn't change
1549 	 * its value, isn't that OK even if we use that value?
1550 	 *
1551 	 * XXX - if we load the accumulator with a different value,
1552 	 * and the block ends with a conditional branch, we obviously
1553 	 * can't eliminate it, as the branch depends on that value.
1554 	 * For the index register, the conditional branch only depends
1555 	 * on the index register value if the test is against the index
1556 	 * register value rather than a constant; if nothing uses the
1557 	 * value we put into the index register, and we're not testing
1558 	 * against the index register's value, and there aren't any
1559 	 * other problems that would keep us from eliminating this
1560 	 * block, can we eliminate it?
1561 	 */
1562 	if (do_stmts &&
1563 	    ((b->out_use == 0 &&
1564 	      aval != VAL_UNKNOWN && b->val[A_ATOM] == aval &&
1565 	      xval != VAL_UNKNOWN && b->val[X_ATOM] == xval) ||
1566 	     BPF_CLASS(b->s.code) == BPF_RET)) {
1567 		if (b->stmts != 0) {
1568 			b->stmts = 0;
1569 			/*
1570 			 * XXX - optimizer loop detection.
1571 			 */
1572 			opt_state->non_branch_movement_performed = 1;
1573 			opt_state->done = 0;
1574 		}
1575 	} else {
1576 		opt_peep(opt_state, b);
1577 		opt_deadstores(opt_state, b);
1578 	}
1579 	/*
1580 	 * Set up values for branch optimizer.
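	 * The false edge gets the negated branch opcode; fold_edge() uses
	 * the sign to tell whether a dominating edge is the "branch if true"
	 * or the "branch if false" edge of its predecessor's test.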
1581 	 */
1582 	if (BPF_SRC(b->s.code) == BPF_K)
1583 		b->oval = K(b->s.k);
1584 	else
1585 		b->oval = b->val[X_ATOM];
1586 	b->et.code = b->s.code;
1587 	b->ef.code = -b->s.code;
1588 }
1589 
1590 /*
1591  * Return true if any register that is used on exit from 'succ', has
1592  * an exit value that is different from the corresponding exit value
1593  * from 'b'.
1594  */
1595 static int
1596 use_conflict(struct block *b, struct block *succ)
1597 {
1598 	int atom;
1599 	atomset use = succ->out_use;
1600 
1601 	if (use == 0)
1602 		return 0;
1603 
1604 	for (atom = 0; atom < N_ATOMS; ++atom)
1605 		if (ATOMELEM(use, atom))
1606 			if (b->val[atom] != succ->val[atom])
1607 				return 1;
1608 	return 0;
1609 }
1610 
1611 /*
1612  * Given a block that is the successor of an edge, and an edge that
1613  * dominates that edge, return either a pointer to a child of that
1614  * block (a block to which that block jumps) if that block is a
1615  * candidate to replace the successor of the latter edge or NULL
1616  * if neither of the children of the first block are candidates.
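 *
 * For example, if the dominating edge is the true branch out of a
 * "jeq #0x800" test and 'child' ends with the same "jeq #0x800" test on
 * the same accumulator value, the outcome of child's test is already
 * known to be true, so JT(child) is returned.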
1617  */
1618 static struct block *
1619 fold_edge(struct block *child, struct edge *ep)
1620 {
1621 	int sense;
1622 	bpf_u_int32 aval0, aval1, oval0, oval1;
1623 	int code = ep->code;
1624 
1625 	if (code < 0) {
1626 		/*
1627 		 * This edge is a "branch if false" edge.
1628 		 */
1629 		code = -code;
1630 		sense = 0;
1631 	} else {
1632 		/*
1633 		 * This edge is a "branch if true" edge.
1634 		 */
1635 		sense = 1;
1636 	}
1637 
1638 	/*
1639 	 * If the opcode for the branch at the end of the block we
1640 	 * were handed isn't the same as the opcode for the branch
1641 	 * to which the edge we were handed corresponds, the tests
1642 	 * for those branches aren't testing the same conditions,
1643 	 * so the blocks to which the first block branches aren't
1644 	 * candidates to replace the successor of the edge.
1645 	 */
1646 	if (child->s.code != code)
1647 		return 0;
1648 
1649 	aval0 = child->val[A_ATOM];
1650 	oval0 = child->oval;
1651 	aval1 = ep->pred->val[A_ATOM];
1652 	oval1 = ep->pred->oval;
1653 
1654 	/*
1655 	 * If the A register value on exit from the successor block
1656 	 * isn't the same as the A register value on exit from the
1657 	 * predecessor of the edge, the blocks to which the first
1658 	 * block branches aren't candidates to replace the successor
1659 	 * of the edge.
1660 	 */
1661 	if (aval0 != aval1)
1662 		return 0;
1663 
1664 	if (oval0 == oval1)
1665 		/*
1666 		 * The operands of the branch instructions are
1667 		 * identical, so the branches are testing the
1668 		 * same condition, and the result is true if a true
1669 		 * branch was taken to get here, otherwise false.
1670 		 */
1671 		return sense ? JT(child) : JF(child);
1672 
1673 	if (sense && code == (BPF_JMP|BPF_JEQ|BPF_K))
1674 		/*
1675 		 * At this point, we only know the comparison if we
1676 		 * came down the true branch, and it was an equality
1677 		 * comparison with a constant.
1678 		 *
1679 		 * I.e., if we came down the true branch, and the branch
1680 		 * was an equality comparison with a constant, we know the
1681 		 * accumulator contains that constant.  If we came down
1682 		 * the false branch, or the comparison wasn't with a
1683 		 * constant, we don't know what was in the accumulator.
1684 		 *
1685 		 * We rely on the fact that distinct constants have distinct
1686 		 * value numbers.
1687 		 */
1688 		return JF(child);
1689 
1690 	return 0;
1691 }
1692 
1693 /*
1694  * If we can make this edge go directly to a child of the edge's current
1695  * successor, do so.
1696  */
1697 static void
1698 opt_j(opt_state_t *opt_state, struct edge *ep)
1699 {
1700 	register u_int i, k;
1701 	register struct block *target;
1702 
1703 	/*
1704 	 * Does this edge go to a block where, if the test
1705 	 * at the end of it succeeds, it goes to a block
1706 	 * that's a leaf node of the DAG, i.e. a return
1707 	 * statement?
1708 	 * If so, there's nothing to optimize.
1709 	 */
1710 	if (JT(ep->succ) == 0)
1711 		return;
1712 
1713 	/*
1714 	 * Does this edge go to a block that goes, in turn, to
1715 	 * the same block regardless of whether the test at the
1716 	 * end succeeds or fails?
1717 	 */
1718 	if (JT(ep->succ) == JF(ep->succ)) {
1719 		/*
1720 		 * Common branch targets can be eliminated, provided
1721 		 * there is no data dependency.
1722 		 *
1723 		 * Check whether any register used on exit from the
1724 		 * block to which the successor of this edge goes
1725 		 * has a value at that point that's different from
1726 		 * the value it has on exit from the predecessor of
1727 		 * this edge.  If not, the predecessor of this edge
1728 		 * can just go to the block to which the successor
1729 		 * of this edge goes, bypassing the successor of this
1730 		 * edge, as the successor of this edge isn't doing
1731 		 * any calculations whose results are different
1732 		 * from what the blocks before it did and isn't
1733 		 * doing any tests the results of which matter.
1734 		 */
1735 		if (!use_conflict(ep->pred, JT(ep->succ))) {
1736 			/*
1737 			 * No, there isn't.
1738 			 * Make this edge go to the block to
1739 			 * which the successor of that edge
1740 			 * goes.
1741 			 *
1742 			 * XXX - optimizer loop detection.
1743 			 */
1744 			opt_state->non_branch_movement_performed = 1;
1745 			opt_state->done = 0;
1746 			ep->succ = JT(ep->succ);
1747 		}
1748 	}
1749 	/*
1750 	 * For each edge dominator that matches the successor of this
1751 	 * edge, promote the edge successor to its grandchild.
1752 	 *
1753 	 * XXX We violate the set abstraction here in favor of a reasonably
1754 	 * efficient loop.
1755 	 */
1756  top:
1757 	for (i = 0; i < opt_state->edgewords; ++i) {
1758 		/* i'th word in the bitset of dominators */
1759 		register bpf_u_int32 x = ep->edom[i];
1760 
1761 		while (x != 0) {
1762 			/* Find the next dominator in that word and mark it as found */
1763 			k = lowest_set_bit(x);
1764 			x &=~ ((bpf_u_int32)1 << k);
1765 			k += i * BITS_PER_WORD;
1766 
1767 			target = fold_edge(ep->succ, opt_state->edges[k]);
1768 			/*
1769 			 * We have a candidate to replace the successor
1770 			 * of ep.
1771 			 *
1772 			 * Check that there is no data dependency between
1773 			 * nodes that will be violated if we move the edge;
1774 			 * i.e., if any register used on exit from the
1775 			 * candidate has a value at that point different
1776 			 * from the value it has when we exit the
1777 			 * predecessor of that edge, there's a data
1778 			 * dependency that will be violated.
1779 			 */
1780 			if (target != 0 && !use_conflict(ep->pred, target)) {
1781 				/*
1782 				 * It's safe to replace the successor of
1783 				 * ep; do so, and note that we've made
1784 				 * at least one change.
1785 				 *
1786 				 * XXX - this is one of the operations that
1787 				 * happens when the optimizer gets into
1788 				 * one of those infinite loops.
1789 				 */
1790 				opt_state->done = 0;
1791 				ep->succ = target;
1792 				if (JT(target) != 0)
1793 					/*
1794 					 * Start over unless we hit a leaf.
1795 					 */
1796 					goto top;
1797 				return;
1798 			}
1799 		}
1800 	}
1801 }
1802 
1803 /*
1804  * XXX - is this, and and_pullup(), what's described in section 6.1.2
1805  * "Predicate Assertion Propagation" in the BPF+ paper?
1806  *
1807  * Probably not: note that this looks at block dominators, not edge
1808  * dominators.
1809  *
1810  * "A or B" compiles into
1811  *
1812  *          A
1813  *       t / \ f
1814  *        /   B
1815  *       / t / \ f
1816  *      \   /
1817  *       \ /
1818  *        X
1819  *
1820  *
1821  */
1822 static void
1823 or_pullup(opt_state_t *opt_state, struct block *b)
1824 {
1825 	bpf_u_int32 val;
1826 	int at_top;
1827 	struct block *pull;
1828 	struct block **diffp, **samep;
1829 	struct edge *ep;
1830 
1831 	ep = b->in_edges;
1832 	if (ep == 0)
1833 		return;
1834 
1835 	/*
1836 	 * Make sure each predecessor loads the same value.
1837 	 * XXX why?
1838 	 */
1839 	val = ep->pred->val[A_ATOM];
1840 	for (ep = ep->next; ep != 0; ep = ep->next)
1841 		if (val != ep->pred->val[A_ATOM])
1842 			return;
1843 
1844 	/*
1845 	 * For the first edge in the list of edges coming into this block,
1846 	 * see whether the predecessor of that edge comes here via a true
1847 	 * branch or a false branch.
1848 	 */
1849 	if (JT(b->in_edges->pred) == b)
1850 		diffp = &JT(b->in_edges->pred);	/* jt */
1851 	else
1852 		diffp = &JF(b->in_edges->pred);	/* jf */
1853 
1854 	/*
1855 	 * diffp is a pointer to a pointer to the block.
1856 	 *
1857 	 * Go down the false chain looking as far as you can,
1858 	 * making sure that each jump-compare is doing the
1859 	 * same as the original block.
1860 	 *
1861 	 * If you reach the bottom before you reach a
1862 	 * different jump-compare, just exit.  There's nothing
1863 	 * to do here.  XXX - no, this version is checking for
1864 	 * the value leaving the block; that's from the BPF+
1865 	 * pullup routine.
1866 	 */
1867 	at_top = 1;
1868 	for (;;) {
1869 		/*
1870 		 * Done if that's not going anywhere XXX
1871 		 */
1872 		if (*diffp == 0)
1873 			return;
1874 
1875 		/*
1876 		 * Done if that predecessor blah blah blah isn't
1877 		 * going the same place we're going XXX
1878 		 *
1879 		 * Does the true edge of this block point to the same
1880 		 * location as the true edge of b?
1881 		 */
1882 		if (JT(*diffp) != JT(b))
1883 			return;
1884 
1885 		/*
1886 		 * Done if this node isn't a dominator of that
1887 		 * node blah blah blah XXX
1888 		 *
1889 		 * Does b dominate diffp?
1890 		 */
1891 		if (!SET_MEMBER((*diffp)->dom, b->id))
1892 			return;
1893 
1894 		/*
1895 		 * Break out of the loop if that node's value of A
1896 		 * isn't the value of A above XXX
1897 		 */
1898 		if ((*diffp)->val[A_ATOM] != val)
1899 			break;
1900 
1901 		/*
1902 		 * Get the JF for that node XXX
1903 		 * Go down the false path.
1904 		 */
1905 		diffp = &JF(*diffp);
1906 		at_top = 0;
1907 	}
1908 
1909 	/*
1910 	 * Now that we've found a different jump-compare in a chain
1911 	 * below b, search further down until we find another
1912 	 * jump-compare that looks at the original value.  This
1913 	 * jump-compare should get pulled up.  XXX again we're
1914 	 * comparing values not jump-compares.
1915 	 */
1916 	samep = &JF(*diffp);
1917 	for (;;) {
1918 		/*
1919 		 * Done if that's not going anywhere XXX
1920 		 */
1921 		if (*samep == 0)
1922 			return;
1923 
1924 		/*
1925 		 * Done if this block's true edge doesn't go to the
1926 		 * same place as b's true edge.
1927 		 */
1928 		if (JT(*samep) != JT(b))
1929 			return;
1930 
1931 		/*
1932 		 * Done if b isn't a dominator of this node, i.e.
1933 		 * if b's id isn't a member of *samep's dominator
1934 		 * set:
1935 		 * does b dominate *samep?
1936 		 */
1937 		if (!SET_MEMBER((*samep)->dom, b->id))
1938 			return;
1939 
1940 		/*
1941 		 * Break out of the loop if that node's value of A
1942 		 * is the value of A above XXX
1943 		 */
1944 		if ((*samep)->val[A_ATOM] == val)
1945 			break;
1946 
1947 		/* XXX Need to check that there are no data dependencies
1948 		   between *diffp and *samep.  Currently, the code generator
1949 		   will not produce such dependencies. */
1950 		samep = &JF(*samep);
1951 	}
1952 #ifdef notdef
1953 	/* XXX This doesn't cover everything. */
1954 	for (i = 0; i < N_ATOMS; ++i)
1955 		if ((*samep)->val[i] != pred->val[i])
1956 			return;
1957 #endif
1958 	/* Pull up the node. */
1959 	pull = *samep;
1960 	*samep = JF(pull);
1961 	JF(pull) = *diffp;
1962 
1963 	/*
1964 	 * At the top of the chain, each predecessor needs to point at the
1965 	 * pulled up node.  Inside the chain, there is only one predecessor
1966 	 * to worry about.
1967 	 */
1968 	if (at_top) {
1969 		for (ep = b->in_edges; ep != 0; ep = ep->next) {
1970 			if (JT(ep->pred) == b)
1971 				JT(ep->pred) = pull;
1972 			else
1973 				JF(ep->pred) = pull;
1974 		}
1975 	}
1976 	else
1977 		*diffp = pull;
1978 
1979 	/*
1980 	 * XXX - this is one of the operations that happens when the
1981 	 * optimizer gets into one of those infinite loops.
1982 	 */
1983 	opt_state->done = 0;
1984 }
1985 
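/*
 * The same transformation as or_pullup(), mirrored for "A and B":
 * it walks the true (JT) chains instead of the false (JF) chains,
 * and requires the false edges to agree (JF(*diffp) == JF(b))
 * rather than the true edges.
 */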
1986 static void
1987 and_pullup(opt_state_t *opt_state, struct block *b)
1988 {
1989 	bpf_u_int32 val;
1990 	int at_top;
1991 	struct block *pull;
1992 	struct block **diffp, **samep;
1993 	struct edge *ep;
1994 
1995 	ep = b->in_edges;
1996 	if (ep == 0)
1997 		return;
1998 
1999 	/*
2000 	 * Make sure each predecessor loads the same value.
2001 	 */
2002 	val = ep->pred->val[A_ATOM];
2003 	for (ep = ep->next; ep != 0; ep = ep->next)
2004 		if (val != ep->pred->val[A_ATOM])
2005 			return;
2006 
2007 	if (JT(b->in_edges->pred) == b)
2008 		diffp = &JT(b->in_edges->pred);
2009 	else
2010 		diffp = &JF(b->in_edges->pred);
2011 
2012 	at_top = 1;
2013 	for (;;) {
2014 		if (*diffp == 0)
2015 			return;
2016 
2017 		if (JF(*diffp) != JF(b))
2018 			return;
2019 
2020 		if (!SET_MEMBER((*diffp)->dom, b->id))
2021 			return;
2022 
2023 		if ((*diffp)->val[A_ATOM] != val)
2024 			break;
2025 
2026 		diffp = &JT(*diffp);
2027 		at_top = 0;
2028 	}
2029 	samep = &JT(*diffp);
2030 	for (;;) {
2031 		if (*samep == 0)
2032 			return;
2033 
2034 		if (JF(*samep) != JF(b))
2035 			return;
2036 
2037 		if (!SET_MEMBER((*samep)->dom, b->id))
2038 			return;
2039 
2040 		if ((*samep)->val[A_ATOM] == val)
2041 			break;
2042 
2043 		/* XXX Need to check that there are no data dependencies
2044 		   between diffp and samep.  Currently, the code generator
2045 		   will not produce such dependencies. */
2046 		samep = &JT(*samep);
2047 	}
2048 #ifdef notdef
2049 	/* XXX This doesn't cover everything. */
2050 	for (i = 0; i < N_ATOMS; ++i)
2051 		if ((*samep)->val[i] != pred->val[i])
2052 			return;
2053 #endif
2054 	/* Pull up the node. */
2055 	pull = *samep;
2056 	*samep = JT(pull);
2057 	JT(pull) = *diffp;
2058 
2059 	/*
2060 	 * At the top of the chain, each predecessor needs to point at the
2061 	 * pulled up node.  Inside the chain, there is only one predecessor
2062 	 * to worry about.
2063 	 */
2064 	if (at_top) {
2065 		for (ep = b->in_edges; ep != 0; ep = ep->next) {
2066 			if (JT(ep->pred) == b)
2067 				JT(ep->pred) = pull;
2068 			else
2069 				JF(ep->pred) = pull;
2070 		}
2071 	}
2072 	else
2073 		*diffp = pull;
2074 
2075 	/*
2076 	 * XXX - this is one of the operations that happens when the
2077 	 * optimizer gets into one of those infinite loops.
2078 	 */
2079 	opt_state->done = 0;
2080 }
2081 
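/*
 * Run one optimization pass over every block: first do the per-block
 * work (opt_blk()) from the highest level down to the leaves, then,
 * unless this is a statement-elimination pass (do_stmts is set), walk
 * back up from the leaves trying to move branches (opt_j()) and to
 * pull up duplicated tests (or_pullup() and and_pullup()).
 */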
2082 static void
2083 opt_blks(opt_state_t *opt_state, struct icode *ic, int do_stmts)
2084 {
2085 	int i, maxlevel;
2086 	struct block *p;
2087 
2088 	init_val(opt_state);
2089 	maxlevel = ic->root->level;
2090 
2091 	find_inedges(opt_state, ic->root);
2092 	for (i = maxlevel; i >= 0; --i)
2093 		for (p = opt_state->levels[i]; p; p = p->link)
2094 			opt_blk(opt_state, p, do_stmts);
2095 
2096 	if (do_stmts)
2097 		/*
2098 		 * No point trying to move branches; it can't possibly
2099 		 * make a difference at this point.
2100 		 *
2101 		 * XXX - this might be after we detect a loop where
2102 		 * we were just looping infinitely moving branches
2103 		 * in such a fashion that we went through two or more
2104 		 * versions of the machine code, eventually returning
2105 		 * to the first version.  (We're really not doing a
2106 		 * full loop detection, we're just testing for two
2107 		 * passes in a row where we do nothing but
2108 		 * move branches.)
2109 		 */
2110 		return;
2111 
2112 	/*
2113 	 * Is this what the BPF+ paper describes in sections 6.1.1,
2114 	 * 6.1.2, and 6.1.3?
2115 	 */
2116 	for (i = 1; i <= maxlevel; ++i) {
2117 		for (p = opt_state->levels[i]; p; p = p->link) {
2118 			opt_j(opt_state, &p->et);
2119 			opt_j(opt_state, &p->ef);
2120 		}
2121 	}
2122 
2123 	find_inedges(opt_state, ic->root);
2124 	for (i = 1; i <= maxlevel; ++i) {
2125 		for (p = opt_state->levels[i]; p; p = p->link) {
2126 			or_pullup(opt_state, p);
2127 			and_pullup(opt_state, p);
2128 		}
2129 	}
2130 }
2131 
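/*
 * Record 'parent' as one of the incoming edges of 'child' by pushing
 * it onto the head of the child's in_edges list.
 */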
2132 static inline void
2133 link_inedge(struct edge *parent, struct block *child)
2134 {
2135 	parent->next = child->in_edges;
2136 	child->in_edges = parent;
2137 }
2138 
2139 static void
2140 find_inedges(opt_state_t *opt_state, struct block *root)
2141 {
2142 	u_int i;
2143 	int level;
2144 	struct block *b;
2145 
2146 	for (i = 0; i < opt_state->n_blocks; ++i)
2147 		opt_state->blocks[i]->in_edges = 0;
2148 
2149 	/*
2150 	 * Traverse the graph, adding each edge to the predecessor
2151 	 * list of its successor.  Skip the leaves (i.e. level 0).
2152 	 */
2153 	for (level = root->level; level > 0; --level) {
2154 		for (b = opt_state->levels[level]; b != 0; b = b->link) {
2155 			link_inedge(&b->et, JT(b));
2156 			link_inedge(&b->ef, JF(b));
2157 		}
2158 	}
2159 }
2160 
2161 static void
2162 opt_root(struct block **b)
2163 {
2164 	struct slist *tmp, *s;
2165 
2166 	s = (*b)->stmts;
2167 	(*b)->stmts = 0;
2168 	while (BPF_CLASS((*b)->s.code) == BPF_JMP && JT(*b) == JF(*b))
2169 		*b = JT(*b);
2170 
2171 	tmp = (*b)->stmts;
2172 	if (tmp != 0)
2173 		sappend(s, tmp);
2174 	(*b)->stmts = s;
2175 
2176 	/*
2177 	 * If the root node is a return, then there is no
2178 	 * point executing any statements (since the bpf machine
2179 	 * has no side effects).
2180 	 */
2181 	if (BPF_CLASS((*b)->s.code) == BPF_RET)
2182 		(*b)->stmts = 0;
2183 }
2184 
2185 static void
2186 opt_loop(opt_state_t *opt_state, struct icode *ic, int do_stmts)
2187 {
2188 
2189 #ifdef BDEBUG
2190 	if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
2191 		printf("opt_loop(root, %d) begin\n", do_stmts);
2192 		opt_dump(opt_state, ic);
2193 	}
2194 #endif
2195 
2196 	/*
2197 	 * XXX - optimizer loop detection.
2198 	 */
2199 	int loop_count = 0;
2200 	for (;;) {
2201 		opt_state->done = 1;
2202 		/*
2203 		 * XXX - optimizer loop detection.
2204 		 */
2205 		opt_state->non_branch_movement_performed = 0;
2206 		find_levels(opt_state, ic);
2207 		find_dom(opt_state, ic->root);
2208 		find_closure(opt_state, ic->root);
2209 		find_ud(opt_state, ic->root);
2210 		find_edom(opt_state, ic->root);
2211 		opt_blks(opt_state, ic, do_stmts);
2212 #ifdef BDEBUG
2213 		if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
2214 			printf("opt_loop(root, %d) bottom, done=%d\n", do_stmts, opt_state->done);
2215 			opt_dump(opt_state, ic);
2216 		}
2217 #endif
2218 
2219 		/*
2220 		 * Was anything done in this optimizer pass?
2221 		 */
2222 		if (opt_state->done) {
2223 			/*
2224 			 * No, so we've reached a fixed point.
2225 			 * We're done.
2226 			 */
2227 			break;
2228 		}
2229 
2230 		/*
2231 		 * XXX - was anything done other than branch movement
2232 		 * in this pass?
2233 		 */
2234 		if (opt_state->non_branch_movement_performed) {
2235 			/*
2236 			 * Yes.  Clear any loop-detection counter;
2237 			 * we're making some form of progress (assuming
2238 			 * we can't get into a cycle doing *other*
2239 			 * optimizations...).
2240 			 */
2241 			loop_count = 0;
2242 		} else {
2243 			/*
2244 			 * No - increment the counter, and quit if
2245 			 * it's up to 100.
2246 			 */
2247 			loop_count++;
2248 			if (loop_count >= 100) {
2249 				/*
2250 				 * We've done nothing but branch movement
2251 				 * for 100 passes; we're probably
2252 				 * in a cycle and will never reach a
2253 				 * fixed point.
2254 				 *
2255 				 * XXX - yes, we really need a non-
2256 				 * heuristic way of detecting a cycle.
2257 				 */
2258 				opt_state->done = 1;
2259 				break;
2260 			}
2261 		}
2262 	}
2263 }
2264 
2265 /*
2266  * Optimize the filter code in its dag representation.
2267  * Return 0 on success, -1 on error.
2268  */
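/*
 * An illustrative call site, as a sketch only (within libpcap this is
 * done by pcap_compile() when optimization is requested; "ic" and
 * "errbuf" stand for the compiler's icode and PCAP_ERRBUF_SIZE-sized
 * error buffer):
 *
 *	if (bpf_optimize(&ic, errbuf) == -1)
 *		return (-1);	(errbuf now holds the reason)
 */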
2269 int
2270 bpf_optimize(struct icode *ic, char *errbuf)
2271 {
2272 	opt_state_t opt_state;
2273 
2274 	memset(&opt_state, 0, sizeof(opt_state));
2275 	opt_state.errbuf = errbuf;
2276 	opt_state.non_branch_movement_performed = 0;
2277 	if (setjmp(opt_state.top_ctx)) {
2278 		opt_cleanup(&opt_state);
2279 		return -1;
2280 	}
2281 	opt_init(&opt_state, ic);
2282 	opt_loop(&opt_state, ic, 0);
2283 	opt_loop(&opt_state, ic, 1);
2284 	intern_blocks(&opt_state, ic);
2285 #ifdef BDEBUG
2286 	if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
2287 		printf("after intern_blocks()\n");
2288 		opt_dump(&opt_state, ic);
2289 	}
2290 #endif
2291 	opt_root(&ic->root);
2292 #ifdef BDEBUG
2293 	if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
2294 		printf("after opt_root()\n");
2295 		opt_dump(&opt_state, ic);
2296 	}
2297 #endif
2298 	opt_cleanup(&opt_state);
2299 	return 0;
2300 }
2301 
2302 static void
2303 make_marks(struct icode *ic, struct block *p)
2304 {
2305 	if (!isMarked(ic, p)) {
2306 		Mark(ic, p);
2307 		if (BPF_CLASS(p->s.code) != BPF_RET) {
2308 			make_marks(ic, JT(p));
2309 			make_marks(ic, JF(p));
2310 		}
2311 	}
2312 }
2313 
2314 /*
2315  * Mark the icode such that isMarked(ic, b) is true
2316  * only for nodes that are alive, i.e. reachable from the root.
2317  */
2318 static void
2319 mark_code(struct icode *ic)
2320 {
2321 	ic->cur_mark += 1;
2322 	make_marks(ic, ic->root);
2323 }
2324 
2325 /*
2326  * True iff the two stmt lists load the same value from the packet into
2327  * the accumulator.
2328  */
2329 static int
2330 eq_slist(struct slist *x, struct slist *y)
2331 {
2332 	for (;;) {
2333 		while (x && x->s.code == NOP)
2334 			x = x->next;
2335 		while (y && y->s.code == NOP)
2336 			y = y->next;
2337 		if (x == 0)
2338 			return y == 0;
2339 		if (y == 0)
2340 			return x == 0;
2341 		if (x->s.code != y->s.code || x->s.k != y->s.k)
2342 			return 0;
2343 		x = x->next;
2344 		y = y->next;
2345 	}
2346 }
2347 
2348 static inline int
2349 eq_blk(struct block *b0, struct block *b1)
2350 {
2351 	if (b0->s.code == b1->s.code &&
2352 	    b0->s.k == b1->s.k &&
2353 	    b0->et.succ == b1->et.succ &&
2354 	    b0->ef.succ == b1->ef.succ)
2355 		return eq_slist(b0->stmts, b1->stmts);
2356 	return 0;
2357 }
2358 
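/*
 * "Intern" the blocks: find live blocks that are equivalent according
 * to eq_blk() and redirect every reference to such a block to a single
 * representative, repeating until no more edges change.
 */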
2359 static void
2360 intern_blocks(opt_state_t *opt_state, struct icode *ic)
2361 {
2362 	struct block *p;
2363 	u_int i, j;
2364 	int done1; /* don't shadow global */
2365  top:
2366 	done1 = 1;
2367 	for (i = 0; i < opt_state->n_blocks; ++i)
2368 		opt_state->blocks[i]->link = 0;
2369 
2370 	mark_code(ic);
2371 
2372 	for (i = opt_state->n_blocks - 1; i != 0; ) {
2373 		--i;
2374 		if (!isMarked(ic, opt_state->blocks[i]))
2375 			continue;
2376 		for (j = i + 1; j < opt_state->n_blocks; ++j) {
2377 			if (!isMarked(ic, opt_state->blocks[j]))
2378 				continue;
2379 			if (eq_blk(opt_state->blocks[i], opt_state->blocks[j])) {
2380 				opt_state->blocks[i]->link = opt_state->blocks[j]->link ?
2381 					opt_state->blocks[j]->link : opt_state->blocks[j];
2382 				break;
2383 			}
2384 		}
2385 	}
2386 	for (i = 0; i < opt_state->n_blocks; ++i) {
2387 		p = opt_state->blocks[i];
2388 		if (JT(p) == 0)
2389 			continue;
2390 		if (JT(p)->link) {
2391 			done1 = 0;
2392 			JT(p) = JT(p)->link;
2393 		}
2394 		if (JF(p)->link) {
2395 			done1 = 0;
2396 			JF(p) = JF(p)->link;
2397 		}
2398 	}
2399 	if (!done1)
2400 		goto top;
2401 }
2402 
2403 static void
2404 opt_cleanup(opt_state_t *opt_state)
2405 {
2406 	free((void *)opt_state->vnode_base);
2407 	free((void *)opt_state->vmap);
2408 	free((void *)opt_state->edges);
2409 	free((void *)opt_state->space);
2410 	free((void *)opt_state->levels);
2411 	free((void *)opt_state->blocks);
2412 }
2413 
2414 /*
2415  * For optimizer errors.
2416  */
2417 static void PCAP_NORETURN
2418 opt_error(opt_state_t *opt_state, const char *fmt, ...)
2419 {
2420 	va_list ap;
2421 
2422 	if (opt_state->errbuf != NULL) {
2423 		va_start(ap, fmt);
2424 		(void)vsnprintf(opt_state->errbuf,
2425 		    PCAP_ERRBUF_SIZE, fmt, ap);
2426 		va_end(ap);
2427 	}
2428 	longjmp(opt_state->top_ctx, 1);
2429 	/* NOTREACHED */
2430 #ifdef _AIX
2431 	PCAP_UNREACHABLE
2432 #endif /* _AIX */
2433 }
2434 
2435 /*
2436  * Return the number of statements in 's', not counting NOPs.
2437  */
2438 static u_int
2439 slength(struct slist *s)
2440 {
2441 	u_int n = 0;
2442 
2443 	for (; s; s = s->next)
2444 		if (s->s.code != NOP)
2445 			++n;
2446 	return n;
2447 }
2448 
2449 /*
2450  * Return the number of nodes reachable by 'p'.
2451  * All nodes should be initially unmarked.
2452  */
2453 static int
2454 count_blocks(struct icode *ic, struct block *p)
2455 {
2456 	if (p == 0 || isMarked(ic, p))
2457 		return 0;
2458 	Mark(ic, p);
2459 	return count_blocks(ic, JT(p)) + count_blocks(ic, JF(p)) + 1;
2460 }
2461 
2462 /*
2463  * Do a depth first search on the flow graph, numbering the
2464  * basic blocks and entering them into the 'blocks' array.
2465  */
2466 static void
2467 number_blks_r(opt_state_t *opt_state, struct icode *ic, struct block *p)
2468 {
2469 	u_int n;
2470 
2471 	if (p == 0 || isMarked(ic, p))
2472 		return;
2473 
2474 	Mark(ic, p);
2475 	n = opt_state->n_blocks++;
2476 	if (opt_state->n_blocks == 0) {
2477 		/*
2478 		 * Overflow.
2479 		 */
2480 		opt_error(opt_state, "filter is too complex to optimize");
2481 	}
2482 	p->id = n;
2483 	opt_state->blocks[n] = p;
2484 
2485 	number_blks_r(opt_state, ic, JT(p));
2486 	number_blks_r(opt_state, ic, JF(p));
2487 }
2488 
2489 /*
2490  * Return the number of stmts in the flowgraph reachable by 'p'.
2491  * The nodes should be unmarked before calling.
2492  *
2493  * Note that "stmts" means "instructions", and that this includes
2494  *
2495  *	side-effect statements in 'p' (slength(p->stmts));
2496  *
2497  *	statements in the true branch from 'p' (count_stmts(JT(p)));
2498  *
2499  *	statements in the false branch from 'p' (count_stmts(JF(p)));
2500  *
2501  *	the conditional jump itself (1);
2502  *
2503  *	an extra long jump if the true branch requires it (p->longjt);
2504  *
2505  *	an extra long jump if the false branch requires it (p->longjf).
2506  */
2507 static u_int
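/*
 * For example (with made-up numbers): a block containing 3 non-NOP
 * statements and a conditional jump, with longjt == 1 and longjf == 0,
 * contributes 3 + 1 + 1 + 0 = 5 instructions, plus whatever is
 * reachable through its two branches.
 */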
2508 count_stmts(struct icode *ic, struct block *p)
2509 {
2510 	u_int n;
2511 
2512 	if (p == 0 || isMarked(ic, p))
2513 		return 0;
2514 	Mark(ic, p);
2515 	n = count_stmts(ic, JT(p)) + count_stmts(ic, JF(p));
2516 	return slength(p->stmts) + n + 1 + p->longjt + p->longjf;
2517 }
2518 
2519 /*
2520  * Allocate memory.  All allocation is done before optimization
2521  * is begun.  A linear bound on the size of all data structures is computed
2522  * from the total number of blocks and/or statements.
2523  */
2524 static void
2525 opt_init(opt_state_t *opt_state, struct icode *ic)
2526 {
2527 	bpf_u_int32 *p;
2528 	int i, n, max_stmts;
2529 	u_int product;
2530 	size_t block_memsize, edge_memsize;
2531 
2532 	/*
2533 	 * First, count the blocks, so we can malloc an array to map
2534 	 * block number to block.  Then, put the blocks into the array.
2535 	 */
2536 	unMarkAll(ic);
2537 	n = count_blocks(ic, ic->root);
2538 	opt_state->blocks = (struct block **)calloc(n, sizeof(*opt_state->blocks));
2539 	if (opt_state->blocks == NULL)
2540 		opt_error(opt_state, "malloc");
2541 	unMarkAll(ic);
2542 	opt_state->n_blocks = 0;
2543 	number_blks_r(opt_state, ic, ic->root);
2544 
2545 	/*
2546 	 * This "should not happen".
2547 	 */
2548 	if (opt_state->n_blocks == 0)
2549 		opt_error(opt_state, "filter has no instructions; please report this as a libpcap issue");
2550 
2551 	opt_state->n_edges = 2 * opt_state->n_blocks;
2552 	if ((opt_state->n_edges / 2) != opt_state->n_blocks) {
2553 		/*
2554 		 * Overflow.
2555 		 */
2556 		opt_error(opt_state, "filter is too complex to optimize");
2557 	}
2558 	opt_state->edges = (struct edge **)calloc(opt_state->n_edges, sizeof(*opt_state->edges));
2559 	if (opt_state->edges == NULL) {
2560 		opt_error(opt_state, "malloc");
2561 	}
2562 
2563 	/*
2564 	 * The number of levels is bounded by the number of nodes.
2565 	 */
2566 	opt_state->levels = (struct block **)calloc(opt_state->n_blocks, sizeof(*opt_state->levels));
2567 	if (opt_state->levels == NULL) {
2568 		opt_error(opt_state, "malloc");
2569 	}
2570 
2571 	opt_state->edgewords = opt_state->n_edges / BITS_PER_WORD + 1;
2572 	opt_state->nodewords = opt_state->n_blocks / BITS_PER_WORD + 1;
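	/*
	 * For example, if BITS_PER_WORD is 32 and there are 40 blocks,
	 * and therefore 80 edges, each per-block set (dominators,
	 * closure) needs 40/32 + 1 == 2 words and each per-edge set
	 * (edge dominators) needs 80/32 + 1 == 3 words.
	 */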
2573 
2574 	/*
2575 	 * Make sure opt_state->n_blocks * opt_state->nodewords fits
2576 	 * in a u_int; we use it as a u_int number-of-iterations
2577 	 * value.
2578 	 */
2579 	product = opt_state->n_blocks * opt_state->nodewords;
2580 	if ((product / opt_state->n_blocks) != opt_state->nodewords) {
2581 		/*
2582 		 * XXX - just punt and don't try to optimize?
2583 		 * In practice, this is unlikely to happen with
2584 		 * a normal filter.
2585 		 */
2586 		opt_error(opt_state, "filter is too complex to optimize");
2587 	}
2588 
2589 	/*
2590 	 * Make sure the total memory required for that doesn't
2591 	 * overflow.
2592 	 */
2593 	block_memsize = (size_t)2 * product * sizeof(*opt_state->space);
2594 	if ((block_memsize / product) != 2 * sizeof(*opt_state->space)) {
2595 		opt_error(opt_state, "filter is too complex to optimize");
2596 	}
2597 
2598 	/*
2599 	 * Make sure opt_state->n_edges * opt_state->edgewords fits
2600 	 * in a u_int; we use it as a u_int number-of-iterations
2601 	 * value.
2602 	 */
2603 	product = opt_state->n_edges * opt_state->edgewords;
2604 	if ((product / opt_state->n_edges) != opt_state->edgewords) {
2605 		opt_error(opt_state, "filter is too complex to optimize");
2606 	}
2607 
2608 	/*
2609 	 * Make sure the total memory required for that doesn't
2610 	 * overflow.
2611 	 */
2612 	edge_memsize = (size_t)product * sizeof(*opt_state->space);
2613 	if (edge_memsize / product != sizeof(*opt_state->space)) {
2614 		opt_error(opt_state, "filter is too complex to optimize");
2615 	}
2616 
2617 	/*
2618 	 * Make sure the total memory required for both of them doesn't
2619 	 * overflow.
2620 	 */
2621 	if (block_memsize > SIZE_MAX - edge_memsize) {
2622 		opt_error(opt_state, "filter is too complex to optimize");
2623 	}
2624 
2625 	/* XXX */
2626 	opt_state->space = (bpf_u_int32 *)malloc(block_memsize + edge_memsize);
2627 	if (opt_state->space == NULL) {
2628 		opt_error(opt_state, "malloc");
2629 	}
2630 	p = opt_state->space;
2631 	opt_state->all_dom_sets = p;
2632 	for (i = 0; i < n; ++i) {
2633 		opt_state->blocks[i]->dom = p;
2634 		p += opt_state->nodewords;
2635 	}
2636 	opt_state->all_closure_sets = p;
2637 	for (i = 0; i < n; ++i) {
2638 		opt_state->blocks[i]->closure = p;
2639 		p += opt_state->nodewords;
2640 	}
2641 	opt_state->all_edge_sets = p;
2642 	for (i = 0; i < n; ++i) {
2643 		register struct block *b = opt_state->blocks[i];
2644 
2645 		b->et.edom = p;
2646 		p += opt_state->edgewords;
2647 		b->ef.edom = p;
2648 		p += opt_state->edgewords;
2649 		b->et.id = i;
2650 		opt_state->edges[i] = &b->et;
2651 		b->ef.id = opt_state->n_blocks + i;
2652 		opt_state->edges[opt_state->n_blocks + i] = &b->ef;
2653 		b->et.pred = b;
2654 		b->ef.pred = b;
2655 	}
2656 	max_stmts = 0;
2657 	for (i = 0; i < n; ++i)
2658 		max_stmts += slength(opt_state->blocks[i]->stmts) + 1;
2659 	/*
2660 	 * We allocate at most 3 value numbers per statement,
2661 	 * so this is an upper bound on the number of valnodes
2662 	 * we'll need.
2663 	 */
2664 	opt_state->maxval = 3 * max_stmts;
2665 	opt_state->vmap = (struct vmapinfo *)calloc(opt_state->maxval, sizeof(*opt_state->vmap));
2666 	if (opt_state->vmap == NULL) {
2667 		opt_error(opt_state, "malloc");
2668 	}
2669 	opt_state->vnode_base = (struct valnode *)calloc(opt_state->maxval, sizeof(*opt_state->vnode_base));
2670 	if (opt_state->vnode_base == NULL) {
2671 		opt_error(opt_state, "malloc");
2672 	}
2673 }
2674 
2675 /*
2676  * This is only used when supporting optimizer debugging.  It is
2677  * global state, so do *not* do more than one compile in parallel
2678  * and expect it to provide meaningful information.
2679  */
2680 #ifdef BDEBUG
2681 int bids[NBIDS];
2682 #endif
2683 
2684 static void PCAP_NORETURN conv_error(conv_state_t *, const char *, ...)
2685     PCAP_PRINTFLIKE(2, 3);
2686 
2687 /*
2688  * Returns true if successful.  Returns false if a branch has
2689  * an offset that is too large.  If so, we have marked that
2690  * branch so that on a subsequent iteration, it will be treated
2691  * properly.
2692  */
2693 static int
2694 convert_code_r(conv_state_t *conv_state, struct icode *ic, struct block *p)
2695 {
2696 	struct bpf_insn *dst;
2697 	struct slist *src;
2698 	u_int slen;
2699 	u_int off;
2700 	struct slist **offset = NULL;
2701 
2702 	if (p == 0 || isMarked(ic, p))
2703 		return (1);
2704 	Mark(ic, p);
2705 
2706 	if (convert_code_r(conv_state, ic, JF(p)) == 0)
2707 		return (0);
2708 	if (convert_code_r(conv_state, ic, JT(p)) == 0)
2709 		return (0);
2710 
2711 	slen = slength(p->stmts);
2712 	dst = conv_state->ftail -= (slen + 1 + p->longjt + p->longjf);
2713 		/* inflate length by any extra jumps */
2714 
2715 	p->offset = (int)(dst - conv_state->fstart);
2716 
2717 	/* generate offset[] for convenience  */
2718 	if (slen) {
2719 		offset = (struct slist **)calloc(slen, sizeof(struct slist *));
2720 		if (!offset) {
2721 			conv_error(conv_state, "not enough core");
2722 			/*NOTREACHED*/
2723 		}
2724 	}
2725 	src = p->stmts;
2726 	for (off = 0; off < slen && src; off++) {
2727 #if 0
2728 		printf("off=%d src=%x\n", off, src);
2729 #endif
2730 		offset[off] = src;
2731 		src = src->next;
2732 	}
2733 
2734 	off = 0;
2735 	for (src = p->stmts; src; src = src->next) {
2736 		if (src->s.code == NOP)
2737 			continue;
2738 		dst->code = (u_short)src->s.code;
2739 		dst->k = src->s.k;
2740 
2741 		/* fill block-local relative jump */
2742 		if (BPF_CLASS(src->s.code) != BPF_JMP || src->s.code == (BPF_JMP|BPF_JA)) {
2743 #if 0
2744 			if (src->s.jt || src->s.jf) {
2745 				free(offset);
2746 				conv_error(conv_state, "illegal jmp destination");
2747 				/*NOTREACHED*/
2748 			}
2749 #endif
2750 			goto filled;
2751 		}
2752 		if (off == slen - 2)	/*???*/
2753 			goto filled;
2754 
2755 	    {
2756 		u_int i;
2757 		int jt, jf;
2758 		const char ljerr[] = "%s for block-local relative jump: off=%d";
2759 
2760 #if 0
2761 		printf("code=%x off=%d %x %x\n", src->s.code,
2762 			off, src->s.jt, src->s.jf);
2763 #endif
2764 
2765 		if (!src->s.jt || !src->s.jf) {
2766 			free(offset);
2767 			conv_error(conv_state, ljerr, "no jmp destination", off);
2768 			/*NOTREACHED*/
2769 		}
2770 
2771 		jt = jf = 0;
2772 		for (i = 0; i < slen; i++) {
2773 			if (offset[i] == src->s.jt) {
2774 				if (jt) {
2775 					free(offset);
2776 					conv_error(conv_state, ljerr, "multiple matches", off);
2777 					/*NOTREACHED*/
2778 				}
2779 
2780 				if (i - off - 1 >= 256) {
2781 					free(offset);
2782 					conv_error(conv_state, ljerr, "out-of-range jump", off);
2783 					/*NOTREACHED*/
2784 				}
2785 				dst->jt = (u_char)(i - off - 1);
2786 				jt++;
2787 			}
2788 			if (offset[i] == src->s.jf) {
2789 				if (jf) {
2790 					free(offset);
2791 					conv_error(conv_state, ljerr, "multiple matches", off);
2792 					/*NOTREACHED*/
2793 				}
2794 				if (i - off - 1 >= 256) {
2795 					free(offset);
2796 					conv_error(conv_state, ljerr, "out-of-range jump", off);
2797 					/*NOTREACHED*/
2798 				}
2799 				dst->jf = (u_char)(i - off - 1);
2800 				jf++;
2801 			}
2802 		}
2803 		if (!jt || !jf) {
2804 			free(offset);
2805 			conv_error(conv_state, ljerr, "no destination found", off);
2806 			/*NOTREACHED*/
2807 		}
2808 	    }
2809 filled:
2810 		++dst;
2811 		++off;
2812 	}
2813 	if (offset)
2814 		free(offset);
2815 
2816 #ifdef BDEBUG
2817 	if (dst - conv_state->fstart < NBIDS)
2818 		bids[dst - conv_state->fstart] = p->id + 1;
2819 #endif
2820 	dst->code = (u_short)p->s.code;
2821 	dst->k = p->s.k;
2822 	if (JT(p)) {
2823 		/* number of extra jumps inserted */
2824 		u_char extrajmps = 0;
2825 		off = JT(p)->offset - (p->offset + slen) - 1;
2826 		if (off >= 256) {
2827 		    /* offset too large for branch, must add a jump */
2828 		    if (p->longjt == 0) {
2829 			/* mark this instruction and retry */
2830 			p->longjt++;
2831 			return(0);
2832 		    }
2833 		    dst->jt = extrajmps;
2834 		    extrajmps++;
2835 		    dst[extrajmps].code = BPF_JMP|BPF_JA;
2836 		    dst[extrajmps].k = off - extrajmps;
2837 		}
2838 		else
2839 		    dst->jt = (u_char)off;
2840 		off = JF(p)->offset - (p->offset + slen) - 1;
2841 		if (off >= 256) {
2842 		    /* offset too large for branch, must add a jump */
2843 		    if (p->longjf == 0) {
2844 			/* mark this instruction and retry */
2845 			p->longjf++;
2846 			return(0);
2847 		    }
2848 		    /* branch if F to following jump */
2849 		    /* if two jumps are inserted, F goes to second one */
2850 		    dst->jf = extrajmps;
2851 		    extrajmps++;
2852 		    dst[extrajmps].code = BPF_JMP|BPF_JA;
2853 		    dst[extrajmps].k = off - extrajmps;
2854 		}
2855 		else
2856 		    dst->jf = (u_char)off;
2857 	}
2858 	return (1);
2859 }
2860 
2861 
2862 /*
2863  * Convert flowgraph intermediate representation to the
2864  * BPF array representation.  Set *lenp to the number of instructions.
2865  *
2866  * This routine does *NOT* leak the memory pointed to by fp.  It *must
2867  * not* do free(fp) before returning fp; doing so would make no sense,
2868  * as the BPF array pointed to by the return value of icode_to_fcode()
2869  * must be valid - it's being returned for use in a bpf_program structure.
2870  *
2871  * If it appears that icode_to_fcode() is leaking, the problem is that
2872  * the program using pcap_compile() is failing to free the memory in
2873  * the BPF program when it's done - the leak is in the program, not in
2874  * the routine that happens to be allocating the memory.  (By analogy, if
2875  * a program calls fopen() without ever calling fclose() on the FILE *,
2876  * it will leak the FILE structure; the leak is not in fopen(), it's in
2877  * the program.)  Change the program to use pcap_freecode() when it's
2878  * done with the filter program.  See the pcap man page.
2879  */
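/*
 * A sketch of correct usage from the application's side (the filter
 * string and variable names here are illustrative only):
 *
 *	struct bpf_program prog;
 *
 *	if (pcap_compile(p, &prog, "tcp port 80", 1,
 *	    PCAP_NETMASK_UNKNOWN) == -1)
 *		return (-1);
 *	if (pcap_setfilter(p, &prog) == -1) {
 *		pcap_freecode(&prog);
 *		return (-1);
 *	}
 *	pcap_freecode(&prog);	(frees the array allocated below)
 */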
2880 struct bpf_insn *
2881 icode_to_fcode(struct icode *ic, struct block *root, u_int *lenp,
2882     char *errbuf)
2883 {
2884 	u_int n;
2885 	struct bpf_insn *fp;
2886 	conv_state_t conv_state;
2887 
2888 	conv_state.fstart = NULL;
2889 	conv_state.errbuf = errbuf;
2890 	if (setjmp(conv_state.top_ctx) != 0) {
2891 		free(conv_state.fstart);
2892 		return NULL;
2893 	}
2894 
2895 	/*
2896 	 * Loop doing convert_code_r() until no branches remain
2897 	 * with too-large offsets.
2898 	 */
2899 	for (;;) {
2900 	    unMarkAll(ic);
2901 	    n = *lenp = count_stmts(ic, root);
2902 
2903 	    fp = (struct bpf_insn *)malloc(sizeof(*fp) * n);
2904 	    if (fp == NULL) {
2905 		(void)snprintf(errbuf, PCAP_ERRBUF_SIZE,
2906 		    "malloc");
2907 		return NULL;
2908 	    }
2909 	    memset((char *)fp, 0, sizeof(*fp) * n);
2910 	    conv_state.fstart = fp;
2911 	    conv_state.ftail = fp + n;
2912 
2913 	    unMarkAll(ic);
2914 	    if (convert_code_r(&conv_state, ic, root))
2915 		break;
2916 	    free(fp);
2917 	}
2918 
2919 	return fp;
2920 }
2921 
2922 /*
2923  * For icode_to_fcode() errors.
2924  */
2925 static void PCAP_NORETURN
2926 conv_error(conv_state_t *conv_state, const char *fmt, ...)
2927 {
2928 	va_list ap;
2929 
2930 	va_start(ap, fmt);
2931 	(void)vsnprintf(conv_state->errbuf,
2932 	    PCAP_ERRBUF_SIZE, fmt, ap);
2933 	va_end(ap);
2934 	longjmp(conv_state->top_ctx, 1);
2935 	/* NOTREACHED */
2936 #ifdef _AIX
2937 	PCAP_UNREACHABLE
2938 #endif /* _AIX */
2939 }
2940 
2941 /*
2942  * Make a copy of a BPF program and put it in the "fcode" member of
2943  * a "pcap_t".
2944  *
2945  * If we fail to allocate memory for the copy, fill in the "errbuf"
2946  * member of the "pcap_t" with an error message, and return -1;
2947  * otherwise, return 0.
2948  */
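/*
 * A hypothetical capture module might call this from its setfilter
 * operation roughly as follows (a sketch only; "mydev_setfilter" is
 * not a real libpcap routine):
 *
 *	static int
 *	mydev_setfilter(pcap_t *p, struct bpf_program *fp)
 *	{
 *		if (install_bpf_program(p, fp) < 0)
 *			return (-1);	(p->errbuf has been filled in)
 *		return (0);
 *	}
 */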
2949 int
2950 install_bpf_program(pcap_t *p, struct bpf_program *fp)
2951 {
2952 	size_t prog_size;
2953 
2954 	/*
2955 	 * Validate the program.
2956 	 */
2957 	if (!pcap_validate_filter(fp->bf_insns, fp->bf_len)) {
2958 		snprintf(p->errbuf, sizeof(p->errbuf),
2959 			"BPF program is not valid");
2960 		return (-1);
2961 	}
2962 
2963 	/*
2964 	 * Free up any already installed program.
2965 	 */
2966 	pcap_freecode(&p->fcode);
2967 
2968 	prog_size = sizeof(*fp->bf_insns) * fp->bf_len;
2969 	p->fcode.bf_len = fp->bf_len;
2970 	p->fcode.bf_insns = (struct bpf_insn *)malloc(prog_size);
2971 	if (p->fcode.bf_insns == NULL) {
2972 		pcap_fmt_errmsg_for_errno(p->errbuf, sizeof(p->errbuf),
2973 		    errno, "malloc");
2974 		return (-1);
2975 	}
2976 	memcpy(p->fcode.bf_insns, fp->bf_insns, prog_size);
2977 	return (0);
2978 }
2979 
2980 #ifdef BDEBUG
2981 static void
2982 dot_dump_node(struct icode *ic, struct block *block, struct bpf_program *prog,
2983     FILE *out)
2984 {
2985 	int icount, noffset;
2986 	int i;
2987 
2988 	if (block == NULL || isMarked(ic, block))
2989 		return;
2990 	Mark(ic, block);
2991 
2992 	icount = slength(block->stmts) + 1 + block->longjt + block->longjf;
2993 	noffset = min(block->offset + icount, (int)prog->bf_len);
2994 
2995 	fprintf(out, "\tblock%u [shape=ellipse, id=\"block-%u\" label=\"BLOCK%u\\n", block->id, block->id, block->id);
2996 	for (i = block->offset; i < noffset; i++) {
2997 		fprintf(out, "\\n%s", bpf_image(prog->bf_insns + i, i));
2998 	}
2999 	fprintf(out, "\" tooltip=\"");
3000 	for (i = 0; i < BPF_MEMWORDS; i++)
3001 		if (block->val[i] != VAL_UNKNOWN)
3002 			fprintf(out, "val[%d]=%d ", i, block->val[i]);
3003 	fprintf(out, "val[A]=%d ", block->val[A_ATOM]);
3004 	fprintf(out, "val[X]=%d", block->val[X_ATOM]);
3005 	fprintf(out, "\"");
3006 	if (JT(block) == NULL)
3007 		fprintf(out, ", peripheries=2");
3008 	fprintf(out, "];\n");
3009 
3010 	dot_dump_node(ic, JT(block), prog, out);
3011 	dot_dump_node(ic, JF(block), prog, out);
3012 }
3013 
3014 static void
3015 dot_dump_edge(struct icode *ic, struct block *block, FILE *out)
3016 {
3017 	if (block == NULL || isMarked(ic, block))
3018 		return;
3019 	Mark(ic, block);
3020 
3021 	if (JT(block)) {
3022 		fprintf(out, "\t\"block%u\":se -> \"block%u\":n [label=\"T\"]; \n",
3023 				block->id, JT(block)->id);
3024 		fprintf(out, "\t\"block%u\":sw -> \"block%u\":n [label=\"F\"]; \n",
3025 			   block->id, JF(block)->id);
3026 	}
3027 	dot_dump_edge(ic, JT(block), out);
3028 	dot_dump_edge(ic, JF(block), out);
3029 }
3030 
3031 /* Output the block CFG in the graphviz/DOT language.
3032  * The CFG shows each block's code, the value index of each register
3033  * at exit, and the jump relationships between the blocks.
3034  *
3035  * example DOT for BPF `ip src host 1.1.1.1' is:
3036     digraph BPF {
3037 	block0 [shape=ellipse, id="block-0" label="BLOCK0\n\n(000) ldh      [12]\n(001) jeq      #0x800           jt 2	jf 5" tooltip="val[A]=0 val[X]=0"];
3038 	block1 [shape=ellipse, id="block-1" label="BLOCK1\n\n(002) ld       [26]\n(003) jeq      #0x1010101       jt 4	jf 5" tooltip="val[A]=0 val[X]=0"];
3039 	block2 [shape=ellipse, id="block-2" label="BLOCK2\n\n(004) ret      #68" tooltip="val[A]=0 val[X]=0", peripheries=2];
3040 	block3 [shape=ellipse, id="block-3" label="BLOCK3\n\n(005) ret      #0" tooltip="val[A]=0 val[X]=0", peripheries=2];
3041 	"block0":se -> "block1":n [label="T"];
3042 	"block0":sw -> "block3":n [label="F"];
3043 	"block1":se -> "block2":n [label="T"];
3044 	"block1":sw -> "block3":n [label="F"];
3045     }
3046  *
3047  *  After installing graphviz (https://www.graphviz.org/), save the output as bpf.dot
3048  *  and run `dot -Tpng -O bpf.dot' to draw the graph.
3049  */
3050 static int
3051 dot_dump(struct icode *ic, char *errbuf)
3052 {
3053 	struct bpf_program f;
3054 	FILE *out = stdout;
3055 
3056 	memset(bids, 0, sizeof bids);
3057 	f.bf_insns = icode_to_fcode(ic, ic->root, &f.bf_len, errbuf);
3058 	if (f.bf_insns == NULL)
3059 		return -1;
3060 
3061 	fprintf(out, "digraph BPF {\n");
3062 	unMarkAll(ic);
3063 	dot_dump_node(ic, ic->root, &f, out);
3064 	unMarkAll(ic);
3065 	dot_dump_edge(ic, ic->root, out);
3066 	fprintf(out, "}\n");
3067 
3068 	free((char *)f.bf_insns);
3069 	return 0;
3070 }
3071 
3072 static int
3073 plain_dump(struct icode *ic, char *errbuf)
3074 {
3075 	struct bpf_program f;
3076 
3077 	memset(bids, 0, sizeof bids);
3078 	f.bf_insns = icode_to_fcode(ic, ic->root, &f.bf_len, errbuf);
3079 	if (f.bf_insns == NULL)
3080 		return -1;
3081 	bpf_dump(&f, 1);
3082 	putchar('\n');
3083 	free((char *)f.bf_insns);
3084 	return 0;
3085 }
3086 
3087 static void
3088 opt_dump(opt_state_t *opt_state, struct icode *ic)
3089 {
3090 	int status;
3091 	char errbuf[PCAP_ERRBUF_SIZE];
3092 
3093 	/*
3094 	 * If the CFG, in DOT format, is requested, output it rather than
3095 	 * the code that would be generated from that graph.
3096 	 */
3097 	if (pcap_print_dot_graph)
3098 		status = dot_dump(ic, errbuf);
3099 	else
3100 		status = plain_dump(ic, errbuf);
3101 	if (status == -1)
3102 		opt_error(opt_state, "opt_dump: icode_to_fcode failed: %s", errbuf);
3103 }
3104 #endif
3105