xref: /openbsd-src/lib/libpcap/gencode.c (revision 4c52d65c01901316c6a1469f3001cbbb9176b48f)
1 /*	$OpenBSD: gencode.c,v 1.10 1998/08/31 19:53:19 deraadt Exp $	*/
2 
3 /*
4  * Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that: (1) source code distributions
9  * retain the above copyright notice and this paragraph in its entirety, (2)
10  * distributions including binary code include the above copyright notice and
11  * this paragraph in its entirety in the documentation or other materials
12  * provided with the distribution, and (3) all advertising materials mentioning
13  * features or use of this software display the following acknowledgement:
14  * ``This product includes software developed by the University of California,
15  * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
16  * the University nor the names of its contributors may be used to endorse
17  * or promote products derived from this software without specific prior
18  * written permission.
19  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
20  * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
21  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22  */
23 #ifndef lint
24 static char rcsid[] =
25     "@(#) Header: gencode.c,v 1.81 96/06/19 23:09:09 leres Exp (LBL)";
26 #endif
27 
28 #include <sys/types.h>
29 #include <sys/socket.h>
30 #include <sys/time.h>
31 
32 #ifdef __STDC__
33 struct mbuf;
34 struct rtentry;
35 #endif
36 
37 #include <net/if.h>
38 #include <net/bpf.h>
39 
40 #include <netinet/in.h>
41 #include <netinet/if_ether.h>
42 
43 #include <stdlib.h>
44 #include <memory.h>
45 #include <pcap.h>
46 #include <pcap-namedb.h>
47 #include <setjmp.h>
48 #ifdef __STDC__
49 #include <stdarg.h>
50 #else
51 #include <varargs.h>
52 #endif
53 
54 #ifdef HAVE_OS_PROTO_H
55 #include "os-proto.h"
56 #endif
57 
58 #include "pcap-int.h"
59 
60 #include "gencode.h"
61 
62 #ifndef ETHERTYPE_REVARP
63 #define ETHERTYPE_REVARP	0x8035
64 #endif
65 #ifndef	ETHERTYPE_MOPDL
66 #define	ETHERTYPE_MOPDL		0x6001
67 #endif
68 #ifndef	ETHERTYPE_MOPRC
69 #define	ETHERTYPE_MOPRC		0x6002
70 #endif
71 #ifndef	ETHERTYPE_DN
72 #define	ETHERTYPE_DN		0x6003
73 #endif
74 #ifndef	ETHERTYPE_LAT
75 #define	ETHERTYPE_LAT		0x6004
76 #endif
77 
78 #define JMP(c) ((c)|BPF_JMP|BPF_K)
79 
80 /* Locals */
81 static jmp_buf top_ctx;
82 static pcap_t *bpf_pcap;
83 
84 /* XXX */
85 #ifdef PCAP_FDDIPAD
86 int	pcap_fddipad = PCAP_FDDIPAD;
87 #else
88 int	pcap_fddipad;
89 #endif
90 
91 /* VARARGS */
92 __dead void
93 #ifdef __STDC__
94 bpf_error(const char *fmt, ...)
95 #else
96 bpf_error(fmt, va_alist)
97 	const char *fmt;
98 	va_dcl
99 #endif
100 {
101 	va_list ap;
102 
103 #ifdef __STDC__
104 	va_start(ap, fmt);
105 #else
106 	va_start(ap);
107 #endif
108 	if (bpf_pcap != NULL)
109 		(void)vsnprintf(pcap_geterr(bpf_pcap), PCAP_ERRBUF_SIZE,
110 		    fmt, ap);
111 	va_end(ap);
112 	longjmp(top_ctx, 1);
113 	/* NOTREACHED */
114 }
115 
116 static void init_linktype(int);
117 
118 static int alloc_reg(void);
119 static void free_reg(int);
120 
121 static struct block *root;
122 
123 /*
124  * We divvy out chunks of memory rather than calling malloc each time so
125  * we don't have to worry about leaking memory.  It's probably
126  * not a big deal if all this memory is wasted, but if this ever
127  * goes into a library, that would probably not be a good idea.
128  */
129 #define NCHUNKS 16
130 #define CHUNK0SIZE 1024
131 struct chunk {
132 	u_int n_left;
133 	void *m;
134 };
135 
136 static struct chunk chunks[NCHUNKS];
137 static int cur_chunk;
138 
139 static void *newchunk(u_int);
140 static void freechunks(void);
141 static __inline struct block *new_block(int);
142 static __inline struct slist *new_stmt(int);
143 static struct block *gen_retblk(int);
144 static __inline void syntax(void);
145 
146 static void backpatch(struct block *, struct block *);
147 static void merge(struct block *, struct block *);
148 static struct block *gen_cmp(u_int, u_int, bpf_int32);
149 static struct block *gen_mcmp(u_int, u_int, bpf_int32, bpf_u_int32);
150 static struct block *gen_bcmp(u_int, u_int, u_char *);
151 static struct block *gen_uncond(int);
152 static __inline struct block *gen_true(void);
153 static __inline struct block *gen_false(void);
154 static struct block *gen_linktype(int);
155 static struct block *gen_hostop(bpf_u_int32, bpf_u_int32, int, int, u_int, u_int);
156 static struct block *gen_ehostop(u_char *, int);
157 static struct block *gen_fhostop(u_char *, int);
158 static struct block *gen_dnhostop(bpf_u_int32, int, u_int);
159 static struct block *gen_host(bpf_u_int32, bpf_u_int32, int, int);
160 static struct block *gen_gateway(u_char *, bpf_u_int32 **, int, int);
161 static struct block *gen_ipfrag(void);
162 static struct block *gen_portatom(int, bpf_int32);
163 struct block *gen_portop(int, int, int);
164 static struct block *gen_port(int, int, int);
165 static int lookup_proto(char *, int);
166 static struct block *gen_proto(int, int, int);
167 static bpf_u_int32 net_mask(bpf_u_int32 *);
168 static struct slist *xfer_to_x(struct arth *);
169 static struct slist *xfer_to_a(struct arth *);
170 static struct block *gen_len(int, int);
171 
172 static void *
173 newchunk(n)
174 	u_int n;
175 {
176 	struct chunk *cp;
177 	int k, size;
178 
179 	/* XXX Round up to nearest long. */
180 	n = (n + sizeof(long) - 1) & ~(sizeof(long) - 1);
181 
182 	cp = &chunks[cur_chunk];
183 	if (n > cp->n_left) {
184 		++cp, k = ++cur_chunk;
185 		if (k >= NCHUNKS)
186 			bpf_error("out of memory");
187 		size = CHUNK0SIZE << k;
188 		cp->m = (void *)malloc(size);
		if (cp->m == NULL)
			bpf_error("out of memory");
189 		memset((char *)cp->m, 0, size);
190 		cp->n_left = size;
191 		if (n > size)
192 			bpf_error("out of memory");
193 	}
194 	cp->n_left -= n;
195 	return (void *)((char *)cp->m + cp->n_left);
196 }
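/*
 * Explanatory note (added commentary, not in the original source):
 * chunk k is sized CHUNK0SIZE << k, so each chunk the allocator moves
 * on to is twice the size of the previous one.  Requests are rounded
 * up to a multiple of sizeof(long) -- e.g. a 10-byte request becomes
 * 16 bytes on a 64-bit system -- and are carved from the end of the
 * current chunk toward its start.  Nothing is freed individually;
 * freechunks() releases everything once code generation is done.
 */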
197 
198 static void
199 freechunks()
200 {
201 	int i;
202 
203 	cur_chunk = 0;
204 	for (i = 0; i < NCHUNKS; ++i)
205 		if (chunks[i].m != NULL) {
206 			free(chunks[i].m);
207 			chunks[i].m = NULL;
208 		}
209 }
210 
211 /*
212  * A strdup whose allocations are freed after code generation is over.
213  */
214 char *
215 sdup(s)
216 	char *s;
217 {
218 	int n = strlen(s) + 1;
219 	char *cp = newchunk(n);
220 	strcpy(cp, s);
221 	return (cp);
222 }
223 
224 static __inline struct block *
225 new_block(code)
226 	int code;
227 {
228 	struct block *p;
229 
230 	p = (struct block *)newchunk(sizeof(*p));
231 	p->s.code = code;
232 	p->head = p;
233 
234 	return p;
235 }
236 
237 static __inline struct slist *
238 new_stmt(code)
239 	int code;
240 {
241 	struct slist *p;
242 
243 	p = (struct slist *)newchunk(sizeof(*p));
244 	p->s.code = code;
245 
246 	return p;
247 }
248 
249 static struct block *
250 gen_retblk(v)
251 	int v;
252 {
253 	struct block *b = new_block(BPF_RET|BPF_K);
254 
255 	b->s.k = v;
256 	return b;
257 }
258 
259 static __inline void
260 syntax()
261 {
262 	bpf_error("syntax error in filter expression");
263 }
264 
265 static bpf_u_int32 netmask;
266 static int snaplen;
267 
268 int
269 pcap_compile(pcap_t *p, struct bpf_program *program,
270 	     char *buf, int optimize, bpf_u_int32 mask)
271 {
272 	extern int n_errors;
273 	int len;
274 
275 	n_errors = 0;
276 	root = NULL;
277 	bpf_pcap = p;
278 	if (setjmp(top_ctx)) {
279 		freechunks();
280 		return (-1);
281 	}
282 
283 	netmask = mask;
284 	snaplen = pcap_snapshot(p);
285 
286 	lex_init(buf ? buf : "");
287 	init_linktype(pcap_datalink(p));
288 	(void)pcap_parse();
289 
290 	if (n_errors)
291 		syntax();
292 
293 	if (root == NULL)
294 		root = gen_retblk(snaplen);
295 
296 	if (optimize) {
297 		bpf_optimize(&root);
298 		if (root == NULL ||
299 		    (root->s.code == (BPF_RET|BPF_K) && root->s.k == 0))
300 			bpf_error("expression rejects all packets");
301 	}
302 	program->bf_insns = icode_to_fcode(root, &len);
303 	program->bf_len = len;
304 
305 	freechunks();
306 	return (0);
307 }
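/*
 * Usage sketch (illustrative only; the device name, filter string and
 * error handling below are assumptions, not part of this file).  A
 * typical caller compiles and installs a filter roughly like this:
 *
 *	struct bpf_program fp;
 *	bpf_u_int32 net, mask;
 *	char errbuf[PCAP_ERRBUF_SIZE];
 *
 *	if (pcap_lookupnet("le0", &net, &mask, errbuf) < 0)
 *		mask = 0;
 *	if (pcap_compile(p, &fp, "tcp port 80", 1, mask) < 0)
 *		errx(1, "%s", pcap_geterr(p));
 *	if (pcap_setfilter(p, &fp) < 0)
 *		errx(1, "%s", pcap_geterr(p));
 *
 * When anything in the grammar or code generator calls bpf_error(),
 * the message lands in p's error buffer and the longjmp() above makes
 * pcap_compile() return -1.
 */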
308 
309 /*
310  * Backpatch the blocks in 'list' to 'target'.  The 'sense' field indicates
311  * which of the jt and jf fields has been resolved and which is a pointer
312  * back to another unresolved block (or nil).  At least one of the fields
313  * in each block is already resolved.
314  */
315 static void
316 backpatch(list, target)
317 	struct block *list, *target;
318 {
319 	struct block *next;
320 
321 	while (list) {
322 		if (!list->sense) {
323 			next = JT(list);
324 			JT(list) = target;
325 		} else {
326 			next = JF(list);
327 			JF(list) = target;
328 		}
329 		list = next;
330 	}
331 }
332 
333 /*
334  * Merge the lists in b0 and b1, using the 'sense' field to indicate
335  * which of jt and jf is the link.
336  */
337 static void
338 merge(b0, b1)
339 	struct block *b0, *b1;
340 {
341 	register struct block **p = &b0;
342 
343 	/* Find end of list. */
344 	while (*p)
345 		p = !((*p)->sense) ? &JT(*p) : &JF(*p);
346 
347 	/* Concatenate the lists. */
348 	*p = b1;
349 }
350 
351 void
352 finish_parse(p)
353 	struct block *p;
354 {
355 	backpatch(p, gen_retblk(snaplen));
356 	p->sense = !p->sense;
357 	backpatch(p, gen_retblk(0));
358 	root = p->head;
359 }
360 
361 void
362 gen_and(b0, b1)
363 	struct block *b0, *b1;
364 {
365 	backpatch(b0, b1->head);
366 	b0->sense = !b0->sense;
367 	b1->sense = !b1->sense;
368 	merge(b1, b0);
369 	b1->sense = !b1->sense;
370 	b1->head = b0->head;
371 }
372 
373 void
374 gen_or(b0, b1)
375 	struct block *b0, *b1;
376 {
377 	b0->sense = !b0->sense;
378 	backpatch(b0, b1->head);
379 	b0->sense = !b0->sense;
380 	merge(b1, b0);
381 	b1->head = b0->head;
382 }
383 
384 void
385 gen_not(b)
386 	struct block *b;
387 {
388 	b->sense = !b->sense;
389 }
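/*
 * Explanatory note (added commentary): while an expression is being
 * built, each block's unresolved branch -- jt when 'sense' is clear,
 * jf when it is set -- forms a list of jumps that still need a common
 * target.  gen_and() backpatches b0's "true" exits to b1 and merges
 * the remaining "false" exits with b1's; gen_or() does the same with
 * the roles of true and false swapped; gen_not() just flips which
 * branch counts as the "true" exit.  finish_parse() resolves whatever
 * lists survive into "accept snaplen bytes" and "reject" blocks.
 */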
390 
391 static struct block *
392 gen_cmp(offset, size, v)
393 	u_int offset, size;
394 	bpf_int32 v;
395 {
396 	struct slist *s;
397 	struct block *b;
398 
399 	s = new_stmt(BPF_LD|BPF_ABS|size);
400 	s->s.k = offset;
401 
402 	b = new_block(JMP(BPF_JEQ));
403 	b->stmts = s;
404 	b->s.k = v;
405 
406 	return b;
407 }
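/*
 * Explanatory note (added commentary): gen_cmp(off, BPF_H, v) yields
 * the two-instruction test
 *
 *	ldh	[off]
 *	jeq	#v, Ltrue, Lfalse
 *
 * with the jump targets filled in later by backpatch()/merge();
 * gen_mcmp() below slips an "and #mask" between the load and the
 * compare when the mask is not all ones.
 */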
408 
409 static struct block *
410 gen_mcmp(offset, size, v, mask)
411 	u_int offset, size;
412 	bpf_int32 v;
413 	bpf_u_int32 mask;
414 {
415 	struct block *b = gen_cmp(offset, size, v);
416 	struct slist *s;
417 
418 	if (mask != 0xffffffff) {
419 		s = new_stmt(BPF_ALU|BPF_AND|BPF_K);
420 		s->s.k = mask;
421 		b->stmts->next = s;
422 	}
423 	return b;
424 }
425 
426 static struct block *
427 gen_bcmp(offset, size, v)
428 	u_int offset, size;
429 	u_char *v;
430 {
431 	struct block *b, *tmp;
432 
433 	b = NULL;
434 	while (size >= 4) {
435 		u_char *p = &v[size - 4];
436 		bpf_int32 w = (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
437 		tmp = gen_cmp(offset + size - 4, BPF_W, w);
438 		if (b != NULL)
439 			gen_and(b, tmp);
440 		b = tmp;
441 		size -= 4;
442 	}
443 	while (size >= 2) {
444 		u_char *p = &v[size - 2];
445 		bpf_int32 w = (p[0] << 8) | p[1];
446 		tmp = gen_cmp(offset + size - 2, BPF_H, w);
447 		if (b != NULL)
448 			gen_and(b, tmp);
449 		b = tmp;
450 		size -= 2;
451 	}
452 	if (size > 0) {
453 		tmp = gen_cmp(offset, BPF_B, (bpf_int32)v[0]);
454 		if (b != NULL)
455 			gen_and(b, tmp);
456 		b = tmp;
457 	}
458 	return b;
459 }
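/*
 * Explanatory note (added commentary): gen_bcmp() compares a byte
 * string by breaking it into 32-bit word compares working from the
 * tail toward the head, then 16-bit and 8-bit compares for whatever
 * is left, ANDing the pieces together.  A 6-byte Ethernet address,
 * for example, becomes one word compare at offset + 2 and one
 * halfword compare at offset.
 */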
460 
461 /*
462  * Various code constructs need to know the layout of the data link
463  * layer.  These variables give the necessary offsets.  off_linktype
464  * is set to -1 for no encapsulation, in which case IP is assumed.
465  */
466 static u_int off_linktype;
467 static u_int off_nl;
468 static int linktype;
469 
470 static void
471 init_linktype(type)
472 	int type;
473 {
474 	linktype = type;
475 
476 	switch (type) {
477 
478 	case DLT_EN10MB:
479 		off_linktype = 12;
480 		off_nl = 14;
481 		return;
482 
483 	case DLT_SLIP:
484 		/*
485 		 * SLIP doesn't have a link level type.  The 16 byte
486 		 * header is hacked into our SLIP driver.
487 		 */
488 		off_linktype = -1;
489 		off_nl = 16;
490 		return;
491 
492 	case DLT_NULL:
493 		off_linktype = -1;
494 		off_nl = 0;
495 		return;
496 
497 	case DLT_LOOP:
498 		off_linktype = -1;
499 		off_nl = 4;
500 		return;
501 
502 	case DLT_ENC:
503 		off_linktype = -1;
504 		off_nl = 12;
505 		return;
506 
507 	case DLT_PPP:
508 		off_linktype = 2;
509 		off_nl = 4;
510 		return;
511 
512 	case DLT_FDDI:
513 		/*
514 		 * FDDI doesn't really have a link-level type field.
515 		 * We assume that SSAP = SNAP is being used and pick
516 		 * out the encapsulated Ethernet type.
517 		 */
518 		off_linktype = 19;
519 #ifdef PCAP_FDDIPAD
520 		off_linktype += pcap_fddipad;
521 #endif
522 		off_nl = 21;
523 #ifdef PCAP_FDDIPAD
524 		off_nl += pcap_fddipad;
525 #endif
526 		return;
527 
528 	case DLT_IEEE802:
529 		off_linktype = 20;
530 		off_nl = 22;
531 		return;
532 
533 	case DLT_ATM_RFC1483:
534 		/*
535 		 * assume routed, non-ISO PDUs
536 		 * (i.e., LLC = 0xAA-AA-03, OUI = 0x00-00-00)
537 		 */
538 		off_linktype = 6;
539 		off_nl = 8;
540 		return;
541 	}
542 	bpf_error("unknown data link type 0x%x", linktype);
543 	/* NOTREACHED */
544 }
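/*
 * Explanatory note (added commentary): off_linktype is the offset of
 * the field that identifies the network-layer protocol, and off_nl is
 * the offset of the network-layer header itself.  For DLT_EN10MB the
 * 6-byte destination and source addresses put the Ethernet type field
 * at offset 12 and the IP (or ARP, ...) header at offset 14; for
 * encapsulations without an Ethernet-style type field (SLIP, DLT_NULL,
 * DLT_LOOP, DLT_ENC), off_linktype is set to -1 and gen_linktype()
 * handles them case by case.
 */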
545 
546 static struct block *
547 gen_uncond(rsense)
548 	int rsense;
549 {
550 	struct block *b;
551 	struct slist *s;
552 
553 	s = new_stmt(BPF_LD|BPF_IMM);
554 	s->s.k = !rsense;
555 	b = new_block(JMP(BPF_JEQ));
556 	b->stmts = s;
557 
558 	return b;
559 }
560 
561 static __inline struct block *
562 gen_true()
563 {
564 	return gen_uncond(1);
565 }
566 
567 static __inline struct block *
568 gen_false()
569 {
570 	return gen_uncond(0);
571 }
572 
573 static struct block *
574 gen_linktype(proto)
575 	int proto;
576 {
577 	switch (linktype) {
578 	case DLT_SLIP:
579 		if (proto == ETHERTYPE_IP)
580 			return gen_true();
581 		else
582 			return gen_false();
583 
584 	case DLT_PPP:
585 		if (proto == ETHERTYPE_IP)
586 			proto = 0x0021;		/* XXX - need ppp.h defs */
587 		break;
588 
589 	case DLT_LOOP:
590 		if (proto == ETHERTYPE_IP)
591 			return (gen_cmp(0, BPF_W, (bpf_int32) AF_INET));
592 		else
593 			return gen_false();
594 		break;
595 
596 	case DLT_ENC:
597 	case DLT_NULL:
598 		/* XXX */
599 		if (proto == ETHERTYPE_IP)
600 			return (gen_cmp(0, BPF_W, htonl((bpf_int32) AF_INET)));
601 		else
602 			return gen_false();
603 	}
604 	return gen_cmp(off_linktype, BPF_H, (bpf_int32)proto);
605 }
606 
607 static struct block *
608 gen_hostop(addr, mask, dir, proto, src_off, dst_off)
609 	bpf_u_int32 addr;
610 	bpf_u_int32 mask;
611 	int dir, proto;
612 	u_int src_off, dst_off;
613 {
614 	struct block *b0, *b1;
615 	u_int offset;
616 
617 	switch (dir) {
618 
619 	case Q_SRC:
620 		offset = src_off;
621 		break;
622 
623 	case Q_DST:
624 		offset = dst_off;
625 		break;
626 
627 	case Q_AND:
628 		b0 = gen_hostop(addr, mask, Q_SRC, proto, src_off, dst_off);
629 		b1 = gen_hostop(addr, mask, Q_DST, proto, src_off, dst_off);
630 		gen_and(b0, b1);
631 		return b1;
632 
633 	case Q_OR:
634 	case Q_DEFAULT:
635 		b0 = gen_hostop(addr, mask, Q_SRC, proto, src_off, dst_off);
636 		b1 = gen_hostop(addr, mask, Q_DST, proto, src_off, dst_off);
637 		gen_or(b0, b1);
638 		return b1;
639 
640 	default:
641 		abort();
642 	}
643 	b0 = gen_linktype(proto);
644 	b1 = gen_mcmp(offset, BPF_W, (bpf_int32)addr, mask);
645 	gen_and(b0, b1);
646 	return b1;
647 }
648 
649 static struct block *
650 gen_ehostop(eaddr, dir)
651 	u_char *eaddr;
652 	int dir;
653 {
654 	struct block *b0, *b1;
655 
656 	switch (dir) {
657 	case Q_SRC:
658 		return gen_bcmp(6, 6, eaddr);
659 
660 	case Q_DST:
661 		return gen_bcmp(0, 6, eaddr);
662 
663 	case Q_AND:
664 		b0 = gen_ehostop(eaddr, Q_SRC);
665 		b1 = gen_ehostop(eaddr, Q_DST);
666 		gen_and(b0, b1);
667 		return b1;
668 
669 	case Q_DEFAULT:
670 	case Q_OR:
671 		b0 = gen_ehostop(eaddr, Q_SRC);
672 		b1 = gen_ehostop(eaddr, Q_DST);
673 		gen_or(b0, b1);
674 		return b1;
675 	}
676 	abort();
677 	/* NOTREACHED */
678 }
679 
680 /*
681  * Like gen_ehostop, but for DLT_FDDI
682  */
683 static struct block *
684 gen_fhostop(eaddr, dir)
685 	u_char *eaddr;
686 	int dir;
687 {
688 	struct block *b0, *b1;
689 
690 	switch (dir) {
691 	case Q_SRC:
692 #ifdef PCAP_FDDIPAD
693 		return gen_bcmp(6 + 1 + pcap_fddipad, 6, eaddr);
694 #else
695 		return gen_bcmp(6 + 1, 6, eaddr);
696 #endif
697 
698 	case Q_DST:
699 #ifdef PCAP_FDDIPAD
700 		return gen_bcmp(0 + 1 + pcap_fddipad, 6, eaddr);
701 #else
702 		return gen_bcmp(0 + 1, 6, eaddr);
703 #endif
704 
705 	case Q_AND:
706 		b0 = gen_fhostop(eaddr, Q_SRC);
707 		b1 = gen_fhostop(eaddr, Q_DST);
708 		gen_and(b0, b1);
709 		return b1;
710 
711 	case Q_DEFAULT:
712 	case Q_OR:
713 		b0 = gen_fhostop(eaddr, Q_SRC);
714 		b1 = gen_fhostop(eaddr, Q_DST);
715 		gen_or(b0, b1);
716 		return b1;
717 	}
718 	abort();
719 	/* NOTREACHED */
720 }
721 
722 /*
723  * This is quite tricky because there may be pad bytes in front of the
724  * DECNET header, and then there are two possible data packet formats that
725  * carry both src and dst addresses, plus 5 packet types in a format that
726  * carries only the src node, plus 2 types that use a different format and
727  * also carry just the src node.
728  *
729  * Yuck.
730  *
731  * Instead of doing those all right, we just look for data packets with
732  * 0 or 1 bytes of padding.  If you want to look at other packets, that
733  * will require a lot more hacking.
734  *
735  * To add support for filtering on DECNET "areas" (network numbers)
736  * one would want to add a "mask" argument to this routine.  That would
737  * make the filter even more inefficient, although one could be clever
738  * and not generate masking instructions if the mask is 0xFFFF.
739  */
740 static struct block *
741 gen_dnhostop(addr, dir, base_off)
742 	bpf_u_int32 addr;
743 	int dir;
744 	u_int base_off;
745 {
746 	struct block *b0, *b1, *b2, *tmp;
747 	u_int offset_lh;	/* offset if long header is received */
748 	u_int offset_sh;	/* offset if short header is received */
749 
750 	switch (dir) {
751 
752 	case Q_DST:
753 		offset_sh = 1;	/* follows flags */
754 		offset_lh = 7;	/* flgs,darea,dsubarea,HIORD */
755 		break;
756 
757 	case Q_SRC:
758 		offset_sh = 3;	/* follows flags, dstnode */
759 		offset_lh = 15;	/* flgs,darea,dsubarea,did,sarea,ssub,HIORD */
760 		break;
761 
762 	case Q_AND:
763 		/* Inefficient because we do our Calvinball dance twice */
764 		b0 = gen_dnhostop(addr, Q_SRC, base_off);
765 		b1 = gen_dnhostop(addr, Q_DST, base_off);
766 		gen_and(b0, b1);
767 		return b1;
768 
769 	case Q_OR:
770 	case Q_DEFAULT:
771 		/* Inefficient because we do our Calvinball dance twice */
772 		b0 = gen_dnhostop(addr, Q_SRC, base_off);
773 		b1 = gen_dnhostop(addr, Q_DST, base_off);
774 		gen_or(b0, b1);
775 		return b1;
776 
777 	default:
778 		abort();
779 	}
780 	b0 = gen_linktype(ETHERTYPE_DN);
781 	/* Check for pad = 1, long header case */
782 	tmp = gen_mcmp(base_off + 2, BPF_H,
783 	    (bpf_int32)ntohs(0x0681), (bpf_int32)ntohs(0x07FF));
784 	b1 = gen_cmp(base_off + 2 + 1 + offset_lh,
785 	    BPF_H, (bpf_int32)ntohs(addr));
786 	gen_and(tmp, b1);
787 	/* Check for pad = 0, long header case */
788 	tmp = gen_mcmp(base_off + 2, BPF_B, (bpf_int32)0x06, (bpf_int32)0x7);
789 	b2 = gen_cmp(base_off + 2 + offset_lh, BPF_H, (bpf_int32)ntohs(addr));
790 	gen_and(tmp, b2);
791 	gen_or(b2, b1);
792 	/* Check for pad = 1, short header case */
793 	tmp = gen_mcmp(base_off + 2, BPF_H,
794 	    (bpf_int32)ntohs(0x0281), (bpf_int32)ntohs(0x07FF));
795 	b2 = gen_cmp(base_off + 2 + 1 + offset_sh,
796 	    BPF_H, (bpf_int32)ntohs(addr));
797 	gen_and(tmp, b2);
798 	gen_or(b2, b1);
799 	/* Check for pad = 0, short header case */
800 	tmp = gen_mcmp(base_off + 2, BPF_B, (bpf_int32)0x02, (bpf_int32)0x7);
801 	b2 = gen_cmp(base_off + 2 + offset_sh, BPF_H, (bpf_int32)ntohs(addr));
802 	gen_and(tmp, b2);
803 	gen_or(b2, b1);
804 
805 	/* Combine with test for linktype */
806 	gen_and(b0, b1);
807 	return b1;
808 }
809 
810 static struct block *
811 gen_host(addr, mask, proto, dir)
812 	bpf_u_int32 addr;
813 	bpf_u_int32 mask;
814 	int proto;
815 	int dir;
816 {
817 	struct block *b0, *b1;
818 
819 	switch (proto) {
820 
821 	case Q_DEFAULT:
822 		b0 = gen_host(addr, mask, Q_IP, dir);
823 		b1 = gen_host(addr, mask, Q_ARP, dir);
824 		gen_or(b0, b1);
825 		b0 = gen_host(addr, mask, Q_RARP, dir);
826 		gen_or(b1, b0);
827 		return b0;
828 
829 	case Q_IP:
830 		return gen_hostop(addr, mask, dir, ETHERTYPE_IP,
831 				  off_nl + 12, off_nl + 16);
832 
833 	case Q_RARP:
834 		return gen_hostop(addr, mask, dir, ETHERTYPE_REVARP,
835 				  off_nl + 14, off_nl + 24);
836 
837 	case Q_ARP:
838 		return gen_hostop(addr, mask, dir, ETHERTYPE_ARP,
839 				  off_nl + 14, off_nl + 24);
840 
841 	case Q_TCP:
842 		bpf_error("'tcp' modifier applied to host");
843 
844 	case Q_UDP:
845 		bpf_error("'udp' modifier applied to host");
846 
847 	case Q_ICMP:
848 		bpf_error("'icmp' modifier applied to host");
849 
850 	case Q_IGMP:
851 		bpf_error("'igmp' modifier applied to host");
852 
853 	case Q_DECNET:
854 		return gen_dnhostop(addr, dir, off_nl);
855 
856 	case Q_LAT:
857 		bpf_error("LAT host filtering not implemented");
858 
859 	case Q_MOPDL:
860 		bpf_error("MOPDL host filtering not implemented");
861 
862 	case Q_MOPRC:
863 		bpf_error("MOPRC host filtering not implemented");
864 
865 	default:
866 		abort();
867 	}
868 	/* NOTREACHED */
869 }
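/*
 * Illustrative example (added commentary): on Ethernet, "src host"
 * with an IPv4 address reduces to gen_linktype(ETHERTYPE_IP) ANDed
 * with a (possibly masked) 32-bit compare at off_nl + 12, the IP
 * source address field; "dst host" uses off_nl + 16.  The ARP and
 * RARP cases compare the sender/target protocol addresses at
 * off_nl + 14 and off_nl + 24 instead.
 */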
870 
871 static struct block *
872 gen_gateway(eaddr, alist, proto, dir)
873 	u_char *eaddr;
874 	bpf_u_int32 **alist;
875 	int proto;
876 	int dir;
877 {
878 	struct block *b0, *b1, *tmp;
879 
880 	if (dir != 0)
881 		bpf_error("direction applied to 'gateway'");
882 
883 	switch (proto) {
884 	case Q_DEFAULT:
885 	case Q_IP:
886 	case Q_ARP:
887 	case Q_RARP:
888 		if (linktype == DLT_EN10MB)
889 			b0 = gen_ehostop(eaddr, Q_OR);
890 		else if (linktype == DLT_FDDI)
891 			b0 = gen_fhostop(eaddr, Q_OR);
892 		else
893 			bpf_error(
894 			    "'gateway' supported only on ethernet or FDDI");
895 
896 		b1 = gen_host(**alist++, 0xffffffffL, proto, Q_OR);
897 		while (*alist) {
898 			tmp = gen_host(**alist++, 0xffffffffL, proto, Q_OR);
899 			gen_or(b1, tmp);
900 			b1 = tmp;
901 		}
902 		gen_not(b1);
903 		gen_and(b0, b1);
904 		return b1;
905 	}
906 	bpf_error("illegal modifier of 'gateway'");
907 	/* NOTREACHED */
908 }
909 
910 struct block *
911 gen_proto_abbrev(proto)
912 	int proto;
913 {
914 	struct block *b0, *b1;
915 
916 	switch (proto) {
917 
918 	case Q_TCP:
919 		b0 = gen_linktype(ETHERTYPE_IP);
920 		b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)IPPROTO_TCP);
921 		gen_and(b0, b1);
922 		break;
923 
924 	case Q_UDP:
925 		b0 =  gen_linktype(ETHERTYPE_IP);
926 		b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)IPPROTO_UDP);
927 		gen_and(b0, b1);
928 		break;
929 
930 	case Q_ICMP:
931 		b0 =  gen_linktype(ETHERTYPE_IP);
932 		b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)IPPROTO_ICMP);
933 		gen_and(b0, b1);
934 		break;
935 
936 	case Q_IGMP:
937 		b0 =  gen_linktype(ETHERTYPE_IP);
938 		b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)2);
939 		gen_and(b0, b1);
940 		break;
941 
942 	case Q_IP:
943 		b1 =  gen_linktype(ETHERTYPE_IP);
944 		break;
945 
946 	case Q_ARP:
947 		b1 =  gen_linktype(ETHERTYPE_ARP);
948 		break;
949 
950 	case Q_RARP:
951 		b1 =  gen_linktype(ETHERTYPE_REVARP);
952 		break;
953 
954 	case Q_LINK:
955 		bpf_error("link layer applied in wrong context");
956 
957 	case Q_DECNET:
958 		b1 =  gen_linktype(ETHERTYPE_DN);
959 		break;
960 
961 	case Q_LAT:
962 		b1 =  gen_linktype(ETHERTYPE_LAT);
963 		break;
964 
965 	case Q_MOPDL:
966 		b1 =  gen_linktype(ETHERTYPE_MOPDL);
967 		break;
968 
969 	case Q_MOPRC:
970 		b1 =  gen_linktype(ETHERTYPE_MOPRC);
971 		break;
972 
973 	default:
974 		abort();
975 	}
976 	return b1;
977 }
978 
979 static struct block *
980 gen_ipfrag()
981 {
982 	struct slist *s;
983 	struct block *b;
984 
985 	/* not ip frag */
986 	s = new_stmt(BPF_LD|BPF_H|BPF_ABS);
987 	s->s.k = off_nl + 6;
988 	b = new_block(JMP(BPF_JSET));
989 	b->s.k = 0x1fff;
990 	b->stmts = s;
991 	gen_not(b);
992 
993 	return b;
994 }
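/*
 * Explanatory note (added commentary): the block above accepts a
 * packet only when the fragment-offset bits (the low 13 bits of the
 * halfword at off_nl + 6) are all zero, i.e. when this is the first
 * (or only) fragment.  Port tests depend on it because later
 * fragments of an IP datagram do not carry the TCP/UDP header.
 */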
995 
996 static struct block *
997 gen_portatom(off, v)
998 	int off;
999 	bpf_int32 v;
1000 {
1001 	struct slist *s;
1002 	struct block *b;
1003 
1004 	s = new_stmt(BPF_LDX|BPF_MSH|BPF_B);
1005 	s->s.k = off_nl;
1006 
1007 	s->next = new_stmt(BPF_LD|BPF_IND|BPF_H);
1008 	s->next->s.k = off_nl + off;
1009 
1010 	b = new_block(JMP(BPF_JEQ));
1011 	b->stmts = s;
1012 	b->s.k = v;
1013 
1014 	return b;
1015 }
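/*
 * Explanatory note (added commentary): the BPF_LDX|BPF_MSH|BPF_B
 * statement loads 4 * (P[off_nl] & 0x0f) into the index register --
 * the IP header length in bytes -- so the indirect halfword load that
 * follows picks up the source port (off == 0) or destination port
 * (off == 2) of the TCP/UDP header sitting after the variable-length
 * IP header.
 */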
1016 
1017 struct block *
1018 gen_portop(port, proto, dir)
1019 	int port, proto, dir;
1020 {
1021 	struct block *b0, *b1, *tmp;
1022 
1023 	/* ip proto 'proto' */
1024 	tmp = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)proto);
1025 	b0 = gen_ipfrag();
1026 	gen_and(tmp, b0);
1027 
1028 	switch (dir) {
1029 	case Q_SRC:
1030 		b1 = gen_portatom(0, (bpf_int32)port);
1031 		break;
1032 
1033 	case Q_DST:
1034 		b1 = gen_portatom(2, (bpf_int32)port);
1035 		break;
1036 
1037 	case Q_OR:
1038 	case Q_DEFAULT:
1039 		tmp = gen_portatom(0, (bpf_int32)port);
1040 		b1 = gen_portatom(2, (bpf_int32)port);
1041 		gen_or(tmp, b1);
1042 		break;
1043 
1044 	case Q_AND:
1045 		tmp = gen_portatom(0, (bpf_int32)port);
1046 		b1 = gen_portatom(2, (bpf_int32)port);
1047 		gen_and(tmp, b1);
1048 		break;
1049 
1050 	default:
1051 		abort();
1052 	}
1053 	gen_and(b0, b1);
1054 
1055 	return b1;
1056 }
1057 
1058 static struct block *
1059 gen_port(port, ip_proto, dir)
1060 	int port;
1061 	int ip_proto;
1062 	int dir;
1063 {
1064 	struct block *b0, *b1, *tmp;
1065 
1066 	/* ether proto ip */
1067 	b0 =  gen_linktype(ETHERTYPE_IP);
1068 
1069 	switch (ip_proto) {
1070 	case IPPROTO_UDP:
1071 	case IPPROTO_TCP:
1072 		b1 = gen_portop(port, ip_proto, dir);
1073 		break;
1074 
1075 	case PROTO_UNDEF:
1076 		tmp = gen_portop(port, IPPROTO_TCP, dir);
1077 		b1 = gen_portop(port, IPPROTO_UDP, dir);
1078 		gen_or(tmp, b1);
1079 		break;
1080 
1081 	default:
1082 		abort();
1083 	}
1084 	gen_and(b0, b1);
1085 	return b1;
1086 }
1087 
1088 static int
1089 lookup_proto(name, proto)
1090 	char *name;
1091 	int proto;
1092 {
1093 	int v;
1094 
1095 	switch (proto) {
1096 	case Q_DEFAULT:
1097 	case Q_IP:
1098 		v = pcap_nametoproto(name);
1099 		if (v == PROTO_UNDEF)
1100 			bpf_error("unknown ip proto '%s'", name);
1101 		break;
1102 
1103 	case Q_LINK:
1104 		/* XXX should look up h/w protocol type based on linktype */
1105 		v = pcap_nametoeproto(name);
1106 		if (v == PROTO_UNDEF)
1107 			bpf_error("unknown ether proto '%s'", name);
1108 		break;
1109 
1110 	default:
1111 		v = PROTO_UNDEF;
1112 		break;
1113 	}
1114 	return v;
1115 }
1116 
1117 static struct block *
1118 gen_proto(v, proto, dir)
1119 	int v;
1120 	int proto;
1121 	int dir;
1122 {
1123 	struct block *b0, *b1;
1124 
1125 	if (dir != Q_DEFAULT)
1126 		bpf_error("direction applied to 'proto'");
1127 
1128 	switch (proto) {
1129 	case Q_DEFAULT:
1130 	case Q_IP:
1131 		b0 = gen_linktype(ETHERTYPE_IP);
1132 		b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)v);
1133 		gen_and(b0, b1);
1134 		return b1;
1135 
1136 	case Q_ARP:
1137 		bpf_error("arp does not encapsulate another protocol");
1138 		/* NOTREACHED */
1139 
1140 	case Q_RARP:
1141 		bpf_error("rarp does not encapsulate another protocol");
1142 		/* NOTREACHED */
1143 
1144 	case Q_DECNET:
1145 		bpf_error("decnet encapsulation is not specifiable");
1146 		/* NOTREACHED */
1147 
1148 	case Q_LAT:
1149 		bpf_error("lat does not encapsulate another protocol");
1150 		/* NOTREACHED */
1151 
1152 	case Q_MOPRC:
1153 		bpf_error("moprc does not encapsulate another protocol");
1154 		/* NOTREACHED */
1155 
1156 	case Q_MOPDL:
1157 		bpf_error("mopdl does not encapsulate another protocol");
1158 		/* NOTREACHED */
1159 
1160 	case Q_LINK:
1161 		return gen_linktype(v);
1162 
1163 	case Q_UDP:
1164 		bpf_error("'udp proto' is bogus");
1165 		/* NOTREACHED */
1166 
1167 	case Q_TCP:
1168 		bpf_error("'tcp proto' is bogus");
1169 		/* NOTREACHED */
1170 
1171 	case Q_ICMP:
1172 		bpf_error("'icmp proto' is bogus");
1173 		/* NOTREACHED */
1174 
1175 	case Q_IGMP:
1176 		bpf_error("'igmp proto' is bogus");
1177 		/* NOTREACHED */
1178 
1179 	default:
1180 		abort();
1181 		/* NOTREACHED */
1182 	}
1183 	/* NOTREACHED */
1184 }
1185 
1186 /*
1187  * Left justify 'addr' and return its resulting network mask.
1188  */
1189 static bpf_u_int32
1190 net_mask(addr)
1191 	bpf_u_int32 *addr;
1192 {
1193 	register bpf_u_int32 m = 0xffffffff;
1194 
1195 	if (*addr)
1196 		while ((*addr & 0xff000000) == 0)
1197 			*addr <<= 8, m <<= 8;
1198 
1199 	return m;
1200 }
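/*
 * Illustrative example (added commentary): a network given as "10"
 * reaches net_mask() as 0x0000000a; it is shifted left to 0x0a000000
 * and the function returns 0xff000000, so "net 10" ends up matching
 * any address whose first octet is 10.
 */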
1201 
1202 struct block *
1203 gen_scode(name, q)
1204 	char *name;
1205 	struct qual q;
1206 {
1207 	int proto = q.proto;
1208 	int dir = q.dir;
1209 	u_char *eaddr;
1210 	bpf_u_int32 mask, addr, **alist;
1211 	struct block *b, *tmp;
1212 	int port, real_proto;
1213 
1214 	switch (q.addr) {
1215 
1216 	case Q_NET:
1217 		addr = pcap_nametonetaddr(name);
1218 		if (addr == 0)
1219 			bpf_error("unknown network '%s'", name);
1220 		mask = net_mask(&addr);
1221 		return gen_host(addr, mask, proto, dir);
1222 
1223 	case Q_DEFAULT:
1224 	case Q_HOST:
1225 		if (proto == Q_LINK) {
1226 			switch (linktype) {
1227 
1228 			case DLT_EN10MB:
1229 				eaddr = pcap_ether_hostton(name);
1230 				if (eaddr == NULL)
1231 					bpf_error(
1232 					    "unknown ether host '%s'", name);
1233 				return gen_ehostop(eaddr, dir);
1234 
1235 			case DLT_FDDI:
1236 				eaddr = pcap_ether_hostton(name);
1237 				if (eaddr == NULL)
1238 					bpf_error(
1239 					    "unknown FDDI host '%s'", name);
1240 				return gen_fhostop(eaddr, dir);
1241 
1242 			default:
1243 				bpf_error(
1244 			"only ethernet/FDDI supports link-level host name");
1245 				break;
1246 			}
1247 		} else if (proto == Q_DECNET) {
1248 			unsigned short dn_addr = __pcap_nametodnaddr(name);
1249 			/*
1250 			 * I don't think DECNET hosts can be multihomed, so
1251 			 * there is no need to build up a list of addresses.
1252 			 */
1253 			return (gen_host(dn_addr, 0, proto, dir));
1254 		} else {
1255 			alist = pcap_nametoaddr(name);
1256 			if (alist == NULL || *alist == NULL)
1257 				bpf_error("unknown host '%s'", name);
1258 			b = gen_host(**alist++, 0xffffffffL, proto, dir);
1259 			while (*alist) {
1260 				tmp = gen_host(**alist++, 0xffffffffL,
1261 					       proto, dir);
1262 				gen_or(b, tmp);
1263 				b = tmp;
1264 			}
1265 			return b;
1266 		}
1267 
1268 	case Q_PORT:
1269 		if (proto != Q_DEFAULT && proto != Q_UDP && proto != Q_TCP)
1270 			bpf_error("illegal qualifier of 'port'");
1271 		if (pcap_nametoport(name, &port, &real_proto) == 0)
1272 			bpf_error("unknown port '%s'", name);
1273 		if (proto == Q_UDP) {
1274 			if (real_proto == IPPROTO_TCP)
1275 				bpf_error("port '%s' is tcp", name);
1276 			else
1277 				/* override PROTO_UNDEF */
1278 				real_proto = IPPROTO_UDP;
1279 		}
1280 		if (proto == Q_TCP) {
1281 			if (real_proto == IPPROTO_UDP)
1282 				bpf_error("port '%s' is udp", name);
1283 			else
1284 				/* override PROTO_UNDEF */
1285 				real_proto = IPPROTO_TCP;
1286 		}
1287 		return gen_port(port, real_proto, dir);
1288 
1289 	case Q_GATEWAY:
1290 		eaddr = pcap_ether_hostton(name);
1291 		if (eaddr == NULL)
1292 			bpf_error("unknown ether host: %s", name);
1293 
1294 		alist = pcap_nametoaddr(name);
1295 		if (alist == NULL || *alist == NULL)
1296 			bpf_error("unknown host '%s'", name);
1297 		return gen_gateway(eaddr, alist, proto, dir);
1298 
1299 	case Q_PROTO:
1300 		real_proto = lookup_proto(name, proto);
1301 		if (real_proto >= 0)
1302 			return gen_proto(real_proto, proto, dir);
1303 		else
1304 			bpf_error("unknown protocol: %s", name);
1305 
1306 	case Q_UNDEF:
1307 		syntax();
1308 		/* NOTREACHED */
1309 	}
1310 	abort();
1311 	/* NOTREACHED */
1312 }
1313 
1314 struct block *
1315 gen_ncode(v, q)
1316 	bpf_u_int32 v;
1317 	struct qual q;
1318 {
1319 	bpf_u_int32 mask;
1320 	int proto = q.proto;
1321 	int dir = q.dir;
1322 
1323 	switch (q.addr) {
1324 
1325 	case Q_DEFAULT:
1326 	case Q_HOST:
1327 	case Q_NET:
1328 		if (proto == Q_DECNET)
1329 			return gen_host(v, 0, proto, dir);
1330 		else if (proto == Q_LINK) {
1331 			bpf_error("illegal link layer address");
1332 		} else {
1333 			mask = net_mask(&v);
1334 			return gen_host(v, mask, proto, dir);
1335 		}
1336 
1337 	case Q_PORT:
1338 		if (proto == Q_UDP)
1339 			proto = IPPROTO_UDP;
1340 		else if (proto == Q_TCP)
1341 			proto = IPPROTO_TCP;
1342 		else if (proto == Q_DEFAULT)
1343 			proto = PROTO_UNDEF;
1344 		else
1345 			bpf_error("illegal qualifier of 'port'");
1346 
1347 		return gen_port((int)v, proto, dir);
1348 
1349 	case Q_GATEWAY:
1350 		bpf_error("'gateway' requires a name");
1351 		/* NOTREACHED */
1352 
1353 	case Q_PROTO:
1354 		return gen_proto((int)v, proto, dir);
1355 
1356 	case Q_UNDEF:
1357 		syntax();
1358 		/* NOTREACHED */
1359 
1360 	default:
1361 		abort();
1362 		/* NOTREACHED */
1363 	}
1364 	/* NOTREACHED */
1365 }
1366 
1367 struct block *
1368 gen_ecode(eaddr, q)
1369 	u_char *eaddr;
1370 	struct qual q;
1371 {
1372 	if ((q.addr == Q_HOST || q.addr == Q_DEFAULT) && q.proto == Q_LINK) {
1373 		if (linktype == DLT_EN10MB)
1374 			return gen_ehostop(eaddr, (int)q.dir);
1375 		if (linktype == DLT_FDDI)
1376 			return gen_fhostop(eaddr, (int)q.dir);
1377 	}
1378 	bpf_error("ethernet address used in non-ether expression");
1379 	/* NOTREACHED */
1380 }
1381 
1382 void
1383 sappend(s0, s1)
1384 	struct slist *s0, *s1;
1385 {
1386 	/*
1387 	 * This is definitely not the best way to do this, but the
1388 	 * lists will rarely get long.
1389 	 */
1390 	while (s0->next)
1391 		s0 = s0->next;
1392 	s0->next = s1;
1393 }
1394 
1395 static struct slist *
1396 xfer_to_x(a)
1397 	struct arth *a;
1398 {
1399 	struct slist *s;
1400 
1401 	s = new_stmt(BPF_LDX|BPF_MEM);
1402 	s->s.k = a->regno;
1403 	return s;
1404 }
1405 
1406 static struct slist *
1407 xfer_to_a(a)
1408 	struct arth *a;
1409 {
1410 	struct slist *s;
1411 
1412 	s = new_stmt(BPF_LD|BPF_MEM);
1413 	s->s.k = a->regno;
1414 	return s;
1415 }
1416 
1417 struct arth *
1418 gen_load(proto, index, size)
1419 	int proto;
1420 	struct arth *index;
1421 	int size;
1422 {
1423 	struct slist *s, *tmp;
1424 	struct block *b;
1425 	int regno = alloc_reg();
1426 
1427 	free_reg(index->regno);
1428 	switch (size) {
1429 
1430 	default:
1431 		bpf_error("data size must be 1, 2, or 4");
1432 
1433 	case 1:
1434 		size = BPF_B;
1435 		break;
1436 
1437 	case 2:
1438 		size = BPF_H;
1439 		break;
1440 
1441 	case 4:
1442 		size = BPF_W;
1443 		break;
1444 	}
1445 	switch (proto) {
1446 	default:
1447 		bpf_error("unsupported index operation");
1448 
1449 	case Q_LINK:
1450 		s = xfer_to_x(index);
1451 		tmp = new_stmt(BPF_LD|BPF_IND|size);
1452 		sappend(s, tmp);
1453 		sappend(index->s, s);
1454 		break;
1455 
1456 	case Q_IP:
1457 	case Q_ARP:
1458 	case Q_RARP:
1459 	case Q_DECNET:
1460 	case Q_LAT:
1461 	case Q_MOPRC:
1462 	case Q_MOPDL:
1463 		/* XXX Note that we assume a fixed link header here. */
1464 		s = xfer_to_x(index);
1465 		tmp = new_stmt(BPF_LD|BPF_IND|size);
1466 		tmp->s.k = off_nl;
1467 		sappend(s, tmp);
1468 		sappend(index->s, s);
1469 
1470 		b = gen_proto_abbrev(proto);
1471 		if (index->b)
1472 			gen_and(index->b, b);
1473 		index->b = b;
1474 		break;
1475 
1476 	case Q_TCP:
1477 	case Q_UDP:
1478 	case Q_ICMP:
1479 	case Q_IGMP:
1480 		s = new_stmt(BPF_LDX|BPF_MSH|BPF_B);
1481 		s->s.k = off_nl;
1482 		sappend(s, xfer_to_a(index));
1483 		sappend(s, new_stmt(BPF_ALU|BPF_ADD|BPF_X));
1484 		sappend(s, new_stmt(BPF_MISC|BPF_TAX));
1485 		sappend(s, tmp = new_stmt(BPF_LD|BPF_IND|size));
1486 		tmp->s.k = off_nl;
1487 		sappend(index->s, s);
1488 
1489 		gen_and(gen_proto_abbrev(proto), b = gen_ipfrag());
1490 		if (index->b)
1491 			gen_and(index->b, b);
1492 		index->b = b;
1493 		break;
1494 	}
1495 	index->regno = regno;
1496 	s = new_stmt(BPF_ST);
1497 	s->s.k = regno;
1498 	sappend(index->s, s);
1499 
1500 	return index;
1501 }
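/*
 * Explanatory note (added commentary): a struct arth carries the
 * statement list that leaves a value in the accumulator, the scratch
 * register (regno) the value is stored into, and optionally a block
 * of protocol checks that must also hold.  gen_load() emits the index
 * computation for the requested layer, ANDs in protocol checks where
 * needed, stores the result with BPF_ST and records the register so
 * later arithmetic can reload it via xfer_to_a()/xfer_to_x().
 */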
1502 
1503 struct block *
1504 gen_relation(code, a0, a1, reversed)
1505 	int code;
1506 	struct arth *a0, *a1;
1507 	int reversed;
1508 {
1509 	struct slist *s0, *s1, *s2;
1510 	struct block *b, *tmp;
1511 
1512 	s0 = xfer_to_x(a1);
1513 	s1 = xfer_to_a(a0);
1514 	s2 = new_stmt(BPF_ALU|BPF_SUB|BPF_X);
1515 	b = new_block(JMP(code));
1516 	if (code == BPF_JGT || code == BPF_JGE) {
1517 		reversed = !reversed;
1518 		b->s.k = 0x80000000;
1519 	}
1520 	if (reversed)
1521 		gen_not(b);
1522 
1523 	sappend(s1, s2);
1524 	sappend(s0, s1);
1525 	sappend(a1->s, s0);
1526 	sappend(a0->s, a1->s);
1527 
1528 	b->stmts = a0->s;
1529 
1530 	free_reg(a0->regno);
1531 	free_reg(a1->regno);
1532 
1533 	/* 'and' together protocol checks */
1534 	if (a0->b) {
1535 		if (a1->b) {
1536 			gen_and(a0->b, tmp = a1->b);
1537 		}
1538 		else
1539 			tmp = a0->b;
1540 	} else
1541 		tmp = a1->b;
1542 
1543 	if (tmp)
1544 		gen_and(tmp, b);
1545 
1546 	return b;
1547 }
1548 
1549 struct arth *
1550 gen_loadlen()
1551 {
1552 	int regno = alloc_reg();
1553 	struct arth *a = (struct arth *)newchunk(sizeof(*a));
1554 	struct slist *s;
1555 
1556 	s = new_stmt(BPF_LD|BPF_LEN);
1557 	s->next = new_stmt(BPF_ST);
1558 	s->next->s.k = regno;
1559 	a->s = s;
1560 	a->regno = regno;
1561 
1562 	return a;
1563 }
1564 
1565 struct arth *
1566 gen_loadi(val)
1567 	int val;
1568 {
1569 	struct arth *a;
1570 	struct slist *s;
1571 	int reg;
1572 
1573 	a = (struct arth *)newchunk(sizeof(*a));
1574 
1575 	reg = alloc_reg();
1576 
1577 	s = new_stmt(BPF_LD|BPF_IMM);
1578 	s->s.k = val;
1579 	s->next = new_stmt(BPF_ST);
1580 	s->next->s.k = reg;
1581 	a->s = s;
1582 	a->regno = reg;
1583 
1584 	return a;
1585 }
1586 
1587 struct arth *
1588 gen_neg(a)
1589 	struct arth *a;
1590 {
1591 	struct slist *s;
1592 
1593 	s = xfer_to_a(a);
1594 	sappend(a->s, s);
1595 	s = new_stmt(BPF_ALU|BPF_NEG);
1596 	s->s.k = 0;
1597 	sappend(a->s, s);
1598 	s = new_stmt(BPF_ST);
1599 	s->s.k = a->regno;
1600 	sappend(a->s, s);
1601 
1602 	return a;
1603 }
1604 
1605 struct arth *
1606 gen_arth(code, a0, a1)
1607 	int code;
1608 	struct arth *a0, *a1;
1609 {
1610 	struct slist *s0, *s1, *s2;
1611 
1612 	s0 = xfer_to_x(a1);
1613 	s1 = xfer_to_a(a0);
1614 	s2 = new_stmt(BPF_ALU|BPF_X|code);
1615 
1616 	sappend(s1, s2);
1617 	sappend(s0, s1);
1618 	sappend(a1->s, s0);
1619 	sappend(a0->s, a1->s);
1620 
1621 	free_reg(a1->regno);
1622 
1623 	s0 = new_stmt(BPF_ST);
1624 	a0->regno = s0->s.k = alloc_reg();
1625 	sappend(a0->s, s0);
1626 
1627 	return a0;
1628 }
1629 
1630 /*
1631  * Here we handle simple allocation of the scratch registers.
1632  * If too many registers are alloc'd, the allocator punts.
1633  */
1634 static int regused[BPF_MEMWORDS];
1635 static int curreg;
1636 
1637 /*
1638  * Return the next free register.
1639  */
1640 static int
1641 alloc_reg()
1642 {
1643 	int n = BPF_MEMWORDS;
1644 
1645 	while (--n >= 0) {
1646 		if (regused[curreg])
1647 			curreg = (curreg + 1) % BPF_MEMWORDS;
1648 		else {
1649 			regused[curreg] = 1;
1650 			return curreg;
1651 		}
1652 	}
1653 	bpf_error("too many registers needed to evaluate expression");
1654 	/* NOTREACHED */
1655 }
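/*
 * Explanatory note (added commentary): the "registers" here are the
 * BPF_MEMWORDS words of BPF scratch memory.  alloc_reg() searches
 * round-robin for a free one and gives up with bpf_error() when an
 * expression needs more than BPF_MEMWORDS values live at once.
 */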
1656 
1657 /*
1658  * Return a register to the table so it can
1659  * be used later.
1660  */
1661 static void
1662 free_reg(n)
1663 	int n;
1664 {
1665 	regused[n] = 0;
1666 }
1667 
1668 static struct block *
1669 gen_len(jmp, n)
1670 	int jmp, n;
1671 {
1672 	struct slist *s;
1673 	struct block *b;
1674 
1675 	s = new_stmt(BPF_LD|BPF_LEN);
1676 	b = new_block(JMP(jmp));
1677 	b->stmts = s;
1678 	b->s.k = n;
1679 
1680 	return b;
1681 }
1682 
1683 struct block *
1684 gen_greater(n)
1685 	int n;
1686 {
1687 	return gen_len(BPF_JGE, n);
1688 }
1689 
1690 struct block *
1691 gen_less(n)
1692 	int n;
1693 {
1694 	struct block *b;
1695 
1696 	b = gen_len(BPF_JGT, n);
1697 	gen_not(b);
1698 
1699 	return b;
1700 }
1701 
1702 struct block *
1703 gen_byteop(op, idx, val)
1704 	int op, idx, val;
1705 {
1706 	struct block *b;
1707 	struct slist *s;
1708 
1709 	switch (op) {
1710 	default:
1711 		abort();
1712 
1713 	case '=':
1714 		return gen_cmp((u_int)idx, BPF_B, (bpf_int32)val);
1715 
1716 	case '<':
1717 		b = gen_cmp((u_int)idx, BPF_B, (bpf_int32)val);
1718 		b->s.code = JMP(BPF_JGE);
1719 		gen_not(b);
1720 		return b;
1721 
1722 	case '>':
1723 		b = gen_cmp((u_int)idx, BPF_B, (bpf_int32)val);
1724 		b->s.code = JMP(BPF_JGT);
1725 		return b;
1726 
1727 	case '|':
1728 		s = new_stmt(BPF_ALU|BPF_OR|BPF_K);
1729 		break;
1730 
1731 	case '&':
1732 		s = new_stmt(BPF_ALU|BPF_AND|BPF_K);
1733 		break;
1734 	}
1735 	s->s.k = val;
1736 	b = new_block(JMP(BPF_JEQ));
1737 	b->stmts = s;
1738 	gen_not(b);
1739 
1740 	return b;
1741 }
1742 
1743 struct block *
1744 gen_broadcast(proto)
1745 	int proto;
1746 {
1747 	bpf_u_int32 hostmask;
1748 	struct block *b0, *b1, *b2;
1749 	static u_char ebroadcast[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1750 
1751 	switch (proto) {
1752 
1753 	case Q_DEFAULT:
1754 	case Q_LINK:
1755 		if (linktype == DLT_EN10MB)
1756 			return gen_ehostop(ebroadcast, Q_DST);
1757 		if (linktype == DLT_FDDI)
1758 			return gen_fhostop(ebroadcast, Q_DST);
1759 		bpf_error("not a broadcast link");
1760 		break;
1761 
1762 	case Q_IP:
1763 		b0 = gen_linktype(ETHERTYPE_IP);
1764 		hostmask = ~netmask;
1765 		b1 = gen_mcmp(off_nl + 16, BPF_W, (bpf_int32)0, hostmask);
1766 		b2 = gen_mcmp(off_nl + 16, BPF_W,
1767 			      (bpf_int32)(~0 & hostmask), hostmask);
1768 		gen_or(b1, b2);
1769 		gen_and(b0, b2);
1770 		return b2;
1771 	}
1772 	bpf_error("only ether/ip broadcast filters supported");
1773 }
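/*
 * Explanatory note (added commentary): for "ip broadcast" the host
 * part of the destination address -- the bits not covered by the
 * netmask handed to pcap_compile() -- must be either all zeros
 * (old-style broadcast) or all ones, which is why the compile-time
 * netmask argument matters here.
 */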
1774 
1775 struct block *
1776 gen_multicast(proto)
1777 	int proto;
1778 {
1779 	register struct block *b0, *b1;
1780 	register struct slist *s;
1781 
1782 	switch (proto) {
1783 
1784 	case Q_DEFAULT:
1785 	case Q_LINK:
1786 		if (linktype == DLT_EN10MB) {
1787 			/* ether[0] & 1 != 0 */
1788 			s = new_stmt(BPF_LD|BPF_B|BPF_ABS);
1789 			s->s.k = 0;
1790 			b0 = new_block(JMP(BPF_JSET));
1791 			b0->s.k = 1;
1792 			b0->stmts = s;
1793 			return b0;
1794 		}
1795 
1796 		if (linktype == DLT_FDDI) {
1797 			/* XXX TEST THIS: MIGHT NOT PORT PROPERLY XXX */
1798 			/* fddi[1] & 1 != 0 */
1799 			s = new_stmt(BPF_LD|BPF_B|BPF_ABS);
1800 			s->s.k = 1;
1801 			b0 = new_block(JMP(BPF_JSET));
1802 			b0->s.k = 1;
1803 			b0->stmts = s;
1804 			return b0;
1805 		}
1806 		/* Link not known to support multicasts */
1807 		break;
1808 
1809 	case Q_IP:
1810 		b0 = gen_linktype(ETHERTYPE_IP);
1811 		b1 = gen_cmp(off_nl + 16, BPF_B, (bpf_int32)224);
1812 		b1->s.code = JMP(BPF_JGE);
1813 		gen_and(b0, b1);
1814 		return b1;
1815 	}
1816 	bpf_error("only IP multicast filters supported on ethernet/FDDI");
1817 }
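/*
 * Explanatory note (added commentary): on Ethernet the multicast bit
 * is the low-order bit of the first destination-address byte, hence
 * the JSET against ether[0] & 1 above; for "ip multicast" the first
 * byte of the IP destination address must be >= 224 (class D and
 * above).
 */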
1818 
1819 /*
1820  * Generate code for inbound/outbound.  It's here so we can
1821  * make it link-type specific.  'dir' = 0 implies "inbound",
1822  * = 1 implies "outbound".
1823  */
1824 struct block *
1825 gen_inbound(dir)
1826 	int dir;
1827 {
1828 	register struct block *b0;
1829 
1830 	b0 = gen_relation(BPF_JEQ,
1831 			  gen_load(Q_LINK, gen_loadi(0), 1),
1832 			  gen_loadi(0),
1833 			  dir);
1834 	return (b0);
1835 }
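/*
 * Explanatory note (added commentary): the relation above loads the
 * first byte of the link-level header and compares it with 0, so it
 * only works on link types whose capture header records a direction
 * byte there (the SLIP pseudo-header is one such case); 'dir' merely
 * reverses the sense of the comparison for "outbound".
 */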
1836