xref: /openbsd-src/lib/libpcap/gencode.c (revision 01efc7ef8ab68f8ab38c9fcffd34efd1d036b4b9)
1 /*	$OpenBSD: gencode.c,v 1.11 1999/07/20 04:49:54 deraadt Exp $	*/
2 
3 /*
4  * Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that: (1) source code distributions
9  * retain the above copyright notice and this paragraph in its entirety, (2)
10  * distributions including binary code include the above copyright notice and
11  * this paragraph in its entirety in the documentation or other materials
12  * provided with the distribution, and (3) all advertising materials mentioning
13  * features or use of this software display the following acknowledgement:
14  * ``This product includes software developed by the University of California,
15  * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
16  * the University nor the names of its contributors may be used to endorse
17  * or promote products derived from this software without specific prior
18  * written permission.
19  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
20  * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
21  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22  */
23 #ifndef lint
24 static const char rcsid[] =
25     "@(#) $Header: /home/cvs/src/lib/libpcap/gencode.c,v 1.11 1999/07/20 04:49:54 deraadt Exp $ (LBL)";
26 #endif
27 
28 #include <sys/types.h>
29 #include <sys/socket.h>
30 #include <sys/time.h>
31 
32 #ifdef __STDC__
33 struct mbuf;
34 struct rtentry;
35 #endif
36 
37 #include <net/if.h>
38 
39 #include <netinet/in.h>
40 #include <netinet/if_ether.h>
41 
42 #include <stdlib.h>
43 #include <memory.h>
44 #include <setjmp.h>
45 #ifdef __STDC__
46 #include <stdarg.h>
47 #else
48 #include <varargs.h>
49 #endif
50 
51 #include "pcap-int.h"
52 
53 #include "ethertype.h"
54 #include "gencode.h"
55 #include "ppp.h"
56 #include <pcap-namedb.h>
57 
58 #ifdef HAVE_OS_PROTO_H
59 #include "os-proto.h"
60 #endif
61 
62 #define JMP(c) ((c)|BPF_JMP|BPF_K)
63 
64 /* Locals */
65 static jmp_buf top_ctx;
66 static pcap_t *bpf_pcap;
67 
68 /* XXX */
69 #ifdef PCAP_FDDIPAD
70 int	pcap_fddipad = PCAP_FDDIPAD;
71 #else
72 int	pcap_fddipad;
73 #endif
74 
75 /* VARARGS */
76 __dead void
77 #ifdef __STDC__
78 bpf_error(const char *fmt, ...)
79 #else
80 bpf_error(fmt, va_alist)
81 	const char *fmt;
82 	va_dcl
83 #endif
84 {
85 	va_list ap;
86 
87 #ifdef __STDC__
88 	va_start(ap, fmt);
89 #else
90 	va_start(ap);
91 #endif
92 	if (bpf_pcap != NULL)
93 		(void)vsnprintf(pcap_geterr(bpf_pcap), PCAP_ERRBUF_SIZE,
94 		    fmt, ap);
95 	va_end(ap);
96 	longjmp(top_ctx, 1);
97 	/* NOTREACHED */
98 }
99 
100 static void init_linktype(int);
101 
102 static int alloc_reg(void);
103 static void free_reg(int);
104 
105 static struct block *root;
106 
107 /*
108  * We divvy out chunks of memory rather than call malloc each time so
109  * we don't have to worry about leaking memory.  It's probably
110  * not a big deal if all this memory is wasted, but if this ever
111  * goes into a library, that would probably not be a good idea.
112  */
113 #define NCHUNKS 16
114 #define CHUNK0SIZE 1024
115 struct chunk {
116 	u_int n_left;
117 	void *m;
118 };
119 
120 static struct chunk chunks[NCHUNKS];
121 static int cur_chunk;
122 
123 static void *newchunk(u_int);
124 static void freechunks(void);
125 static __inline struct block *new_block(int);
126 static __inline struct slist *new_stmt(int);
127 static struct block *gen_retblk(int);
128 static __inline void syntax(void);
129 
130 static void backpatch(struct block *, struct block *);
131 static void merge(struct block *, struct block *);
132 static struct block *gen_cmp(u_int, u_int, bpf_int32);
133 static struct block *gen_mcmp(u_int, u_int, bpf_int32, bpf_u_int32);
134 static struct block *gen_bcmp(u_int, u_int, const u_char *);
135 static struct block *gen_uncond(int);
136 static __inline struct block *gen_true(void);
137 static __inline struct block *gen_false(void);
138 static struct block *gen_linktype(int);
139 static struct block *gen_hostop(bpf_u_int32, bpf_u_int32, int, int, u_int, u_int);
140 static struct block *gen_ehostop(const u_char *, int);
141 static struct block *gen_fhostop(const u_char *, int);
142 static struct block *gen_dnhostop(bpf_u_int32, int, u_int);
143 static struct block *gen_host(bpf_u_int32, bpf_u_int32, int, int);
144 static struct block *gen_gateway(const u_char *, bpf_u_int32 **, int, int);
145 static struct block *gen_ipfrag(void);
146 static struct block *gen_portatom(int, bpf_int32);
147 struct block *gen_portop(int, int, int);
148 static struct block *gen_port(int, int, int);
149 static int lookup_proto(const char *, int);
150 static struct block *gen_proto(int, int, int);
151 static struct slist *xfer_to_x(struct arth *);
152 static struct slist *xfer_to_a(struct arth *);
153 static struct block *gen_len(int, int);
154 
155 static void *
156 newchunk(n)
157 	u_int n;
158 {
159 	struct chunk *cp;
160 	int k, size;
161 
162 	/* XXX Round up to nearest long. */
163 	n = (n + sizeof(long) - 1) & ~(sizeof(long) - 1);
164 
165 	cp = &chunks[cur_chunk];
166 	if (n > cp->n_left) {
167 		++cp, k = ++cur_chunk;
168 		if (k >= NCHUNKS)
169 			bpf_error("out of memory");
170 		size = CHUNK0SIZE << k;
171 		cp->m = (void *)malloc(size);
172 		memset((char *)cp->m, 0, size);
173 		cp->n_left = size;
174 		if (n > size)
175 			bpf_error("out of memory");
176 	}
177 	cp->n_left -= n;
178 	return (void *)((char *)cp->m + cp->n_left);
179 }
180 
181 static void
182 freechunks()
183 {
184 	int i;
185 
186 	cur_chunk = 0;
187 	for (i = 0; i < NCHUNKS; ++i)
188 		if (chunks[i].m != NULL) {
189 			free(chunks[i].m);
190 			chunks[i].m = NULL;
191 		}
192 }
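
/*
 * Illustrative note (not part of the original source): chunk k holds
 * CHUNK0SIZE << k bytes.  chunks[0] starts with n_left == 0, so the first
 * request already advances to chunk 1 (2048 bytes); the largest chunk is
 * CHUNK0SIZE << 15 (32 MB), for roughly 64 MB of total capacity before
 * newchunk() reports "out of memory".  A typical lifetime looks like the
 * hypothetical caller sketched below.
 */
#if 0	/* example only */
	{
		char *s;

		s = sdup("host foo");	/* carved out of the current chunk */
		/* generate code that points into chunk memory ... */
		freechunks();		/* everything is released in one sweep */
	}
#endif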
193 
194 /*
195  * A strdup whose allocations are freed after code generation is over.
196  */
197 char *
198 sdup(s)
199 	register const char *s;
200 {
201 	int n = strlen(s) + 1;
202 	char *cp = newchunk(n);
203 
204 	strlcpy(cp, s, n);
205 	return (cp);
206 }
207 
208 static __inline struct block *
209 new_block(code)
210 	int code;
211 {
212 	struct block *p;
213 
214 	p = (struct block *)newchunk(sizeof(*p));
215 	p->s.code = code;
216 	p->head = p;
217 
218 	return p;
219 }
220 
221 static __inline struct slist *
222 new_stmt(code)
223 	int code;
224 {
225 	struct slist *p;
226 
227 	p = (struct slist *)newchunk(sizeof(*p));
228 	p->s.code = code;
229 
230 	return p;
231 }
232 
233 static struct block *
234 gen_retblk(v)
235 	int v;
236 {
237 	struct block *b = new_block(BPF_RET|BPF_K);
238 
239 	b->s.k = v;
240 	return b;
241 }
242 
243 static __inline void
244 syntax()
245 {
246 	bpf_error("syntax error in filter expression");
247 }
248 
249 static bpf_u_int32 netmask;
250 static int snaplen;
251 
252 int
253 pcap_compile(pcap_t *p, struct bpf_program *program,
254 	     char *buf, int optimize, bpf_u_int32 mask)
255 {
256 	extern int n_errors;
257 	int len;
258 
259 	n_errors = 0;
260 	root = NULL;
261 	bpf_pcap = p;
262 	if (setjmp(top_ctx)) {
263 		freechunks();
264 		return (-1);
265 	}
266 
267 	netmask = mask;
268 	snaplen = pcap_snapshot(p);
269 
270 	lex_init(buf ? buf : "");
271 	init_linktype(pcap_datalink(p));
272 	(void)pcap_parse();
273 
274 	if (n_errors)
275 		syntax();
276 
277 	if (root == NULL)
278 		root = gen_retblk(snaplen);
279 
280 	if (optimize) {
281 		bpf_optimize(&root);
282 		if (root == NULL ||
283 		    (root->s.code == (BPF_RET|BPF_K) && root->s.k == 0))
284 			bpf_error("expression rejects all packets");
285 	}
286 	program->bf_insns = icode_to_fcode(root, &len);
287 	program->bf_len = len;
288 
289 	freechunks();
290 	return (0);
291 }
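
/*
 * Minimal sketch of how an application drives this entry point; not part
 * of the original source, and the device name and filter string below are
 * made up.  Error text written by bpf_error() above is retrieved with
 * pcap_geterr().  Assumes <stdio.h> for fprintf().
 */
#if 0	/* example only */
	{
		char errbuf[PCAP_ERRBUF_SIZE];
		struct bpf_program fprog;
		bpf_u_int32 net, mask;
		pcap_t *pd;

		pd = pcap_open_live("le0", 68, 1, 1000, errbuf);
		if (pd == NULL) {
			fprintf(stderr, "%s\n", errbuf);
			exit(1);
		}
		if (pcap_lookupnet("le0", &net, &mask, errbuf) < 0)
			mask = 0;	/* fall back if the lookup fails */
		if (pcap_compile(pd, &fprog, "tcp port 80", 1, mask) < 0 ||
		    pcap_setfilter(pd, &fprog) < 0) {
			fprintf(stderr, "%s\n", pcap_geterr(pd));
			exit(1);
		}
	}
#endif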
292 
293 /*
294  * Backpatch the blocks in 'list' to 'target'.  The 'sense' field indicates
295  * which of the jt and jf fields has been resolved and which is a pointer
296  * back to another unresolved block (or nil).  At least one of the fields
297  * in each block is already resolved.
298  */
299 static void
300 backpatch(list, target)
301 	struct block *list, *target;
302 {
303 	struct block *next;
304 
305 	while (list) {
306 		if (!list->sense) {
307 			next = JT(list);
308 			JT(list) = target;
309 		} else {
310 			next = JF(list);
311 			JF(list) = target;
312 		}
313 		list = next;
314 	}
315 }
316 
317 /*
318  * Merge the lists in b0 and b1, using the 'sense' field to indicate
319  * which of jt and jf is the link.
320  */
321 static void
322 merge(b0, b1)
323 	struct block *b0, *b1;
324 {
325 	register struct block **p = &b0;
326 
327 	/* Find end of list. */
328 	while (*p)
329 		p = !((*p)->sense) ? &JT(*p) : &JF(*p);
330 
331 	/* Concatenate the lists. */
332 	*p = b1;
333 }
334 
335 void
336 finish_parse(p)
337 	struct block *p;
338 {
339 	backpatch(p, gen_retblk(snaplen));
340 	p->sense = !p->sense;
341 	backpatch(p, gen_retblk(0));
342 	root = p->head;
343 }
344 
345 void
346 gen_and(b0, b1)
347 	struct block *b0, *b1;
348 {
349 	backpatch(b0, b1->head);
350 	b0->sense = !b0->sense;
351 	b1->sense = !b1->sense;
352 	merge(b1, b0);
353 	b1->sense = !b1->sense;
354 	b1->head = b0->head;
355 }
356 
357 void
358 gen_or(b0, b1)
359 	struct block *b0, *b1;
360 {
361 	b0->sense = !b0->sense;
362 	backpatch(b0, b1->head);
363 	b0->sense = !b0->sense;
364 	merge(b1, b0);
365 	b1->head = b0->head;
366 }
367 
368 void
369 gen_not(b)
370 	struct block *b;
371 {
372 	b->sense = !b->sense;
373 }
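
/*
 * Rough sketch (not part of the original source) of how the grammar's
 * semantic actions use the helpers above; this is approximately what
 * parsing "tcp and dst port 80" does, assuming the usual yacc actions.
 */
#if 0	/* example only */
	{
		struct block *b0, *b1;

		b0 = gen_proto_abbrev(Q_TCP);		/* "tcp" */
		b1 = gen_port(80, PROTO_UNDEF, Q_DST);	/* "dst port 80" */
		gen_and(b0, b1);	/* b0's true exits now fall through to b1 */
		finish_parse(b1);	/* append the accept/reject RET blocks */
	}
#endif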
374 
375 static struct block *
376 gen_cmp(offset, size, v)
377 	u_int offset, size;
378 	bpf_int32 v;
379 {
380 	struct slist *s;
381 	struct block *b;
382 
383 	s = new_stmt(BPF_LD|BPF_ABS|size);
384 	s->s.k = offset;
385 
386 	b = new_block(JMP(BPF_JEQ));
387 	b->stmts = s;
388 	b->s.k = v;
389 
390 	return b;
391 }
392 
393 static struct block *
394 gen_mcmp(offset, size, v, mask)
395 	u_int offset, size;
396 	bpf_int32 v;
397 	bpf_u_int32 mask;
398 {
399 	struct block *b = gen_cmp(offset, size, v);
400 	struct slist *s;
401 
402 	if (mask != 0xffffffff) {
403 		s = new_stmt(BPF_ALU|BPF_AND|BPF_K);
404 		s->s.k = mask;
405 		b->stmts->next = s;
406 	}
407 	return b;
408 }
409 
410 static struct block *
411 gen_bcmp(offset, size, v)
412 	register u_int offset, size;
413 	register const u_char *v;
414 {
415 	register struct block *b, *tmp;
416 
417 	b = NULL;
418 	while (size >= 4) {
419 		register const u_char *p = &v[size - 4];
420 		bpf_int32 w = ((bpf_int32)p[0] << 24) |
421 		    ((bpf_int32)p[1] << 16) | ((bpf_int32)p[2] << 8) | p[3];
422 
423 		tmp = gen_cmp(offset + size - 4, BPF_W, w);
424 		if (b != NULL)
425 			gen_and(b, tmp);
426 		b = tmp;
427 		size -= 4;
428 	}
429 	while (size >= 2) {
430 		register const u_char *p = &v[size - 2];
431 		bpf_int32 w = ((bpf_int32)p[0] << 8) | p[1];
432 
433 		tmp = gen_cmp(offset + size - 2, BPF_H, w);
434 		if (b != NULL)
435 			gen_and(b, tmp);
436 		b = tmp;
437 		size -= 2;
438 	}
439 	if (size > 0) {
440 		tmp = gen_cmp(offset, BPF_B, (bpf_int32)v[0]);
441 		if (b != NULL)
442 			gen_and(b, tmp);
443 		b = tmp;
444 	}
445 	return b;
446 }
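
/*
 * Example (not part of the original source): gen_bcmp(6, 6, eaddr), as
 * used by gen_ehostop() below for a source Ethernet address, breaks the
 * 6-byte comparison into a 32-bit compare of eaddr[2..5] at packet
 * offset 8 and a 16-bit compare of eaddr[0..1] at packet offset 6,
 * and'ed together with gen_and().
 */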
447 
448 /*
449  * Various code constructs need to know the layout of the data link
450  * layer.  These variables give the necessary offsets.  off_linktype
451  * is set to -1 for no encapsulation, in which case IP is assumed.
452  */
453 static u_int off_linktype;
454 static u_int off_nl;
455 static int linktype;
456 
457 static void
458 init_linktype(type)
459 	int type;
460 {
461 	linktype = type;
462 
463 	switch (type) {
464 
465 	case DLT_EN10MB:
466 		off_linktype = 12;
467 		off_nl = 14;
468 		return;
469 
470 	case DLT_SLIP:
471 		/*
472 		 * SLIP doesn't have a link level type.  The 16 byte
473 		 * header is hacked into our SLIP driver.
474 		 */
475 		off_linktype = -1;
476 		off_nl = 16;
477 		return;
478 
479 	case DLT_SLIP_BSDOS:
480 		/* XXX this may be the same as the DLT_PPP_BSDOS case */
481 		off_linktype = -1;
482 		/* XXX end */
483 		off_nl = 24;
484 		return;
485 
486 	case DLT_NULL:
487 		off_linktype = 0;
488 		off_nl = 4;
489 		return;
490 
491 	case DLT_PPP:
492 		off_linktype = 2;
493 		off_nl = 4;
494 		return;
495 
496 	case DLT_PPP_BSDOS:
497 		off_linktype = 5;
498 		off_nl = 24;
499 		return;
500 
501 	case DLT_FDDI:
502 		/*
503 		 * FDDI doesn't really have a link-level type field.
504 		 * We assume that SSAP = SNAP is being used and pick
505 		 * out the encapsulated Ethernet type.
506 		 */
507 		off_linktype = 19;
508 #ifdef PCAP_FDDIPAD
509 		off_linktype += pcap_fddipad;
510 #endif
511 		off_nl = 21;
512 #ifdef PCAP_FDDIPAD
513 		off_nl += pcap_fddipad;
514 #endif
515 		return;
516 
517 	case DLT_IEEE802:
518 		off_linktype = 20;
519 		off_nl = 22;
520 		return;
521 
522 	case DLT_ATM_RFC1483:
523 		/*
524 		 * assume routed, non-ISO PDUs
525 	 * (i.e., LLC = 0xAA-AA-03, OUI = 0x00-00-00)
526 		 */
527 		off_linktype = 6;
528 		off_nl = 8;
529 		return;
530 
531 	case DLT_LOOP:
532 		off_linktype = -1;
533 		off_nl = 4;
534 		return;
535 
536 	case DLT_ENC:
537 		off_linktype = -1;
538 		off_nl = 12;
539 		return;
540 
541 	case DLT_RAW:
542 		off_linktype = -1;
543 		off_nl = 0;
544 		return;
545 	}
546 	bpf_error("unknown data link type 0x%x", linktype);
547 	/* NOTREACHED */
548 }
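
/*
 * Worked example (not part of the original source): for DLT_EN10MB the
 * EtherType lives at bytes 12-13 (off_linktype == 12) and the network
 * layer starts at byte 14 (off_nl == 14), so the IP protocol byte that
 * gen_proto_abbrev() tests below sits at absolute offset off_nl + 9 == 23.
 */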
549 
550 static struct block *
551 gen_uncond(rsense)
552 	int rsense;
553 {
554 	struct block *b;
555 	struct slist *s;
556 
557 	s = new_stmt(BPF_LD|BPF_IMM);
558 	s->s.k = !rsense;
559 	b = new_block(JMP(BPF_JEQ));
560 	b->stmts = s;
561 
562 	return b;
563 }
564 
565 static __inline struct block *
566 gen_true()
567 {
568 	return gen_uncond(1);
569 }
570 
571 static __inline struct block *
572 gen_false()
573 {
574 	return gen_uncond(0);
575 }
576 
577 static struct block *
578 gen_linktype(proto)
579 	register int proto;
580 {
581 	struct block *b0, *b1;
582 
583 	/* If we're not using encapsulation and checking for IP, we're done */
584 	if (off_linktype == -1 && proto == ETHERTYPE_IP)
585 		return gen_true();
586 
587 	switch (linktype) {
588 
589 	case DLT_SLIP:
590 		return gen_false();
591 
592 	case DLT_PPP:
593 		if (proto == ETHERTYPE_IP)
594 			proto = PPP_IP;			/* XXX was 0x21 */
595 		break;
596 
597 	case DLT_PPP_BSDOS:
598 		switch (proto) {
599 
600 		case ETHERTYPE_IP:
601 			b0 = gen_cmp(off_linktype, BPF_H, PPP_IP);
602 			b1 = gen_cmp(off_linktype, BPF_H, PPP_VJC);
603 			gen_or(b0, b1);
604 			b0 = gen_cmp(off_linktype, BPF_H, PPP_VJNC);
605 			gen_or(b1, b0);
606 			return b0;
607 
608 		case ETHERTYPE_DN:
609 			proto = PPP_DECNET;
610 			break;
611 
612 		case ETHERTYPE_ATALK:
613 			proto = PPP_APPLE;
614 			break;
615 
616 		case ETHERTYPE_NS:
617 			proto = PPP_NS;
618 			break;
619 		}
620 		break;
621 
622 	case DLT_LOOP:
623 	case DLT_ENC:
624 	case DLT_NULL:
625 		/* XXX */
626 		if (proto == ETHERTYPE_IP)
627 			return (gen_cmp(0, BPF_W, (bpf_int32)htonl(AF_INET)));
628 		else
629 			return gen_false();
630 	}
631 	return gen_cmp(off_linktype, BPF_H, (bpf_int32)proto);
632 }
633 
634 static struct block *
635 gen_hostop(addr, mask, dir, proto, src_off, dst_off)
636 	bpf_u_int32 addr;
637 	bpf_u_int32 mask;
638 	int dir, proto;
639 	u_int src_off, dst_off;
640 {
641 	struct block *b0, *b1;
642 	u_int offset;
643 
644 	switch (dir) {
645 
646 	case Q_SRC:
647 		offset = src_off;
648 		break;
649 
650 	case Q_DST:
651 		offset = dst_off;
652 		break;
653 
654 	case Q_AND:
655 		b0 = gen_hostop(addr, mask, Q_SRC, proto, src_off, dst_off);
656 		b1 = gen_hostop(addr, mask, Q_DST, proto, src_off, dst_off);
657 		gen_and(b0, b1);
658 		return b1;
659 
660 	case Q_OR:
661 	case Q_DEFAULT:
662 		b0 = gen_hostop(addr, mask, Q_SRC, proto, src_off, dst_off);
663 		b1 = gen_hostop(addr, mask, Q_DST, proto, src_off, dst_off);
664 		gen_or(b0, b1);
665 		return b1;
666 
667 	default:
668 		abort();
669 	}
670 	b0 = gen_linktype(proto);
671 	b1 = gen_mcmp(offset, BPF_W, (bpf_int32)addr, mask);
672 	gen_and(b0, b1);
673 	return b1;
674 }
675 
676 static struct block *
677 gen_ehostop(eaddr, dir)
678 	register const u_char *eaddr;
679 	register int dir;
680 {
681 	register struct block *b0, *b1;
682 
683 	switch (dir) {
684 	case Q_SRC:
685 		return gen_bcmp(6, 6, eaddr);
686 
687 	case Q_DST:
688 		return gen_bcmp(0, 6, eaddr);
689 
690 	case Q_AND:
691 		b0 = gen_ehostop(eaddr, Q_SRC);
692 		b1 = gen_ehostop(eaddr, Q_DST);
693 		gen_and(b0, b1);
694 		return b1;
695 
696 	case Q_DEFAULT:
697 	case Q_OR:
698 		b0 = gen_ehostop(eaddr, Q_SRC);
699 		b1 = gen_ehostop(eaddr, Q_DST);
700 		gen_or(b0, b1);
701 		return b1;
702 	}
703 	abort();
704 	/* NOTREACHED */
705 }
706 
707 /*
708  * Like gen_ehostop, but for DLT_FDDI
709  */
710 static struct block *
711 gen_fhostop(eaddr, dir)
712 	register const u_char *eaddr;
713 	register int dir;
714 {
715 	struct block *b0, *b1;
716 
717 	switch (dir) {
718 	case Q_SRC:
719 #ifdef PCAP_FDDIPAD
720 		return gen_bcmp(6 + 1 + pcap_fddipad, 6, eaddr);
721 #else
722 		return gen_bcmp(6 + 1, 6, eaddr);
723 #endif
724 
725 	case Q_DST:
726 #ifdef PCAP_FDDIPAD
727 		return gen_bcmp(0 + 1 + pcap_fddipad, 6, eaddr);
728 #else
729 		return gen_bcmp(0 + 1, 6, eaddr);
730 #endif
731 
732 	case Q_AND:
733 		b0 = gen_fhostop(eaddr, Q_SRC);
734 		b1 = gen_fhostop(eaddr, Q_DST);
735 		gen_and(b0, b1);
736 		return b1;
737 
738 	case Q_DEFAULT:
739 	case Q_OR:
740 		b0 = gen_fhostop(eaddr, Q_SRC);
741 		b1 = gen_fhostop(eaddr, Q_DST);
742 		gen_or(b0, b1);
743 		return b1;
744 	}
745 	abort();
746 	/* NOTREACHED */
747 }
748 
749 /*
750  * This is quite tricky because there may be pad bytes in front of the
751  * DECNET header, and then there are two possible data packet formats that
752  * carry both src and dst addresses, plus 5 packet types in a format that
753  * carries only the src node, plus 2 types that use a different format and
754  * also carry just the src node.
755  *
756  * Yuck.
757  *
758  * Instead of handling all of those correctly, we just look for data
759  * packets with 0 or 1 bytes of padding.  If you want to look at other
760  * packets, that will require a lot more hacking.
761  *
762  * To add support for filtering on DECNET "areas" (network numbers)
763  * one would want to add a "mask" argument to this routine.  That would
764  * make the filter even more inefficient, although one could be clever
765  * and not generate masking instructions if the mask is 0xFFFF.
766  */
767 static struct block *
768 gen_dnhostop(addr, dir, base_off)
769 	bpf_u_int32 addr;
770 	int dir;
771 	u_int base_off;
772 {
773 	struct block *b0, *b1, *b2, *tmp;
774 	u_int offset_lh;	/* offset if long header is received */
775 	u_int offset_sh;	/* offset if short header is received */
776 
777 	switch (dir) {
778 
779 	case Q_DST:
780 		offset_sh = 1;	/* follows flags */
781 		offset_lh = 7;	/* flgs,darea,dsubarea,HIORD */
782 		break;
783 
784 	case Q_SRC:
785 		offset_sh = 3;	/* follows flags, dstnode */
786 		offset_lh = 15;	/* flgs,darea,dsubarea,did,sarea,ssub,HIORD */
787 		break;
788 
789 	case Q_AND:
790 		/* Inefficient because we do our Calvinball dance twice */
791 		b0 = gen_dnhostop(addr, Q_SRC, base_off);
792 		b1 = gen_dnhostop(addr, Q_DST, base_off);
793 		gen_and(b0, b1);
794 		return b1;
795 
796 	case Q_OR:
797 	case Q_DEFAULT:
798 		/* Inefficient because we do our Calvinball dance twice */
799 		b0 = gen_dnhostop(addr, Q_SRC, base_off);
800 		b1 = gen_dnhostop(addr, Q_DST, base_off);
801 		gen_or(b0, b1);
802 		return b1;
803 
804 	default:
805 		abort();
806 	}
807 	b0 = gen_linktype(ETHERTYPE_DN);
808 	/* Check for pad = 1, long header case */
809 	tmp = gen_mcmp(base_off + 2, BPF_H,
810 	    (bpf_int32)ntohs(0x0681), (bpf_int32)ntohs(0x07FF));
811 	b1 = gen_cmp(base_off + 2 + 1 + offset_lh,
812 	    BPF_H, (bpf_int32)ntohs(addr));
813 	gen_and(tmp, b1);
814 	/* Check for pad = 0, long header case */
815 	tmp = gen_mcmp(base_off + 2, BPF_B, (bpf_int32)0x06, (bpf_int32)0x7);
816 	b2 = gen_cmp(base_off + 2 + offset_lh, BPF_H, (bpf_int32)ntohs(addr));
817 	gen_and(tmp, b2);
818 	gen_or(b2, b1);
819 	/* Check for pad = 1, short header case */
820 	tmp = gen_mcmp(base_off + 2, BPF_H,
821 	    (bpf_int32)ntohs(0x0281), (bpf_int32)ntohs(0x07FF));
822 	b2 = gen_cmp(base_off + 2 + 1 + offset_sh,
823 	    BPF_H, (bpf_int32)ntohs(addr));
824 	gen_and(tmp, b2);
825 	gen_or(b2, b1);
826 	/* Check for pad = 0, short header case */
827 	tmp = gen_mcmp(base_off + 2, BPF_B, (bpf_int32)0x02, (bpf_int32)0x7);
828 	b2 = gen_cmp(base_off + 2 + offset_sh, BPF_H, (bpf_int32)ntohs(addr));
829 	gen_and(tmp, b2);
830 	gen_or(b2, b1);
831 
832 	/* Combine with test for linktype */
833 	gen_and(b0, b1);
834 	return b1;
835 }
836 
837 static struct block *
838 gen_host(addr, mask, proto, dir)
839 	bpf_u_int32 addr;
840 	bpf_u_int32 mask;
841 	int proto;
842 	int dir;
843 {
844 	struct block *b0, *b1;
845 
846 	switch (proto) {
847 
848 	case Q_DEFAULT:
849 		b0 = gen_host(addr, mask, Q_IP, dir);
850 		b1 = gen_host(addr, mask, Q_ARP, dir);
851 		gen_or(b0, b1);
852 		b0 = gen_host(addr, mask, Q_RARP, dir);
853 		gen_or(b1, b0);
854 		return b0;
855 
856 	case Q_IP:
857 		return gen_hostop(addr, mask, dir, ETHERTYPE_IP,
858 				  off_nl + 12, off_nl + 16);
859 
860 	case Q_RARP:
861 		return gen_hostop(addr, mask, dir, ETHERTYPE_REVARP,
862 				  off_nl + 14, off_nl + 24);
863 
864 	case Q_ARP:
865 		return gen_hostop(addr, mask, dir, ETHERTYPE_ARP,
866 				  off_nl + 14, off_nl + 24);
867 
868 	case Q_TCP:
869 		bpf_error("'tcp' modifier applied to host");
870 
871 	case Q_UDP:
872 		bpf_error("'udp' modifier applied to host");
873 
874 	case Q_ICMP:
875 		bpf_error("'icmp' modifier applied to host");
876 
877 	case Q_IGMP:
878 		bpf_error("'igmp' modifier applied to host");
879 
880 	case Q_IGRP:
881 		bpf_error("'igrp' modifier applied to host");
882 
883 	case Q_ATALK:
884 		bpf_error("ATALK host filtering not implemented");
885 
886 	case Q_DECNET:
887 		return gen_dnhostop(addr, dir, off_nl);
888 
889 	case Q_SCA:
890 		bpf_error("SCA host filtering not implemented");
891 
892 	case Q_LAT:
893 		bpf_error("LAT host filtering not implemented");
894 
895 	case Q_MOPDL:
896 		bpf_error("MOPDL host filtering not implemented");
897 
898 	case Q_MOPRC:
899 		bpf_error("MOPRC host filtering not implemented");
900 
901 	default:
902 		abort();
903 	}
904 	/* NOTREACHED */
905 }
906 
907 static struct block *
908 gen_gateway(eaddr, alist, proto, dir)
909 	const u_char *eaddr;
910 	bpf_u_int32 **alist;
911 	int proto;
912 	int dir;
913 {
914 	struct block *b0, *b1, *tmp;
915 
916 	if (dir != 0)
917 		bpf_error("direction applied to 'gateway'");
918 
919 	switch (proto) {
920 	case Q_DEFAULT:
921 	case Q_IP:
922 	case Q_ARP:
923 	case Q_RARP:
924 		if (linktype == DLT_EN10MB)
925 			b0 = gen_ehostop(eaddr, Q_OR);
926 		else if (linktype == DLT_FDDI)
927 			b0 = gen_fhostop(eaddr, Q_OR);
928 		else
929 			bpf_error(
930 			    "'gateway' supported only on ethernet or FDDI");
931 
932 		b1 = gen_host(**alist++, 0xffffffff, proto, Q_OR);
933 		while (*alist) {
934 			tmp = gen_host(**alist++, 0xffffffff, proto, Q_OR);
935 			gen_or(b1, tmp);
936 			b1 = tmp;
937 		}
938 		gen_not(b1);
939 		gen_and(b0, b1);
940 		return b1;
941 	}
942 	bpf_error("illegal modifier of 'gateway'");
943 	/* NOTREACHED */
944 }
945 
946 struct block *
947 gen_proto_abbrev(proto)
948 	int proto;
949 {
950 	struct block *b0, *b1;
951 
952 	switch (proto) {
953 
954 	case Q_TCP:
955 		b0 = gen_linktype(ETHERTYPE_IP);
956 		b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)IPPROTO_TCP);
957 		gen_and(b0, b1);
958 		break;
959 
960 	case Q_UDP:
961 		b0 =  gen_linktype(ETHERTYPE_IP);
962 		b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)IPPROTO_UDP);
963 		gen_and(b0, b1);
964 		break;
965 
966 	case Q_ICMP:
967 		b0 =  gen_linktype(ETHERTYPE_IP);
968 		b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)IPPROTO_ICMP);
969 		gen_and(b0, b1);
970 		break;
971 
972 	case Q_IGMP:
973 		b0 =  gen_linktype(ETHERTYPE_IP);
974 		b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)2);
975 		gen_and(b0, b1);
976 		break;
977 
978 #ifndef	IPPROTO_IGRP
979 #define	IPPROTO_IGRP	9
980 #endif
981 	case Q_IGRP:
982 		b0 =  gen_linktype(ETHERTYPE_IP);
983 		b1 = gen_cmp(off_nl + 9, BPF_B, (long)IPPROTO_IGRP);
984 		gen_and(b0, b1);
985 		break;
986 
987 	case Q_IP:
988 		b1 =  gen_linktype(ETHERTYPE_IP);
989 		break;
990 
991 	case Q_ARP:
992 		b1 =  gen_linktype(ETHERTYPE_ARP);
993 		break;
994 
995 	case Q_RARP:
996 		b1 =  gen_linktype(ETHERTYPE_REVARP);
997 		break;
998 
999 	case Q_LINK:
1000 		bpf_error("link layer applied in wrong context");
1001 
1002 	case Q_ATALK:
1003 		b1 =  gen_linktype(ETHERTYPE_ATALK);
1004 		break;
1005 
1006 	case Q_DECNET:
1007 		b1 =  gen_linktype(ETHERTYPE_DN);
1008 		break;
1009 
1010 	case Q_SCA:
1011 		b1 =  gen_linktype(ETHERTYPE_SCA);
1012 		break;
1013 
1014 	case Q_LAT:
1015 		b1 =  gen_linktype(ETHERTYPE_LAT);
1016 		break;
1017 
1018 	case Q_MOPDL:
1019 		b1 =  gen_linktype(ETHERTYPE_MOPDL);
1020 		break;
1021 
1022 	case Q_MOPRC:
1023 		b1 =  gen_linktype(ETHERTYPE_MOPRC);
1024 		break;
1025 
1026 	default:
1027 		abort();
1028 	}
1029 	return b1;
1030 }
1031 
1032 static struct block *
1033 gen_ipfrag()
1034 {
1035 	struct slist *s;
1036 	struct block *b;
1037 
1038 	/* not ip frag */
1039 	s = new_stmt(BPF_LD|BPF_H|BPF_ABS);
1040 	s->s.k = off_nl + 6;
1041 	b = new_block(JMP(BPF_JSET));
1042 	b->s.k = 0x1fff;
1043 	b->stmts = s;
1044 	gen_not(b);
1045 
1046 	return b;
1047 }
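
/*
 * Example (not part of the original source): the test built above is what
 * the filter language would write as "ip[6:2] & 0x1fff = 0", i.e. the
 * fragment offset field is zero.  Port tests are and'ed with it because
 * TCP/UDP headers are only present in the first fragment.
 */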
1048 
1049 static struct block *
1050 gen_portatom(off, v)
1051 	int off;
1052 	bpf_int32 v;
1053 {
1054 	struct slist *s;
1055 	struct block *b;
1056 
1057 	s = new_stmt(BPF_LDX|BPF_MSH|BPF_B);
1058 	s->s.k = off_nl;
1059 
1060 	s->next = new_stmt(BPF_LD|BPF_IND|BPF_H);
1061 	s->next->s.k = off_nl + off;
1062 
1063 	b = new_block(JMP(BPF_JEQ));
1064 	b->stmts = s;
1065 	b->s.k = v;
1066 
1067 	return b;
1068 }
1069 
1070 struct block *
1071 gen_portop(port, proto, dir)
1072 	int port, proto, dir;
1073 {
1074 	struct block *b0, *b1, *tmp;
1075 
1076 	/* ip proto 'proto' */
1077 	tmp = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)proto);
1078 	b0 = gen_ipfrag();
1079 	gen_and(tmp, b0);
1080 
1081 	switch (dir) {
1082 	case Q_SRC:
1083 		b1 = gen_portatom(0, (bpf_int32)port);
1084 		break;
1085 
1086 	case Q_DST:
1087 		b1 = gen_portatom(2, (bpf_int32)port);
1088 		break;
1089 
1090 	case Q_OR:
1091 	case Q_DEFAULT:
1092 		tmp = gen_portatom(0, (bpf_int32)port);
1093 		b1 = gen_portatom(2, (bpf_int32)port);
1094 		gen_or(tmp, b1);
1095 		break;
1096 
1097 	case Q_AND:
1098 		tmp = gen_portatom(0, (bpf_int32)port);
1099 		b1 = gen_portatom(2, (bpf_int32)port);
1100 		gen_and(tmp, b1);
1101 		break;
1102 
1103 	default:
1104 		abort();
1105 	}
1106 	gen_and(b0, b1);
1107 
1108 	return b1;
1109 }
1110 
1111 static struct block *
1112 gen_port(port, ip_proto, dir)
1113 	int port;
1114 	int ip_proto;
1115 	int dir;
1116 {
1117 	struct block *b0, *b1, *tmp;
1118 
1119 	/* ether proto ip */
1120 	b0 =  gen_linktype(ETHERTYPE_IP);
1121 
1122 	switch (ip_proto) {
1123 	case IPPROTO_UDP:
1124 	case IPPROTO_TCP:
1125 		b1 = gen_portop(port, ip_proto, dir);
1126 		break;
1127 
1128 	case PROTO_UNDEF:
1129 		tmp = gen_portop(port, IPPROTO_TCP, dir);
1130 		b1 = gen_portop(port, IPPROTO_UDP, dir);
1131 		gen_or(tmp, b1);
1132 		break;
1133 
1134 	default:
1135 		abort();
1136 	}
1137 	gen_and(b0, b1);
1138 	return b1;
1139 }
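
/*
 * Example (not part of the original source): a bare "port 53" reaches
 * this routine with ip_proto == PROTO_UNDEF and matches either transport
 * protocol, so the generated test is roughly "ip and (tcp or udp) and
 * (src or dst port 53) and first fragment".
 */
#if 0	/* example only */
	{
		struct block *b;

		b = gen_port(53, PROTO_UNDEF, Q_DEFAULT);
	}
#endif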
1140 
1141 static int
1142 lookup_proto(name, proto)
1143 	register const char *name;
1144 	register int proto;
1145 {
1146 	register int v;
1147 
1148 	switch (proto) {
1149 
1150 	case Q_DEFAULT:
1151 	case Q_IP:
1152 		v = pcap_nametoproto(name);
1153 		if (v == PROTO_UNDEF)
1154 			bpf_error("unknown ip proto '%s'", name);
1155 		break;
1156 
1157 	case Q_LINK:
1158 		/* XXX should look up h/w protocol type based on linktype */
1159 		v = pcap_nametoeproto(name);
1160 		if (v == PROTO_UNDEF)
1161 			bpf_error("unknown ether proto '%s'", name);
1162 		break;
1163 
1164 	default:
1165 		v = PROTO_UNDEF;
1166 		break;
1167 	}
1168 	return v;
1169 }
1170 
1171 static struct block *
1172 gen_proto(v, proto, dir)
1173 	int v;
1174 	int proto;
1175 	int dir;
1176 {
1177 	struct block *b0, *b1;
1178 
1179 	if (dir != Q_DEFAULT)
1180 		bpf_error("direction applied to 'proto'");
1181 
1182 	switch (proto) {
1183 	case Q_DEFAULT:
1184 	case Q_IP:
1185 		b0 = gen_linktype(ETHERTYPE_IP);
1186 		b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)v);
1187 		gen_and(b0, b1);
1188 		return b1;
1189 
1190 	case Q_ARP:
1191 		bpf_error("arp does not encapsulate another protocol");
1192 		/* NOTREACHED */
1193 
1194 	case Q_RARP:
1195 		bpf_error("rarp does not encapsulate another protocol");
1196 		/* NOTREACHED */
1197 
1198 	case Q_ATALK:
1199 		bpf_error("atalk encapsulation is not specifiable");
1200 		/* NOTREACHED */
1201 
1202 	case Q_DECNET:
1203 		bpf_error("decnet encapsulation is not specifiable");
1204 		/* NOTREACHED */
1205 
1206 	case Q_SCA:
1207 		bpf_error("sca does not encapsulate another protocol");
1208 		/* NOTREACHED */
1209 
1210 	case Q_LAT:
1211 		bpf_error("lat does not encapsulate another protocol");
1212 		/* NOTREACHED */
1213 
1214 	case Q_MOPRC:
1215 		bpf_error("moprc does not encapsulate another protocol");
1216 		/* NOTREACHED */
1217 
1218 	case Q_MOPDL:
1219 		bpf_error("mopdl does not encapsulate another protocol");
1220 		/* NOTREACHED */
1221 
1222 	case Q_LINK:
1223 		return gen_linktype(v);
1224 
1225 	case Q_UDP:
1226 		bpf_error("'udp proto' is bogus");
1227 		/* NOTREACHED */
1228 
1229 	case Q_TCP:
1230 		bpf_error("'tcp proto' is bogus");
1231 		/* NOTREACHED */
1232 
1233 	case Q_ICMP:
1234 		bpf_error("'icmp proto' is bogus");
1235 		/* NOTREACHED */
1236 
1237 	case Q_IGMP:
1238 		bpf_error("'igmp proto' is bogus");
1239 		/* NOTREACHED */
1240 
1241 	case Q_IGRP:
1242 		bpf_error("'igrp proto' is bogus");
1243 		/* NOTREACHED */
1244 
1245 	default:
1246 		abort();
1247 		/* NOTREACHED */
1248 	}
1249 	/* NOTREACHED */
1250 }
1251 
1252 struct block *
1253 gen_scode(name, q)
1254 	register const char *name;
1255 	struct qual q;
1256 {
1257 	int proto = q.proto;
1258 	int dir = q.dir;
1259 	int tproto;
1260 	u_char *eaddr;
1261 	bpf_u_int32 mask, addr, **alist;
1262 	struct block *b, *tmp;
1263 	int port, real_proto;
1264 
1265 	switch (q.addr) {
1266 
1267 	case Q_NET:
1268 		addr = pcap_nametonetaddr(name);
1269 		if (addr == 0)
1270 			bpf_error("unknown network '%s'", name);
1271 		/* Left justify network addr and calculate its network mask */
1272 		mask = 0xffffffff;
1273 		while (addr && (addr & 0xff000000) == 0) {
1274 			addr <<= 8;
1275 			mask <<= 8;
1276 		}
1277 		return gen_host(addr, mask, proto, dir);
1278 
1279 	case Q_DEFAULT:
1280 	case Q_HOST:
1281 		if (proto == Q_LINK) {
1282 			switch (linktype) {
1283 
1284 			case DLT_EN10MB:
1285 				eaddr = pcap_ether_hostton(name);
1286 				if (eaddr == NULL)
1287 					bpf_error(
1288 					    "unknown ether host '%s'", name);
1289 				return gen_ehostop(eaddr, dir);
1290 
1291 			case DLT_FDDI:
1292 				eaddr = pcap_ether_hostton(name);
1293 				if (eaddr == NULL)
1294 					bpf_error(
1295 					    "unknown FDDI host '%s'", name);
1296 				return gen_fhostop(eaddr, dir);
1297 
1298 			default:
1299 				bpf_error(
1300 			"only ethernet/FDDI supports link-level host name");
1301 				break;
1302 			}
1303 		} else if (proto == Q_DECNET) {
1304 			unsigned short dn_addr = __pcap_nametodnaddr(name);
1305 			/*
1306 			 * I don't think DECNET hosts can be multihomed, so
1307 			 * there is no need to build up a list of addresses
1308 			 */
1309 			return (gen_host(dn_addr, 0, proto, dir));
1310 		} else {
1311 			alist = pcap_nametoaddr(name);
1312 			if (alist == NULL || *alist == NULL)
1313 				bpf_error("unknown host '%s'", name);
1314 			tproto = proto;
1315 			if (off_linktype == -1 && tproto == Q_DEFAULT)
1316 				tproto = Q_IP;
1317 			b = gen_host(**alist++, 0xffffffff, tproto, dir);
1318 			while (*alist) {
1319 				tmp = gen_host(**alist++, 0xffffffff,
1320 					       tproto, dir);
1321 				gen_or(b, tmp);
1322 				b = tmp;
1323 			}
1324 			return b;
1325 		}
1326 
1327 	case Q_PORT:
1328 		if (proto != Q_DEFAULT && proto != Q_UDP && proto != Q_TCP)
1329 			bpf_error("illegal qualifier of 'port'");
1330 		if (pcap_nametoport(name, &port, &real_proto) == 0)
1331 			bpf_error("unknown port '%s'", name);
1332 		if (proto == Q_UDP) {
1333 			if (real_proto == IPPROTO_TCP)
1334 				bpf_error("port '%s' is tcp", name);
1335 			else
1336 				/* override PROTO_UNDEF */
1337 				real_proto = IPPROTO_UDP;
1338 		}
1339 		if (proto == Q_TCP) {
1340 			if (real_proto == IPPROTO_UDP)
1341 				bpf_error("port '%s' is udp", name);
1342 			else
1343 				/* override PROTO_UNDEF */
1344 				real_proto = IPPROTO_TCP;
1345 		}
1346 		return gen_port(port, real_proto, dir);
1347 
1348 	case Q_GATEWAY:
1349 		eaddr = pcap_ether_hostton(name);
1350 		if (eaddr == NULL)
1351 			bpf_error("unknown ether host: %s", name);
1352 
1353 		alist = pcap_nametoaddr(name);
1354 		if (alist == NULL || *alist == NULL)
1355 			bpf_error("unknown host '%s'", name);
1356 		return gen_gateway(eaddr, alist, proto, dir);
1357 
1358 	case Q_PROTO:
1359 		real_proto = lookup_proto(name, proto);
1360 		if (real_proto >= 0)
1361 			return gen_proto(real_proto, proto, dir);
1362 		else
1363 			bpf_error("unknown protocol: %s", name);
1364 
1365 	case Q_UNDEF:
1366 		syntax();
1367 		/* NOTREACHED */
1368 	}
1369 	abort();
1370 	/* NOTREACHED */
1371 }
1372 
1373 struct block *
1374 gen_mcode(s1, s2, masklen, q)
1375 	register const char *s1, *s2;
1376 	register int masklen;
1377 	struct qual q;
1378 {
1379 	register int nlen, mlen;
1380 	bpf_u_int32 n, m;
1381 
1382 	nlen = __pcap_atoin(s1, &n);
1383 	/* Promote short ipaddr */
1384 	n <<= 32 - nlen;
1385 
1386 	if (s2 != NULL) {
1387 		mlen = __pcap_atoin(s2, &m);
1388 		/* Promote short ipaddr */
1389 		m <<= 32 - mlen;
1390 		if ((n & ~m) != 0)
1391 			bpf_error("non-network bits set in \"%s mask %s\"",
1392 			    s1, s2);
1393 	} else {
1394 		/* Convert mask len to mask */
1395 		if (masklen > 32)
1396 			bpf_error("mask length must be <= 32");
1397 		m = 0xffffffff << (32 - masklen);
1398 		if ((n & ~m) != 0)
1399 			bpf_error("non-network bits set in \"%s/%d\"",
1400 			    s1, masklen);
1401 	}
1402 
1403 	switch (q.addr) {
1404 
1405 	case Q_NET:
1406 		return gen_host(n, m, q.proto, q.dir);
1407 
1408 	default:
1409 		bpf_error("Mask syntax for networks only");
1410 		/* NOTREACHED */
1411 	}
1412 }
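
/*
 * Worked example (not part of the original source): for "net 172.16/12",
 * __pcap_atoin() parses two octets (nlen == 16), so n == 0xac10 is
 * promoted to 0xac100000; masklen == 12 gives m == 0xfff00000, and the
 * sanity check passes because 0xac100000 & ~0xfff00000 == 0.
 */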
1413 
1414 struct block *
1415 gen_ncode(s, v, q)
1416 	register const char *s;
1417 	bpf_u_int32 v;
1418 	struct qual q;
1419 {
1420 	bpf_u_int32 mask;
1421 	int proto = q.proto;
1422 	int dir = q.dir;
1423 	register int vlen;
1424 
1425 	if (s == NULL)
1426 		vlen = 32;
1427 	else if (q.proto == Q_DECNET)
1428 		vlen = __pcap_atodn(s, &v);
1429 	else
1430 		vlen = __pcap_atoin(s, &v);
1431 
1432 	switch (q.addr) {
1433 
1434 	case Q_DEFAULT:
1435 	case Q_HOST:
1436 	case Q_NET:
1437 		if (proto == Q_DECNET)
1438 			return gen_host(v, 0, proto, dir);
1439 		else if (proto == Q_LINK) {
1440 			bpf_error("illegal link layer address");
1441 		} else {
1442 			mask = 0xffffffff;
1443 			if (s == NULL && q.addr == Q_NET) {
1444 				/* Promote short net number */
1445 				while (v && (v & 0xff000000) == 0) {
1446 					v <<= 8;
1447 					mask <<= 8;
1448 				}
1449 			} else {
1450 				/* Promote short ipaddr */
1451 				v <<= 32 - vlen;
1452 				mask <<= 32 - vlen;
1453 			}
1454 			return gen_host(v, mask, proto, dir);
1455 		}
1456 
1457 	case Q_PORT:
1458 		if (proto == Q_UDP)
1459 			proto = IPPROTO_UDP;
1460 		else if (proto == Q_TCP)
1461 			proto = IPPROTO_TCP;
1462 		else if (proto == Q_DEFAULT)
1463 			proto = PROTO_UNDEF;
1464 		else
1465 			bpf_error("illegal qualifier of 'port'");
1466 
1467 		return gen_port((int)v, proto, dir);
1468 
1469 	case Q_GATEWAY:
1470 		bpf_error("'gateway' requires a name");
1471 		/* NOTREACHED */
1472 
1473 	case Q_PROTO:
1474 		return gen_proto((int)v, proto, dir);
1475 
1476 	case Q_UNDEF:
1477 		syntax();
1478 		/* NOTREACHED */
1479 
1480 	default:
1481 		abort();
1482 		/* NOTREACHED */
1483 	}
1484 	/* NOTREACHED */
1485 }
1486 
1487 struct block *
1488 gen_ecode(eaddr, q)
1489 	register const u_char *eaddr;
1490 	struct qual q;
1491 {
1492 	if ((q.addr == Q_HOST || q.addr == Q_DEFAULT) && q.proto == Q_LINK) {
1493 		if (linktype == DLT_EN10MB)
1494 			return gen_ehostop(eaddr, (int)q.dir);
1495 		if (linktype == DLT_FDDI)
1496 			return gen_fhostop(eaddr, (int)q.dir);
1497 	}
1498 	bpf_error("ethernet address used in non-ether expression");
1499 	/* NOTREACHED */
1500 }
1501 
1502 void
1503 sappend(s0, s1)
1504 	struct slist *s0, *s1;
1505 {
1506 	/*
1507 	 * This is definitely not the best way to do this, but the
1508 	 * lists will rarely get long.
1509 	 */
1510 	while (s0->next)
1511 		s0 = s0->next;
1512 	s0->next = s1;
1513 }
1514 
1515 static struct slist *
1516 xfer_to_x(a)
1517 	struct arth *a;
1518 {
1519 	struct slist *s;
1520 
1521 	s = new_stmt(BPF_LDX|BPF_MEM);
1522 	s->s.k = a->regno;
1523 	return s;
1524 }
1525 
1526 static struct slist *
1527 xfer_to_a(a)
1528 	struct arth *a;
1529 {
1530 	struct slist *s;
1531 
1532 	s = new_stmt(BPF_LD|BPF_MEM);
1533 	s->s.k = a->regno;
1534 	return s;
1535 }
1536 
1537 struct arth *
1538 gen_load(proto, index, size)
1539 	int proto;
1540 	struct arth *index;
1541 	int size;
1542 {
1543 	struct slist *s, *tmp;
1544 	struct block *b;
1545 	int regno = alloc_reg();
1546 
1547 	free_reg(index->regno);
1548 	switch (size) {
1549 
1550 	default:
1551 		bpf_error("data size must be 1, 2, or 4");
1552 
1553 	case 1:
1554 		size = BPF_B;
1555 		break;
1556 
1557 	case 2:
1558 		size = BPF_H;
1559 		break;
1560 
1561 	case 4:
1562 		size = BPF_W;
1563 		break;
1564 	}
1565 	switch (proto) {
1566 	default:
1567 		bpf_error("unsupported index operation");
1568 
1569 	case Q_LINK:
1570 		s = xfer_to_x(index);
1571 		tmp = new_stmt(BPF_LD|BPF_IND|size);
1572 		sappend(s, tmp);
1573 		sappend(index->s, s);
1574 		break;
1575 
1576 	case Q_IP:
1577 	case Q_ARP:
1578 	case Q_RARP:
1579 	case Q_ATALK:
1580 	case Q_DECNET:
1581 	case Q_SCA:
1582 	case Q_LAT:
1583 	case Q_MOPRC:
1584 	case Q_MOPDL:
1585 		/* XXX Note that we assume a fixed-length link header here. */
1586 		s = xfer_to_x(index);
1587 		tmp = new_stmt(BPF_LD|BPF_IND|size);
1588 		tmp->s.k = off_nl;
1589 		sappend(s, tmp);
1590 		sappend(index->s, s);
1591 
1592 		b = gen_proto_abbrev(proto);
1593 		if (index->b)
1594 			gen_and(index->b, b);
1595 		index->b = b;
1596 		break;
1597 
1598 	case Q_TCP:
1599 	case Q_UDP:
1600 	case Q_ICMP:
1601 	case Q_IGMP:
1602 	case Q_IGRP:
1603 		s = new_stmt(BPF_LDX|BPF_MSH|BPF_B);
1604 		s->s.k = off_nl;
1605 		sappend(s, xfer_to_a(index));
1606 		sappend(s, new_stmt(BPF_ALU|BPF_ADD|BPF_X));
1607 		sappend(s, new_stmt(BPF_MISC|BPF_TAX));
1608 		sappend(s, tmp = new_stmt(BPF_LD|BPF_IND|size));
1609 		tmp->s.k = off_nl;
1610 		sappend(index->s, s);
1611 
1612 		gen_and(gen_proto_abbrev(proto), b = gen_ipfrag());
1613 		if (index->b)
1614 			gen_and(index->b, b);
1615 		index->b = b;
1616 		break;
1617 	}
1618 	index->regno = regno;
1619 	s = new_stmt(BPF_ST);
1620 	s->s.k = regno;
1621 	sappend(index->s, s);
1622 
1623 	return index;
1624 }
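
/*
 * Example (not part of the original source): for an expression such as
 * "tcp[13] & 2 != 0" (TCP SYN bit set), the Q_TCP case above first emits
 * BPF_LDX|BPF_MSH|BPF_B at off_nl, which loads the IP header length in
 * bytes, 4 * (packet[off_nl] & 0x0f), into the X register.  It then adds
 * the user's index and loads relative to off_nl, so variable-length IP
 * options are skipped automatically.
 */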
1625 
1626 struct block *
1627 gen_relation(code, a0, a1, reversed)
1628 	int code;
1629 	struct arth *a0, *a1;
1630 	int reversed;
1631 {
1632 	struct slist *s0, *s1, *s2;
1633 	struct block *b, *tmp;
1634 
1635 	s0 = xfer_to_x(a1);
1636 	s1 = xfer_to_a(a0);
1637 	s2 = new_stmt(BPF_ALU|BPF_SUB|BPF_X);
1638 	b = new_block(JMP(code));
1639 	if (code == BPF_JGT || code == BPF_JGE) {
1640 		reversed = !reversed;
1641 		b->s.k = 0x80000000;
1642 	}
1643 	if (reversed)
1644 		gen_not(b);
1645 
1646 	sappend(s1, s2);
1647 	sappend(s0, s1);
1648 	sappend(a1->s, s0);
1649 	sappend(a0->s, a1->s);
1650 
1651 	b->stmts = a0->s;
1652 
1653 	free_reg(a0->regno);
1654 	free_reg(a1->regno);
1655 
1656 	/* 'and' together protocol checks */
1657 	if (a0->b) {
1658 		if (a1->b) {
1659 			gen_and(a0->b, tmp = a1->b);
1660 		}
1661 		else
1662 			tmp = a0->b;
1663 	} else
1664 		tmp = a1->b;
1665 
1666 	if (tmp)
1667 		gen_and(tmp, b);
1668 
1669 	return b;
1670 }
1671 
1672 struct arth *
1673 gen_loadlen()
1674 {
1675 	int regno = alloc_reg();
1676 	struct arth *a = (struct arth *)newchunk(sizeof(*a));
1677 	struct slist *s;
1678 
1679 	s = new_stmt(BPF_LD|BPF_LEN);
1680 	s->next = new_stmt(BPF_ST);
1681 	s->next->s.k = regno;
1682 	a->s = s;
1683 	a->regno = regno;
1684 
1685 	return a;
1686 }
1687 
1688 struct arth *
1689 gen_loadi(val)
1690 	int val;
1691 {
1692 	struct arth *a;
1693 	struct slist *s;
1694 	int reg;
1695 
1696 	a = (struct arth *)newchunk(sizeof(*a));
1697 
1698 	reg = alloc_reg();
1699 
1700 	s = new_stmt(BPF_LD|BPF_IMM);
1701 	s->s.k = val;
1702 	s->next = new_stmt(BPF_ST);
1703 	s->next->s.k = reg;
1704 	a->s = s;
1705 	a->regno = reg;
1706 
1707 	return a;
1708 }
1709 
1710 struct arth *
1711 gen_neg(a)
1712 	struct arth *a;
1713 {
1714 	struct slist *s;
1715 
1716 	s = xfer_to_a(a);
1717 	sappend(a->s, s);
1718 	s = new_stmt(BPF_ALU|BPF_NEG);
1719 	s->s.k = 0;
1720 	sappend(a->s, s);
1721 	s = new_stmt(BPF_ST);
1722 	s->s.k = a->regno;
1723 	sappend(a->s, s);
1724 
1725 	return a;
1726 }
1727 
1728 struct arth *
1729 gen_arth(code, a0, a1)
1730 	int code;
1731 	struct arth *a0, *a1;
1732 {
1733 	struct slist *s0, *s1, *s2;
1734 
1735 	s0 = xfer_to_x(a1);
1736 	s1 = xfer_to_a(a0);
1737 	s2 = new_stmt(BPF_ALU|BPF_X|code);
1738 
1739 	sappend(s1, s2);
1740 	sappend(s0, s1);
1741 	sappend(a1->s, s0);
1742 	sappend(a0->s, a1->s);
1743 
1744 	free_reg(a1->regno);
1745 
1746 	s0 = new_stmt(BPF_ST);
1747 	a0->regno = s0->s.k = alloc_reg();
1748 	sappend(a0->s, s0);
1749 
1750 	return a0;
1751 }
1752 
1753 /*
1754  * Here we handle simple allocation of the scratch registers.
1755  * If too many registers are alloc'd, the allocator punts.
1756  */
1757 static int regused[BPF_MEMWORDS];
1758 static int curreg;
1759 
1760 /*
1761  * Return the next free register.
1762  */
1763 static int
1764 alloc_reg()
1765 {
1766 	int n = BPF_MEMWORDS;
1767 
1768 	while (--n >= 0) {
1769 		if (regused[curreg])
1770 			curreg = (curreg + 1) % BPF_MEMWORDS;
1771 		else {
1772 			regused[curreg] = 1;
1773 			return curreg;
1774 		}
1775 	}
1776 	bpf_error("too many registers needed to evaluate expression");
1777 	/* NOTREACHED */
1778 }
1779 
1780 /*
1781  * Return a register to the table so it can
1782  * be used later.
1783  */
1784 static void
1785 free_reg(n)
1786 	int n;
1787 {
1788 	regused[n] = 0;
1789 }
1790 
1791 static struct block *
1792 gen_len(jmp, n)
1793 	int jmp, n;
1794 {
1795 	struct slist *s;
1796 	struct block *b;
1797 
1798 	s = new_stmt(BPF_LD|BPF_LEN);
1799 	b = new_block(JMP(jmp));
1800 	b->stmts = s;
1801 	b->s.k = n;
1802 
1803 	return b;
1804 }
1805 
1806 struct block *
1807 gen_greater(n)
1808 	int n;
1809 {
1810 	return gen_len(BPF_JGE, n);
1811 }
1812 
1813 struct block *
1814 gen_less(n)
1815 	int n;
1816 {
1817 	struct block *b;
1818 
1819 	b = gen_len(BPF_JGT, n);
1820 	gen_not(b);
1821 
1822 	return b;
1823 }
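
/*
 * Example (not part of the original source): "greater 576" accepts
 * packets with len >= 576 (a BPF_JGE test), while "less 576" negates a
 * BPF_JGT test and therefore accepts len <= 576.
 */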
1824 
1825 struct block *
1826 gen_byteop(op, idx, val)
1827 	int op, idx, val;
1828 {
1829 	struct block *b;
1830 	struct slist *s;
1831 
1832 	switch (op) {
1833 	default:
1834 		abort();
1835 
1836 	case '=':
1837 		return gen_cmp((u_int)idx, BPF_B, (bpf_int32)val);
1838 
1839 	case '<':
1840 		b = gen_cmp((u_int)idx, BPF_B, (bpf_int32)val);
1841 		b->s.code = JMP(BPF_JGE);
1842 		gen_not(b);
1843 		return b;
1844 
1845 	case '>':
1846 		b = gen_cmp((u_int)idx, BPF_B, (bpf_int32)val);
1847 		b->s.code = JMP(BPF_JGT);
1848 		return b;
1849 
1850 	case '|':
1851 		s = new_stmt(BPF_ALU|BPF_OR|BPF_K);
1852 		break;
1853 
1854 	case '&':
1855 		s = new_stmt(BPF_ALU|BPF_AND|BPF_K);
1856 		break;
1857 	}
1858 	s->s.k = val;
1859 	b = new_block(JMP(BPF_JEQ));
1860 	b->stmts = s;
1861 	gen_not(b);
1862 
1863 	return b;
1864 }
1865 
1866 struct block *
1867 gen_broadcast(proto)
1868 	int proto;
1869 {
1870 	bpf_u_int32 hostmask;
1871 	struct block *b0, *b1, *b2;
1872 	static u_char ebroadcast[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1873 
1874 	switch (proto) {
1875 
1876 	case Q_DEFAULT:
1877 	case Q_LINK:
1878 		if (linktype == DLT_EN10MB)
1879 			return gen_ehostop(ebroadcast, Q_DST);
1880 		if (linktype == DLT_FDDI)
1881 			return gen_fhostop(ebroadcast, Q_DST);
1882 		bpf_error("not a broadcast link");
1883 		break;
1884 
1885 	case Q_IP:
1886 		b0 = gen_linktype(ETHERTYPE_IP);
1887 		hostmask = ~netmask;
1888 		b1 = gen_mcmp(off_nl + 16, BPF_W, (bpf_int32)0, hostmask);
1889 		b2 = gen_mcmp(off_nl + 16, BPF_W,
1890 			      (bpf_int32)(~0 & hostmask), hostmask);
1891 		gen_or(b1, b2);
1892 		gen_and(b0, b2);
1893 		return b2;
1894 	}
1895 	bpf_error("only ether/ip broadcast filters supported");
1896 }
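
/*
 * Example (not part of the original source): with the netmask argument
 * of pcap_compile() set to 255.255.255.0, hostmask is 0x000000ff and
 * "ip broadcast" matches IP destinations whose host part is all-zeros
 * or all-ones, e.g. x.y.z.0 or x.y.z.255.
 */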
1897 
1898 struct block *
1899 gen_multicast(proto)
1900 	int proto;
1901 {
1902 	register struct block *b0, *b1;
1903 	register struct slist *s;
1904 
1905 	switch (proto) {
1906 
1907 	case Q_DEFAULT:
1908 	case Q_LINK:
1909 		if (linktype == DLT_EN10MB) {
1910 			/* ether[0] & 1 != 0 */
1911 			s = new_stmt(BPF_LD|BPF_B|BPF_ABS);
1912 			s->s.k = 0;
1913 			b0 = new_block(JMP(BPF_JSET));
1914 			b0->s.k = 1;
1915 			b0->stmts = s;
1916 			return b0;
1917 		}
1918 
1919 		if (linktype == DLT_FDDI) {
1920 			/* XXX TEST THIS: MIGHT NOT PORT PROPERLY XXX */
1921 			/* fddi[1] & 1 != 0 */
1922 			s = new_stmt(BPF_LD|BPF_B|BPF_ABS);
1923 			s->s.k = 1;
1924 			b0 = new_block(JMP(BPF_JSET));
1925 			b0->s.k = 1;
1926 			b0->stmts = s;
1927 			return b0;
1928 		}
1929 		/* Link not known to support multicasts */
1930 		break;
1931 
1932 	case Q_IP:
1933 		b0 = gen_linktype(ETHERTYPE_IP);
1934 		b1 = gen_cmp(off_nl + 16, BPF_B, (bpf_int32)224);
1935 		b1->s.code = JMP(BPF_JGE);
1936 		gen_and(b0, b1);
1937 		return b1;
1938 	}
1939 	bpf_error("only IP multicast filters supported on ethernet/FDDI");
1940 }
1941 
1942 /*
1943  * Generate code for inbound/outbound.  It's here so we can
1944  * make it link-type specific.  'dir' = 0 implies "inbound",
1945  * = 1 implies "outbound".
1946  */
1947 struct block *
1948 gen_inbound(dir)
1949 	int dir;
1950 {
1951 	register struct block *b0;
1952 
1953 	b0 = gen_relation(BPF_JEQ,
1954 			  gen_load(Q_LINK, gen_loadi(0), 1),
1955 			  gen_loadi(0),
1956 			  dir);
1957 	return (b0);
1958 }
1959