/*	$OpenBSD: gencode.c,v 1.9 1998/07/14 00:14:00 deraadt Exp $	*/

/*
 * Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that: (1) source code distributions
 * retain the above copyright notice and this paragraph in its entirety, (2)
 * distributions including binary code include the above copyright notice and
 * this paragraph in its entirety in the documentation or other materials
 * provided with the distribution, and (3) all advertising materials mentioning
 * features or use of this software display the following acknowledgement:
 * ``This product includes software developed by the University of California,
 * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
 * the University nor the names of its contributors may be used to endorse
 * or promote products derived from this software without specific prior
 * written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#ifndef lint
static char rcsid[] =
    "@(#) Header: gencode.c,v 1.81 96/06/19 23:09:09 leres Exp (LBL)";
#endif

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/time.h>

#ifdef __STDC__
struct mbuf;
struct rtentry;
#endif

#include <net/if.h>
#include <net/bpf.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <stdlib.h>
#include <memory.h>
#include <pcap.h>
#include <pcap-namedb.h>
#include <setjmp.h>
#ifdef __STDC__
#include <stdarg.h>
#else
#include <varargs.h>
#endif

#ifdef HAVE_OS_PROTO_H
#include "os-proto.h"
#endif

#include "pcap-int.h"

#include "gencode.h"

#ifndef ETHERTYPE_REVARP
#define ETHERTYPE_REVARP	0x8035
#endif
#ifndef ETHERTYPE_MOPDL
#define ETHERTYPE_MOPDL		0x6001
#endif
#ifndef ETHERTYPE_MOPRC
#define ETHERTYPE_MOPRC		0x6002
#endif
#ifndef ETHERTYPE_DN
#define ETHERTYPE_DN		0x6003
#endif
#ifndef ETHERTYPE_LAT
#define ETHERTYPE_LAT		0x6004
#endif

#define JMP(c) ((c)|BPF_JMP|BPF_K)

/* Locals */
static jmp_buf top_ctx;
static pcap_t *bpf_pcap;

/* XXX */
#ifdef PCAP_FDDIPAD
int	pcap_fddipad = PCAP_FDDIPAD;
#else
int	pcap_fddipad;
#endif

/* VARARGS */
__dead void
#ifdef __STDC__
bpf_error(const char *fmt, ...)
#else
bpf_error(fmt, va_alist)
	const char *fmt;
	va_dcl
#endif
{
	va_list ap;

#ifdef __STDC__
	va_start(ap, fmt);
#else
	va_start(ap);
#endif
	if (bpf_pcap != NULL)
		(void)vsnprintf(pcap_geterr(bpf_pcap), PCAP_ERRBUF_SIZE,
		    fmt, ap);
	va_end(ap);
	longjmp(top_ctx, 1);
	/* NOTREACHED */
}

static void init_linktype(int);

static int alloc_reg(void);
static void free_reg(int);

static struct block *root;

/*
 * We divvy out chunks of memory rather than call malloc each time so
 * we don't have to worry about leaking memory.  It's probably
 * not a big deal if all this memory is wasted, but if this ever
 * goes into a library that would probably not be a good idea.
 */
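/*
 * Chunk k holds CHUNK0SIZE << k bytes, so capacity grows geometrically;
 * when the current chunk cannot satisfy a request, newchunk() abandons
 * whatever is left in it and moves on to the next, larger one.
 */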
#define NCHUNKS 16
#define CHUNK0SIZE 1024
struct chunk {
	u_int n_left;
	void *m;
};

static struct chunk chunks[NCHUNKS];
static int cur_chunk;

static void *newchunk(u_int);
static void freechunks(void);
static __inline struct block *new_block(int);
static __inline struct slist *new_stmt(int);
static struct block *gen_retblk(int);
static __inline void syntax(void);

static void backpatch(struct block *, struct block *);
static void merge(struct block *, struct block *);
static struct block *gen_cmp(u_int, u_int, bpf_int32);
static struct block *gen_mcmp(u_int, u_int, bpf_int32, bpf_u_int32);
static struct block *gen_bcmp(u_int, u_int, u_char *);
static struct block *gen_uncond(int);
static __inline struct block *gen_true(void);
static __inline struct block *gen_false(void);
static struct block *gen_linktype(int);
static struct block *gen_hostop(bpf_u_int32, bpf_u_int32, int, int, u_int, u_int);
static struct block *gen_ehostop(u_char *, int);
static struct block *gen_fhostop(u_char *, int);
static struct block *gen_dnhostop(bpf_u_int32, int, u_int);
static struct block *gen_host(bpf_u_int32, bpf_u_int32, int, int);
static struct block *gen_gateway(u_char *, bpf_u_int32 **, int, int);
static struct block *gen_ipfrag(void);
static struct block *gen_portatom(int, bpf_int32);
struct block *gen_portop(int, int, int);
static struct block *gen_port(int, int, int);
static int lookup_proto(char *, int);
static struct block *gen_proto(int, int, int);
static bpf_u_int32 net_mask(bpf_u_int32 *);
static struct slist *xfer_to_x(struct arth *);
static struct slist *xfer_to_a(struct arth *);
static struct block *gen_len(int, int);

static void *
newchunk(n)
	u_int n;
{
	struct chunk *cp;
	int k, size;

	/* XXX Round up to nearest long. */
	n = (n + sizeof(long) - 1) & ~(sizeof(long) - 1);

	cp = &chunks[cur_chunk];
	if (n > cp->n_left) {
		++cp, k = ++cur_chunk;
		if (k >= NCHUNKS)
			bpf_error("out of memory");
		size = CHUNK0SIZE << k;
		cp->m = (void *)malloc(size);
		if (cp->m == NULL)
			bpf_error("out of memory");
		memset((char *)cp->m, 0, size);
		cp->n_left = size;
		if (n > size)
			bpf_error("out of memory");
	}
	cp->n_left -= n;
	return (void *)((char *)cp->m + cp->n_left);
}
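/*
 * pcap_compile() calls freechunks() both on the error path (after the
 * longjmp back from bpf_error()) and again just before a successful
 * return, so nothing allocated from these chunks outlives a single
 * compilation.
 */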
static void
freechunks()
{
	int i;

	cur_chunk = 0;
	for (i = 0; i < NCHUNKS; ++i)
		if (chunks[i].m != NULL) {
			free(chunks[i].m);
			chunks[i].m = NULL;
		}
}

/*
 * A strdup whose allocations are freed after code generation is over.
 */
char *
sdup(s)
	char *s;
{
	int n = strlen(s) + 1;
	char *cp = newchunk(n);
	strcpy(cp, s);
	return (cp);
}

static __inline struct block *
new_block(code)
	int code;
{
	struct block *p;

	p = (struct block *)newchunk(sizeof(*p));
	p->s.code = code;
	p->head = p;

	return p;
}

static __inline struct slist *
new_stmt(code)
	int code;
{
	struct slist *p;

	p = (struct slist *)newchunk(sizeof(*p));
	p->s.code = code;

	return p;
}

static struct block *
gen_retblk(v)
	int v;
{
	struct block *b = new_block(BPF_RET|BPF_K);

	b->s.k = v;
	return b;
}

static __inline void
syntax()
{
	bpf_error("syntax error in filter expression");
}

static bpf_u_int32 netmask;
static int snaplen;

int
pcap_compile(pcap_t *p, struct bpf_program *program,
	     char *buf, int optimize, bpf_u_int32 mask)
{
	extern int n_errors;
	int len;

	n_errors = 0;
	root = NULL;
	bpf_pcap = p;
	if (setjmp(top_ctx)) {
		freechunks();
		return (-1);
	}

	netmask = mask;
	snaplen = pcap_snapshot(p);

	lex_init(buf ? buf : "");
	init_linktype(pcap_datalink(p));
	(void)pcap_parse();

	if (n_errors)
		syntax();

	if (root == NULL)
		root = gen_retblk(snaplen);

	if (optimize) {
		bpf_optimize(&root);
		if (root == NULL ||
		    (root->s.code == (BPF_RET|BPF_K) && root->s.k == 0))
			bpf_error("expression rejects all packets");
	}
	program->bf_insns = icode_to_fcode(root, &len);
	program->bf_len = len;

	freechunks();
	return (0);
}

/*
 * Backpatch the blocks in 'list' to 'target'.  The 'sense' field indicates
 * which of the jt and jf fields has been resolved and which is a pointer
 * back to another unresolved block (or nil).  At least one of the fields
 * in each block is already resolved.
 */
static void
backpatch(list, target)
	struct block *list, *target;
{
	struct block *next;

	while (list) {
		if (!list->sense) {
			next = JT(list);
			JT(list) = target;
		} else {
			next = JF(list);
			JF(list) = target;
		}
		list = next;
	}
}

/*
 * Merge the lists in b0 and b1, using the 'sense' field to indicate
 * which of jt and jf is the link.
 */
static void
merge(b0, b1)
	struct block *b0, *b1;
{
	register struct block **p = &b0;

	/* Find end of list. */
	while (*p)
		p = !((*p)->sense) ? &JT(*p) : &JF(*p);

	/* Concatenate the lists. */
	*p = b1;
}
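/*
 * finish_parse() ties off the tree handed back by the parser: the
 * unresolved true edges are backpatched to a "return snaplen" (accept)
 * block and, after flipping the sense, the false edges to a "return 0"
 * (reject) block.  gen_and(), gen_or() and gen_not() combine subtrees
 * the same way, by backpatching and merging their unresolved edge lists.
 */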
void
finish_parse(p)
	struct block *p;
{
	backpatch(p, gen_retblk(snaplen));
	p->sense = !p->sense;
	backpatch(p, gen_retblk(0));
	root = p->head;
}

void
gen_and(b0, b1)
	struct block *b0, *b1;
{
	backpatch(b0, b1->head);
	b0->sense = !b0->sense;
	b1->sense = !b1->sense;
	merge(b1, b0);
	b1->sense = !b1->sense;
	b1->head = b0->head;
}

void
gen_or(b0, b1)
	struct block *b0, *b1;
{
	b0->sense = !b0->sense;
	backpatch(b0, b1->head);
	b0->sense = !b0->sense;
	merge(b1, b0);
	b1->head = b0->head;
}

void
gen_not(b)
	struct block *b;
{
	b->sense = !b->sense;
}

static struct block *
gen_cmp(offset, size, v)
	u_int offset, size;
	bpf_int32 v;
{
	struct slist *s;
	struct block *b;

	s = new_stmt(BPF_LD|BPF_ABS|size);
	s->s.k = offset;

	b = new_block(JMP(BPF_JEQ));
	b->stmts = s;
	b->s.k = v;

	return b;
}

static struct block *
gen_mcmp(offset, size, v, mask)
	u_int offset, size;
	bpf_int32 v;
	bpf_u_int32 mask;
{
	struct block *b = gen_cmp(offset, size, v);
	struct slist *s;

	if (mask != 0xffffffff) {
		s = new_stmt(BPF_ALU|BPF_AND|BPF_K);
		s->s.k = mask;
		b->stmts->next = s;
	}
	return b;
}
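/*
 * Compare an arbitrary byte string against the packet, widest loads
 * first: the string is split into 32-, 16- and 8-bit comparisons that
 * are ANDed together.  Used for the 6-byte link-level addresses.
 */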
static struct block *
gen_bcmp(offset, size, v)
	u_int offset, size;
	u_char *v;
{
	struct block *b, *tmp;

	b = NULL;
	while (size >= 4) {
		u_char *p = &v[size - 4];
		bpf_int32 w = (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
		tmp = gen_cmp(offset + size - 4, BPF_W, w);
		if (b != NULL)
			gen_and(b, tmp);
		b = tmp;
		size -= 4;
	}
	while (size >= 2) {
		u_char *p = &v[size - 2];
		bpf_int32 w = (p[0] << 8) | p[1];
		tmp = gen_cmp(offset + size - 2, BPF_H, w);
		if (b != NULL)
			gen_and(b, tmp);
		b = tmp;
		size -= 2;
	}
	if (size > 0) {
		tmp = gen_cmp(offset, BPF_B, (bpf_int32)v[0]);
		if (b != NULL)
			gen_and(b, tmp);
		b = tmp;
	}
	return b;
}

/*
 * Various code constructs need to know the layout of the data link
 * layer.  These variables give the necessary offsets.  off_linktype
 * is set to -1 for no encapsulation, in which case, IP is assumed.
 */
static u_int off_linktype;
static u_int off_nl;
static int linktype;

static void
init_linktype(type)
	int type;
{
	linktype = type;

	switch (type) {

	case DLT_EN10MB:
		off_linktype = 12;
		off_nl = 14;
		return;

	case DLT_SLIP:
		/*
		 * SLIP doesn't have a link level type.  The 16 byte
		 * header is hacked into our SLIP driver.
		 */
		off_linktype = -1;
		off_nl = 16;
		return;

	case DLT_NULL:
		off_linktype = -1;
		off_nl = 0;
		return;

	case DLT_LOOP:
		off_linktype = -1;
		off_nl = 4;
		return;

	case DLT_ENC:
		off_linktype = -1;
		off_nl = 12;
		return;

	case DLT_PPP:
		off_linktype = 2;
		off_nl = 4;
		return;

	case DLT_FDDI:
		/*
		 * FDDI doesn't really have a link-level type field.
		 * We assume that SSAP = SNAP is being used and pick
		 * out the encapsulated Ethernet type.
		 */
		off_linktype = 19;
#ifdef PCAP_FDDIPAD
		off_linktype += pcap_fddipad;
#endif
		off_nl = 21;
#ifdef PCAP_FDDIPAD
		off_nl += pcap_fddipad;
#endif
		return;

	case DLT_IEEE802:
		off_linktype = 20;
		off_nl = 22;
		return;

	case DLT_ATM_RFC1483:
		/*
		 * assume routed, non-ISO PDUs
		 * (i.e., LLC = 0xAA-AA-03, OUI = 0x00-00-00)
		 */
		off_linktype = 6;
		off_nl = 8;
		return;
	}
	bpf_error("unknown data link type 0x%x", linktype);
	/* NOTREACHED */
}

static struct block *
gen_uncond(rsense)
	int rsense;
{
	struct block *b;
	struct slist *s;

	s = new_stmt(BPF_LD|BPF_IMM);
	s->s.k = !rsense;
	b = new_block(JMP(BPF_JEQ));
	b->stmts = s;

	return b;
}

static __inline struct block *
gen_true()
{
	return gen_uncond(1);
}

static __inline struct block *
gen_false()
{
	return gen_uncond(0);
}

static struct block *
gen_linktype(proto)
	int proto;
{
	switch (linktype) {
	case DLT_SLIP:
		if (proto == ETHERTYPE_IP)
			return gen_true();
		else
			return gen_false();

	case DLT_PPP:
		if (proto == ETHERTYPE_IP)
			proto = 0x0021;		/* XXX - need ppp.h defs */
		break;

	case DLT_ENC:
	case DLT_LOOP:
	case DLT_NULL:
		/* XXX */
		if (proto == ETHERTYPE_IP)
			return (gen_cmp(0, BPF_W, htonl((bpf_int32) AF_INET)));
		else
			return gen_false();
	}
	return gen_cmp(off_linktype, BPF_H, (bpf_int32)proto);
}
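/*
 * Match a 32-bit (IP/ARP/RARP) address at src_off or dst_off, under
 * 'mask'.  The linktype check from gen_linktype() is ANDed in so the
 * offsets are only examined in packets of the right protocol.
 */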
static struct block *
gen_hostop(addr, mask, dir, proto, src_off, dst_off)
	bpf_u_int32 addr;
	bpf_u_int32 mask;
	int dir, proto;
	u_int src_off, dst_off;
{
	struct block *b0, *b1;
	u_int offset;

	switch (dir) {

	case Q_SRC:
		offset = src_off;
		break;

	case Q_DST:
		offset = dst_off;
		break;

	case Q_AND:
		b0 = gen_hostop(addr, mask, Q_SRC, proto, src_off, dst_off);
		b1 = gen_hostop(addr, mask, Q_DST, proto, src_off, dst_off);
		gen_and(b0, b1);
		return b1;

	case Q_OR:
	case Q_DEFAULT:
		b0 = gen_hostop(addr, mask, Q_SRC, proto, src_off, dst_off);
		b1 = gen_hostop(addr, mask, Q_DST, proto, src_off, dst_off);
		gen_or(b0, b1);
		return b1;

	default:
		abort();
	}
	b0 = gen_linktype(proto);
	b1 = gen_mcmp(offset, BPF_W, (bpf_int32)addr, mask);
	gen_and(b0, b1);
	return b1;
}

static struct block *
gen_ehostop(eaddr, dir)
	u_char *eaddr;
	int dir;
{
	struct block *b0, *b1;

	switch (dir) {
	case Q_SRC:
		return gen_bcmp(6, 6, eaddr);

	case Q_DST:
		return gen_bcmp(0, 6, eaddr);

	case Q_AND:
		b0 = gen_ehostop(eaddr, Q_SRC);
		b1 = gen_ehostop(eaddr, Q_DST);
		gen_and(b0, b1);
		return b1;

	case Q_DEFAULT:
	case Q_OR:
		b0 = gen_ehostop(eaddr, Q_SRC);
		b1 = gen_ehostop(eaddr, Q_DST);
		gen_or(b0, b1);
		return b1;
	}
	abort();
	/* NOTREACHED */
}

/*
 * Like gen_ehostop, but for DLT_FDDI
 */
static struct block *
gen_fhostop(eaddr, dir)
	u_char *eaddr;
	int dir;
{
	struct block *b0, *b1;

	switch (dir) {
	case Q_SRC:
#ifdef PCAP_FDDIPAD
		return gen_bcmp(6 + 1 + pcap_fddipad, 6, eaddr);
#else
		return gen_bcmp(6 + 1, 6, eaddr);
#endif

	case Q_DST:
#ifdef PCAP_FDDIPAD
		return gen_bcmp(0 + 1 + pcap_fddipad, 6, eaddr);
#else
		return gen_bcmp(0 + 1, 6, eaddr);
#endif

	case Q_AND:
		b0 = gen_fhostop(eaddr, Q_SRC);
		b1 = gen_fhostop(eaddr, Q_DST);
		gen_and(b0, b1);
		return b1;

	case Q_DEFAULT:
	case Q_OR:
		b0 = gen_fhostop(eaddr, Q_SRC);
		b1 = gen_fhostop(eaddr, Q_DST);
		gen_or(b0, b1);
		return b1;
	}
	abort();
	/* NOTREACHED */
}

/*
 * This is quite tricky because there may be pad bytes in front of the
 * DECNET header, and then there are two possible data packet formats that
 * carry both src and dst addresses, plus 5 packet types in a format that
 * carries only the src node, plus 2 types that use a different format and
 * also carry just the src node.
 *
 * Yuck.
 *
 * Instead of doing those all right, we just look for data packets with
 * 0 or 1 bytes of padding.  If you want to look at other packets, that
 * will require a lot more hacking.
 *
 * To add support for filtering on DECNET "areas" (network numbers)
 * one would want to add a "mask" argument to this routine.  That would
 * make the filter even more inefficient, although one could be clever
 * and not generate masking instructions if the mask is 0xFFFF.
 */
static struct block *
gen_dnhostop(addr, dir, base_off)
	bpf_u_int32 addr;
	int dir;
	u_int base_off;
{
	struct block *b0, *b1, *b2, *tmp;
	u_int offset_lh;	/* offset if long header is received */
	u_int offset_sh;	/* offset if short header is received */

	switch (dir) {

	case Q_DST:
		offset_sh = 1;	/* follows flags */
		offset_lh = 7;	/* flgs,darea,dsubarea,HIORD */
		break;

	case Q_SRC:
		offset_sh = 3;	/* follows flags, dstnode */
		offset_lh = 15;	/* flgs,darea,dsubarea,did,sarea,ssub,HIORD */
		break;

	case Q_AND:
		/* Inefficient because we do our Calvinball dance twice */
		b0 = gen_dnhostop(addr, Q_SRC, base_off);
		b1 = gen_dnhostop(addr, Q_DST, base_off);
		gen_and(b0, b1);
		return b1;

	case Q_OR:
	case Q_DEFAULT:
		/* Inefficient because we do our Calvinball dance twice */
		b0 = gen_dnhostop(addr, Q_SRC, base_off);
		b1 = gen_dnhostop(addr, Q_DST, base_off);
		gen_or(b0, b1);
		return b1;

	default:
		abort();
	}
	b0 = gen_linktype(ETHERTYPE_DN);
	/* Check for pad = 1, long header case */
	tmp = gen_mcmp(base_off + 2, BPF_H,
	    (bpf_int32)ntohs(0x0681), (bpf_int32)ntohs(0x07FF));
	b1 = gen_cmp(base_off + 2 + 1 + offset_lh,
	    BPF_H, (bpf_int32)ntohs(addr));
	gen_and(tmp, b1);
	/* Check for pad = 0, long header case */
	tmp = gen_mcmp(base_off + 2, BPF_B, (bpf_int32)0x06, (bpf_int32)0x7);
	b2 = gen_cmp(base_off + 2 + offset_lh, BPF_H, (bpf_int32)ntohs(addr));
	gen_and(tmp, b2);
	gen_or(b2, b1);
	/* Check for pad = 1, short header case */
	tmp = gen_mcmp(base_off + 2, BPF_H,
	    (bpf_int32)ntohs(0x0281), (bpf_int32)ntohs(0x07FF));
	b2 = gen_cmp(base_off + 2 + 1 + offset_sh,
	    BPF_H, (bpf_int32)ntohs(addr));
	gen_and(tmp, b2);
	gen_or(b2, b1);
	/* Check for pad = 0, short header case */
	tmp = gen_mcmp(base_off + 2, BPF_B, (bpf_int32)0x02, (bpf_int32)0x7);
	b2 = gen_cmp(base_off + 2 + offset_sh, BPF_H, (bpf_int32)ntohs(addr));
	gen_and(tmp, b2);
	gen_or(b2, b1);

	/* Combine with test for linktype */
	gen_and(b0, b1);
	return b1;
}
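/*
 * Dispatch a host test on the protocol qualifier: by default a packet
 * matches if the address appears as an IP, ARP or RARP address; an
 * explicit 'ip', 'arp', 'rarp' or 'decnet' qualifier narrows that to one
 * header layout, and the transport-layer qualifiers are rejected here.
 */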
static struct block *
gen_host(addr, mask, proto, dir)
	bpf_u_int32 addr;
	bpf_u_int32 mask;
	int proto;
	int dir;
{
	struct block *b0, *b1;

	switch (proto) {

	case Q_DEFAULT:
		b0 = gen_host(addr, mask, Q_IP, dir);
		b1 = gen_host(addr, mask, Q_ARP, dir);
		gen_or(b0, b1);
		b0 = gen_host(addr, mask, Q_RARP, dir);
		gen_or(b1, b0);
		return b0;

	case Q_IP:
		return gen_hostop(addr, mask, dir, ETHERTYPE_IP,
		    off_nl + 12, off_nl + 16);

	case Q_RARP:
		return gen_hostop(addr, mask, dir, ETHERTYPE_REVARP,
		    off_nl + 14, off_nl + 24);

	case Q_ARP:
		return gen_hostop(addr, mask, dir, ETHERTYPE_ARP,
		    off_nl + 14, off_nl + 24);

	case Q_TCP:
		bpf_error("'tcp' modifier applied to host");

	case Q_UDP:
		bpf_error("'udp' modifier applied to host");

	case Q_ICMP:
		bpf_error("'icmp' modifier applied to host");

	case Q_IGMP:
		bpf_error("'igmp' modifier applied to host");

	case Q_DECNET:
		return gen_dnhostop(addr, dir, off_nl);

	case Q_LAT:
		bpf_error("LAT host filtering not implemented");

	case Q_MOPDL:
		bpf_error("MOPDL host filtering not implemented");

	case Q_MOPRC:
		bpf_error("MOPRC host filtering not implemented");

	default:
		abort();
	}
	/* NOTREACHED */
}

static struct block *
gen_gateway(eaddr, alist, proto, dir)
	u_char *eaddr;
	bpf_u_int32 **alist;
	int proto;
	int dir;
{
	struct block *b0, *b1, *tmp;

	if (dir != 0)
		bpf_error("direction applied to 'gateway'");

	switch (proto) {
	case Q_DEFAULT:
	case Q_IP:
	case Q_ARP:
	case Q_RARP:
		if (linktype == DLT_EN10MB)
			b0 = gen_ehostop(eaddr, Q_OR);
		else if (linktype == DLT_FDDI)
			b0 = gen_fhostop(eaddr, Q_OR);
		else
			bpf_error(
			    "'gateway' supported only on ethernet or FDDI");

		b1 = gen_host(**alist++, 0xffffffffL, proto, Q_OR);
		while (*alist) {
			tmp = gen_host(**alist++, 0xffffffffL, proto, Q_OR);
			gen_or(b1, tmp);
			b1 = tmp;
		}
		gen_not(b1);
		gen_and(b0, b1);
		return b1;
	}
	bpf_error("illegal modifier of 'gateway'");
	/* NOTREACHED */
}

struct block *
gen_proto_abbrev(proto)
	int proto;
{
	struct block *b0, *b1;

	switch (proto) {

	case Q_TCP:
		b0 = gen_linktype(ETHERTYPE_IP);
		b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)IPPROTO_TCP);
		gen_and(b0, b1);
		break;

	case Q_UDP:
		b0 = gen_linktype(ETHERTYPE_IP);
		b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)IPPROTO_UDP);
		gen_and(b0, b1);
		break;

	case Q_ICMP:
		b0 = gen_linktype(ETHERTYPE_IP);
		b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)IPPROTO_ICMP);
		gen_and(b0, b1);
		break;

	case Q_IGMP:
		b0 = gen_linktype(ETHERTYPE_IP);
		b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)2);
		gen_and(b0, b1);
		break;

	case Q_IP:
		b1 = gen_linktype(ETHERTYPE_IP);
		break;

	case Q_ARP:
		b1 = gen_linktype(ETHERTYPE_ARP);
		break;

	case Q_RARP:
		b1 = gen_linktype(ETHERTYPE_REVARP);
		break;

	case Q_LINK:
		bpf_error("link layer applied in wrong context");

	case Q_DECNET:
		b1 = gen_linktype(ETHERTYPE_DN);
		break;

	case Q_LAT:
		b1 = gen_linktype(ETHERTYPE_LAT);
		break;

	case Q_MOPDL:
		b1 = gen_linktype(ETHERTYPE_MOPDL);
		break;

	case Q_MOPRC:
		b1 = gen_linktype(ETHERTYPE_MOPRC);
		break;

	default:
		abort();
	}
	return b1;
}
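/*
 * Port tests are built from the two pieces below: gen_ipfrag() insists
 * that the 13-bit fragment offset is zero, so the transport header is
 * actually present, and gen_portatom() uses the BPF_MSH instruction to
 * compute the variable IP header length and then compares a 16-bit port
 * at offset 0 (source) or 2 (destination) of the transport header.
 */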
static struct block *
gen_ipfrag()
{
	struct slist *s;
	struct block *b;

	/* not ip frag */
	s = new_stmt(BPF_LD|BPF_H|BPF_ABS);
	s->s.k = off_nl + 6;
	b = new_block(JMP(BPF_JSET));
	b->s.k = 0x1fff;
	b->stmts = s;
	gen_not(b);

	return b;
}

static struct block *
gen_portatom(off, v)
	int off;
	bpf_int32 v;
{
	struct slist *s;
	struct block *b;

	s = new_stmt(BPF_LDX|BPF_MSH|BPF_B);
	s->s.k = off_nl;

	s->next = new_stmt(BPF_LD|BPF_IND|BPF_H);
	s->next->s.k = off_nl + off;

	b = new_block(JMP(BPF_JEQ));
	b->stmts = s;
	b->s.k = v;

	return b;
}

struct block *
gen_portop(port, proto, dir)
	int port, proto, dir;
{
	struct block *b0, *b1, *tmp;

	/* ip proto 'proto' */
	tmp = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)proto);
	b0 = gen_ipfrag();
	gen_and(tmp, b0);

	switch (dir) {
	case Q_SRC:
		b1 = gen_portatom(0, (bpf_int32)port);
		break;

	case Q_DST:
		b1 = gen_portatom(2, (bpf_int32)port);
		break;

	case Q_OR:
	case Q_DEFAULT:
		tmp = gen_portatom(0, (bpf_int32)port);
		b1 = gen_portatom(2, (bpf_int32)port);
		gen_or(tmp, b1);
		break;

	case Q_AND:
		tmp = gen_portatom(0, (bpf_int32)port);
		b1 = gen_portatom(2, (bpf_int32)port);
		gen_and(tmp, b1);
		break;

	default:
		abort();
	}
	gen_and(b0, b1);

	return b1;
}

static struct block *
gen_port(port, ip_proto, dir)
	int port;
	int ip_proto;
	int dir;
{
	struct block *b0, *b1, *tmp;

	/* ether proto ip */
	b0 = gen_linktype(ETHERTYPE_IP);

	switch (ip_proto) {
	case IPPROTO_UDP:
	case IPPROTO_TCP:
		b1 = gen_portop(port, ip_proto, dir);
		break;

	case PROTO_UNDEF:
		tmp = gen_portop(port, IPPROTO_TCP, dir);
		b1 = gen_portop(port, IPPROTO_UDP, dir);
		gen_or(tmp, b1);
		break;

	default:
		abort();
	}
	gen_and(b0, b1);
	return b1;
}

static int
lookup_proto(name, proto)
	char *name;
	int proto;
{
	int v;

	switch (proto) {
	case Q_DEFAULT:
	case Q_IP:
		v = pcap_nametoproto(name);
		if (v == PROTO_UNDEF)
			bpf_error("unknown ip proto '%s'", name);
		break;

	case Q_LINK:
		/* XXX should look up h/w protocol type based on linktype */
		v = pcap_nametoeproto(name);
		if (v == PROTO_UNDEF)
			bpf_error("unknown ether proto '%s'", name);
		break;

	default:
		v = PROTO_UNDEF;
		break;
	}
	return v;
}
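/*
 * Handle the "<qualifier> proto <id>" form: for ip (and by default) the
 * IP protocol field is compared against 'v', for 'link' the link-level
 * type field is; the remaining qualifiers either don't encapsulate
 * anything or aren't supported, and produce an error.
 */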
static struct block *
gen_proto(v, proto, dir)
	int v;
	int proto;
	int dir;
{
	struct block *b0, *b1;

	if (dir != Q_DEFAULT)
		bpf_error("direction applied to 'proto'");

	switch (proto) {
	case Q_DEFAULT:
	case Q_IP:
		b0 = gen_linktype(ETHERTYPE_IP);
		b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)v);
		gen_and(b0, b1);
		return b1;

	case Q_ARP:
		bpf_error("arp does not encapsulate another protocol");
		/* NOTREACHED */

	case Q_RARP:
		bpf_error("rarp does not encapsulate another protocol");
		/* NOTREACHED */

	case Q_DECNET:
		bpf_error("decnet encapsulation is not specifiable");
		/* NOTREACHED */

	case Q_LAT:
		bpf_error("lat does not encapsulate another protocol");
		/* NOTREACHED */

	case Q_MOPRC:
		bpf_error("moprc does not encapsulate another protocol");
		/* NOTREACHED */

	case Q_MOPDL:
		bpf_error("mopdl does not encapsulate another protocol");
		/* NOTREACHED */

	case Q_LINK:
		return gen_linktype(v);

	case Q_UDP:
		bpf_error("'udp proto' is bogus");
		/* NOTREACHED */

	case Q_TCP:
		bpf_error("'tcp proto' is bogus");
		/* NOTREACHED */

	case Q_ICMP:
		bpf_error("'icmp proto' is bogus");
		/* NOTREACHED */

	case Q_IGMP:
		bpf_error("'igmp proto' is bogus");
		/* NOTREACHED */

	default:
		abort();
		/* NOTREACHED */
	}
	/* NOTREACHED */
}

/*
 * Left justify 'addr' and return its resulting network mask.
 */
static bpf_u_int32
net_mask(addr)
	bpf_u_int32 *addr;
{
	register bpf_u_int32 m = 0xffffffff;

	if (*addr)
		while ((*addr & 0xff000000) == 0)
			*addr <<= 8, m <<= 8;

	return m;
}
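/*
 * Build a filter block from a name together with its qualifiers: the
 * name is resolved as a network, host (link-level, DECNET or Internet),
 * port, gateway or protocol depending on q.addr and q.proto, and the
 * appropriate generator above is invoked.
 */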
struct block *
gen_scode(name, q)
	char *name;
	struct qual q;
{
	int proto = q.proto;
	int dir = q.dir;
	u_char *eaddr;
	bpf_u_int32 mask, addr, **alist;
	struct block *b, *tmp;
	int port, real_proto;

	switch (q.addr) {

	case Q_NET:
		addr = pcap_nametonetaddr(name);
		if (addr == 0)
			bpf_error("unknown network '%s'", name);
		mask = net_mask(&addr);
		return gen_host(addr, mask, proto, dir);

	case Q_DEFAULT:
	case Q_HOST:
		if (proto == Q_LINK) {
			switch (linktype) {

			case DLT_EN10MB:
				eaddr = pcap_ether_hostton(name);
				if (eaddr == NULL)
					bpf_error(
					    "unknown ether host '%s'", name);
				return gen_ehostop(eaddr, dir);

			case DLT_FDDI:
				eaddr = pcap_ether_hostton(name);
				if (eaddr == NULL)
					bpf_error(
					    "unknown FDDI host '%s'", name);
				return gen_fhostop(eaddr, dir);

			default:
				bpf_error(
			"only ethernet/FDDI supports link-level host name");
				break;
			}
		} else if (proto == Q_DECNET) {
			unsigned short dn_addr = __pcap_nametodnaddr(name);
			/*
			 * I don't think DECNET hosts can be multihomed, so
			 * there is no need to build up a list of addresses
			 */
			return (gen_host(dn_addr, 0, proto, dir));
		} else {
			alist = pcap_nametoaddr(name);
			if (alist == NULL || *alist == NULL)
				bpf_error("unknown host '%s'", name);
			b = gen_host(**alist++, 0xffffffffL, proto, dir);
			while (*alist) {
				tmp = gen_host(**alist++, 0xffffffffL,
				    proto, dir);
				gen_or(b, tmp);
				b = tmp;
			}
			return b;
		}

	case Q_PORT:
		if (proto != Q_DEFAULT && proto != Q_UDP && proto != Q_TCP)
			bpf_error("illegal qualifier of 'port'");
		if (pcap_nametoport(name, &port, &real_proto) == 0)
			bpf_error("unknown port '%s'", name);
		if (proto == Q_UDP) {
			if (real_proto == IPPROTO_TCP)
				bpf_error("port '%s' is tcp", name);
			else
				/* override PROTO_UNDEF */
				real_proto = IPPROTO_UDP;
		}
		if (proto == Q_TCP) {
			if (real_proto == IPPROTO_UDP)
				bpf_error("port '%s' is udp", name);
			else
				/* override PROTO_UNDEF */
				real_proto = IPPROTO_TCP;
		}
		return gen_port(port, real_proto, dir);

	case Q_GATEWAY:
		eaddr = pcap_ether_hostton(name);
		if (eaddr == NULL)
			bpf_error("unknown ether host: %s", name);

		alist = pcap_nametoaddr(name);
		if (alist == NULL || *alist == NULL)
			bpf_error("unknown host '%s'", name);
		return gen_gateway(eaddr, alist, proto, dir);

	case Q_PROTO:
		real_proto = lookup_proto(name, proto);
		if (real_proto >= 0)
			return gen_proto(real_proto, proto, dir);
		else
			bpf_error("unknown protocol: %s", name);

	case Q_UNDEF:
		syntax();
		/* NOTREACHED */
	}
	abort();
	/* NOTREACHED */
}

struct block *
gen_ncode(v, q)
	bpf_u_int32 v;
	struct qual q;
{
	bpf_u_int32 mask;
	int proto = q.proto;
	int dir = q.dir;

	switch (q.addr) {

	case Q_DEFAULT:
	case Q_HOST:
	case Q_NET:
		if (proto == Q_DECNET)
			return gen_host(v, 0, proto, dir);
		else if (proto == Q_LINK) {
			bpf_error("illegal link layer address");
		} else {
			mask = net_mask(&v);
			return gen_host(v, mask, proto, dir);
		}

	case Q_PORT:
		if (proto == Q_UDP)
			proto = IPPROTO_UDP;
		else if (proto == Q_TCP)
			proto = IPPROTO_TCP;
		else if (proto == Q_DEFAULT)
			proto = PROTO_UNDEF;
		else
			bpf_error("illegal qualifier of 'port'");

		return gen_port((int)v, proto, dir);

	case Q_GATEWAY:
		bpf_error("'gateway' requires a name");
		/* NOTREACHED */

	case Q_PROTO:
		return gen_proto((int)v, proto, dir);

	case Q_UNDEF:
		syntax();
		/* NOTREACHED */

	default:
		abort();
		/* NOTREACHED */
	}
	/* NOTREACHED */
}

struct block *
gen_ecode(eaddr, q)
	u_char *eaddr;
	struct qual q;
{
	if ((q.addr == Q_HOST || q.addr == Q_DEFAULT) && q.proto == Q_LINK) {
		if (linktype == DLT_EN10MB)
			return gen_ehostop(eaddr, (int)q.dir);
		if (linktype == DLT_FDDI)
			return gen_fhostop(eaddr, (int)q.dir);
	}
	bpf_error("ethernet address used in non-ether expression");
	/* NOTREACHED */
}

void
sappend(s0, s1)
	struct slist *s0, *s1;
{
	/*
	 * This is definitely not the best way to do this, but the
	 * lists will rarely get long.
	 */
	while (s0->next)
		s0 = s0->next;
	s0->next = s1;
}

static struct slist *
xfer_to_x(a)
	struct arth *a;
{
	struct slist *s;

	s = new_stmt(BPF_LDX|BPF_MEM);
	s->s.k = a->regno;
	return s;
}

static struct slist *
xfer_to_a(a)
	struct arth *a;
{
	struct slist *s;

	s = new_stmt(BPF_LD|BPF_MEM);
	s->s.k = a->regno;
	return s;
}
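/*
 * The functions below implement the arithmetic side of the language
 * (expr[idx:size], len, comparisons).  Intermediate results live in BPF
 * scratch memory slots handed out by alloc_reg(); xfer_to_x()/xfer_to_a()
 * reload them, and any protocol checks implied by the accessor are
 * collected in arth->b and ANDed into the final relation.
 */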
struct arth *
gen_load(proto, index, size)
	int proto;
	struct arth *index;
	int size;
{
	struct slist *s, *tmp;
	struct block *b;
	int regno = alloc_reg();

	free_reg(index->regno);
	switch (size) {

	default:
		bpf_error("data size must be 1, 2, or 4");

	case 1:
		size = BPF_B;
		break;

	case 2:
		size = BPF_H;
		break;

	case 4:
		size = BPF_W;
		break;
	}
	switch (proto) {
	default:
		bpf_error("unsupported index operation");

	case Q_LINK:
		s = xfer_to_x(index);
		tmp = new_stmt(BPF_LD|BPF_IND|size);
		sappend(s, tmp);
		sappend(index->s, s);
		break;

	case Q_IP:
	case Q_ARP:
	case Q_RARP:
	case Q_DECNET:
	case Q_LAT:
	case Q_MOPRC:
	case Q_MOPDL:
		/* XXX Note that we assume a fixed link header here. */
		s = xfer_to_x(index);
		tmp = new_stmt(BPF_LD|BPF_IND|size);
		tmp->s.k = off_nl;
		sappend(s, tmp);
		sappend(index->s, s);

		b = gen_proto_abbrev(proto);
		if (index->b)
			gen_and(index->b, b);
		index->b = b;
		break;

	case Q_TCP:
	case Q_UDP:
	case Q_ICMP:
	case Q_IGMP:
		s = new_stmt(BPF_LDX|BPF_MSH|BPF_B);
		s->s.k = off_nl;
		sappend(s, xfer_to_a(index));
		sappend(s, new_stmt(BPF_ALU|BPF_ADD|BPF_X));
		sappend(s, new_stmt(BPF_MISC|BPF_TAX));
		sappend(s, tmp = new_stmt(BPF_LD|BPF_IND|size));
		tmp->s.k = off_nl;
		sappend(index->s, s);

		gen_and(gen_proto_abbrev(proto), b = gen_ipfrag());
		if (index->b)
			gen_and(index->b, b);
		index->b = b;
		break;
	}
	index->regno = regno;
	s = new_stmt(BPF_ST);
	s->s.k = regno;
	sappend(index->s, s);

	return index;
}

struct block *
gen_relation(code, a0, a1, reversed)
	int code;
	struct arth *a0, *a1;
	int reversed;
{
	struct slist *s0, *s1, *s2;
	struct block *b, *tmp;

	s0 = xfer_to_x(a1);
	s1 = xfer_to_a(a0);
	s2 = new_stmt(BPF_ALU|BPF_SUB|BPF_X);
	b = new_block(JMP(code));
	if (code == BPF_JGT || code == BPF_JGE) {
		reversed = !reversed;
		b->s.k = 0x80000000;
	}
	if (reversed)
		gen_not(b);

	sappend(s1, s2);
	sappend(s0, s1);
	sappend(a1->s, s0);
	sappend(a0->s, a1->s);

	b->stmts = a0->s;

	free_reg(a0->regno);
	free_reg(a1->regno);

	/* 'and' together protocol checks */
	if (a0->b) {
		if (a1->b)
			gen_and(a0->b, tmp = a1->b);
		else
			tmp = a0->b;
	} else
		tmp = a1->b;

	if (tmp)
		gen_and(tmp, b);

	return b;
}

struct arth *
gen_loadlen()
{
	int regno = alloc_reg();
	struct arth *a = (struct arth *)newchunk(sizeof(*a));
	struct slist *s;

	s = new_stmt(BPF_LD|BPF_LEN);
	s->next = new_stmt(BPF_ST);
	s->next->s.k = regno;
	a->s = s;
	a->regno = regno;

	return a;
}

struct arth *
gen_loadi(val)
	int val;
{
	struct arth *a;
	struct slist *s;
	int reg;

	a = (struct arth *)newchunk(sizeof(*a));

	reg = alloc_reg();

	s = new_stmt(BPF_LD|BPF_IMM);
	s->s.k = val;
	s->next = new_stmt(BPF_ST);
	s->next->s.k = reg;
	a->s = s;
	a->regno = reg;

	return a;
}

struct arth *
gen_neg(a)
	struct arth *a;
{
	struct slist *s;

	s = xfer_to_a(a);
	sappend(a->s, s);
	s = new_stmt(BPF_ALU|BPF_NEG);
	s->s.k = 0;
	sappend(a->s, s);
	s = new_stmt(BPF_ST);
	s->s.k = a->regno;
	sappend(a->s, s);

	return a;
}

struct arth *
gen_arth(code, a0, a1)
	int code;
	struct arth *a0, *a1;
{
	struct slist *s0, *s1, *s2;

	s0 = xfer_to_x(a1);
	s1 = xfer_to_a(a0);
	s2 = new_stmt(BPF_ALU|BPF_X|code);

	sappend(s1, s2);
	sappend(s0, s1);
	sappend(a1->s, s0);
	sappend(a0->s, a1->s);

	free_reg(a1->regno);

	s0 = new_stmt(BPF_ST);
	a0->regno = s0->s.k = alloc_reg();
	sappend(a0->s, s0);

	return a0;
}

/*
 * Here we handle simple allocation of the scratch registers.
 * If too many registers are alloc'd, the allocator punts.
 */
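/*
 * The "registers" are the BPF scratch memory words (BPF_MEMWORDS of them)
 * addressed by the BPF_ST and BPF_LD|BPF_MEM instructions emitted above;
 * curreg just remembers where the previous scan left off.
 */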
static int regused[BPF_MEMWORDS];
static int curreg;

/*
 * Return the next free register.
 */
static int
alloc_reg()
{
	int n = BPF_MEMWORDS;

	while (--n >= 0) {
		if (regused[curreg])
			curreg = (curreg + 1) % BPF_MEMWORDS;
		else {
			regused[curreg] = 1;
			return curreg;
		}
	}
	bpf_error("too many registers needed to evaluate expression");
	/* NOTREACHED */
}

/*
 * Return a register to the table so it can
 * be used later.
 */
static void
free_reg(n)
	int n;
{
	regused[n] = 0;
}

static struct block *
gen_len(jmp, n)
	int jmp, n;
{
	struct slist *s;
	struct block *b;

	s = new_stmt(BPF_LD|BPF_LEN);
	b = new_block(JMP(jmp));
	b->stmts = s;
	b->s.k = n;

	return b;
}

struct block *
gen_greater(n)
	int n;
{
	return gen_len(BPF_JGE, n);
}

struct block *
gen_less(n)
	int n;
{
	struct block *b;

	b = gen_len(BPF_JGT, n);
	gen_not(b);

	return b;
}

struct block *
gen_byteop(op, idx, val)
	int op, idx, val;
{
	struct block *b;
	struct slist *s;

	switch (op) {
	default:
		abort();

	case '=':
		return gen_cmp((u_int)idx, BPF_B, (bpf_int32)val);

	case '<':
		b = gen_cmp((u_int)idx, BPF_B, (bpf_int32)val);
		b->s.code = JMP(BPF_JGE);
		gen_not(b);
		return b;

	case '>':
		b = gen_cmp((u_int)idx, BPF_B, (bpf_int32)val);
		b->s.code = JMP(BPF_JGT);
		return b;

	case '|':
		s = new_stmt(BPF_ALU|BPF_OR|BPF_K);
		break;

	case '&':
		s = new_stmt(BPF_ALU|BPF_AND|BPF_K);
		break;
	}
	s->s.k = val;
	b = new_block(JMP(BPF_JEQ));
	b->stmts = s;
	gen_not(b);

	return b;
}
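/*
 * 'broadcast' and 'multicast': the link-level forms test the destination
 * MAC address directly; 'ip broadcast' relies on the netmask given to
 * pcap_compile() to decide which host bits must be all zeroes or all
 * ones, and 'ip multicast' checks that the first byte of the destination
 * address is >= 224.
 */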
struct block *
gen_broadcast(proto)
	int proto;
{
	bpf_u_int32 hostmask;
	struct block *b0, *b1, *b2;
	static u_char ebroadcast[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	switch (proto) {

	case Q_DEFAULT:
	case Q_LINK:
		if (linktype == DLT_EN10MB)
			return gen_ehostop(ebroadcast, Q_DST);
		if (linktype == DLT_FDDI)
			return gen_fhostop(ebroadcast, Q_DST);
		bpf_error("not a broadcast link");
		break;

	case Q_IP:
		b0 = gen_linktype(ETHERTYPE_IP);
		hostmask = ~netmask;
		b1 = gen_mcmp(off_nl + 16, BPF_W, (bpf_int32)0, hostmask);
		b2 = gen_mcmp(off_nl + 16, BPF_W,
		    (bpf_int32)(~0 & hostmask), hostmask);
		gen_or(b1, b2);
		gen_and(b0, b2);
		return b2;
	}
	bpf_error("only ether/ip broadcast filters supported");
	/* NOTREACHED */
}

struct block *
gen_multicast(proto)
	int proto;
{
	register struct block *b0, *b1;
	register struct slist *s;

	switch (proto) {

	case Q_DEFAULT:
	case Q_LINK:
		if (linktype == DLT_EN10MB) {
			/* ether[0] & 1 != 0 */
			s = new_stmt(BPF_LD|BPF_B|BPF_ABS);
			s->s.k = 0;
			b0 = new_block(JMP(BPF_JSET));
			b0->s.k = 1;
			b0->stmts = s;
			return b0;
		}

		if (linktype == DLT_FDDI) {
			/* XXX TEST THIS: MIGHT NOT PORT PROPERLY XXX */
			/* fddi[1] & 1 != 0 */
			s = new_stmt(BPF_LD|BPF_B|BPF_ABS);
			s->s.k = 1;
			b0 = new_block(JMP(BPF_JSET));
			b0->s.k = 1;
			b0->stmts = s;
			return b0;
		}
		/* Link not known to support multicasts */
		break;

	case Q_IP:
		b0 = gen_linktype(ETHERTYPE_IP);
		b1 = gen_cmp(off_nl + 16, BPF_B, (bpf_int32)224);
		b1->s.code = JMP(BPF_JGE);
		gen_and(b0, b1);
		return b1;
	}
	bpf_error("only IP multicast filters supported on ethernet/FDDI");
	/* NOTREACHED */
}

/*
 * Generate code for inbound/outbound.  It's here so we can
 * make it link-type specific.  'dir' = 0 implies "inbound",
 * = 1 implies "outbound".
 */
struct block *
gen_inbound(dir)
	int dir;
{
	register struct block *b0;

	b0 = gen_relation(BPF_JEQ,
	    gen_load(Q_LINK, gen_loadi(0), 1),
	    gen_loadi(0),
	    dir);
	return (b0);
}