1 /* $OpenBSD: gencode.c,v 1.2 1996/03/04 15:47:18 mickey Exp $ */ 2 /* $NetBSD: gencode.c,v 1.2 1995/03/06 11:38:21 mycroft Exp $ */ 3 4 /* 5 * Copyright (c) 1990, 1991, 1992, 1993, 1994 6 * The Regents of the University of California. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that: (1) source code distributions 10 * retain the above copyright notice and this paragraph in its entirety, (2) 11 * distributions including binary code include the above copyright notice and 12 * this paragraph in its entirety in the documentation or other materials 13 * provided with the distribution, and (3) all advertising materials mentioning 14 * features or use of this software display the following acknowledgement: 15 * ``This product includes software developed by the University of California, 16 * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of 17 * the University nor the names of its contributors may be used to endorse 18 * or promote products derived from this software without specific prior 19 * written permission. 20 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED 21 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF 22 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. 23 */ 24 #ifndef lint 25 static char rcsid[] = 26 "@(#) Header: gencode.c,v 1.55 94/06/20 19:07:53 leres Exp (LBL)"; 27 #endif 28 29 #include <sys/types.h> 30 #include <sys/socket.h> 31 #include <sys/time.h> 32 33 #include <net/if.h> 34 #include <net/bpf.h> 35 36 #include <netinet/in.h> 37 #include <netinet/if_ether.h> 38 39 #include <memory.h> 40 #include <pcap.h> 41 #include <pcap-namedb.h> 42 #include <setjmp.h> 43 #if __STDC__ 44 #include <stdarg.h> 45 #include <stdlib.h> 46 #else 47 #include <varargs.h> 48 #endif 49 50 #include "gencode.h" 51 52 #ifndef __GNUC__ 53 #define inline 54 #endif 55 56 #ifndef ETHERTYPE_REVARP 57 #define ETHERTYPE_REVARP 0x8035 58 #endif 59 #ifndef ETHERTYPE_MOPDL 60 #define ETHERTYPE_MOPDL 0x6001 61 #endif 62 #ifndef ETHERTYPE_MOPRC 63 #define ETHERTYPE_MOPRC 0x6002 64 #endif 65 #ifndef ETHERTYPE_DN 66 #define ETHERTYPE_DN 0x6003 67 #endif 68 #ifndef ETHERTYPE_LAT 69 #define ETHERTYPE_LAT 0x6004 70 #endif 71 72 #define JMP(c) ((c)|BPF_JMP|BPF_K) 73 74 static jmp_buf top_ctx; 75 static pcap_t *bpf_pcap; 76 77 /* VARARGS */ 78 volatile void 79 #if __STDC__ || defined(SOLARIS) 80 bpf_error(char *fmt, ...) 81 #else 82 bpf_error(fmt, va_alist) 83 char *fmt; 84 va_dcl 85 #endif 86 { 87 va_list ap; 88 89 #if __STDC__ 90 va_start(ap, fmt); 91 #else 92 va_start(ap); 93 #endif 94 if (bpf_pcap != NULL) 95 (void)vsprintf(pcap_geterr(bpf_pcap), fmt, ap); 96 va_end(ap); 97 longjmp(top_ctx, 1); 98 /* NOTREACHED */ 99 } 100 101 static void init_linktype(int); 102 103 static int alloc_reg(void); 104 static void free_reg(int); 105 106 static struct block *root; 107 108 /* 109 * We divy out chunks of memory rather than call malloc each time so 110 * we don't have to worry about leaking memory. It's probably 111 * not a big deal if all this memory was wasted but it this ever 112 * goes into a library that would probably not be a good idea. 113 */ 114 #define NCHUNKS 16 115 #define CHUNK0SIZE 1024 116 struct chunk { 117 u_int n_left; 118 void *m; 119 }; 120 121 static struct chunk chunks[NCHUNKS]; 122 static int cur_chunk; 123 124 static void *newchunk(u_int); 125 static void freechunks(void); 126 static inline struct block *new_block(int); 127 static inline struct slist *new_stmt(int); 128 static struct block *gen_retblk(int); 129 static inline void syntax(void); 130 131 static void backpatch(struct block *, struct block *); 132 static void merge(struct block *, struct block *); 133 static struct block *gen_cmp(u_int, u_int, long); 134 static struct block *gen_mcmp(u_int, u_int, long, u_long); 135 static struct block *gen_bcmp(u_int, u_int, u_char *); 136 static struct block *gen_uncond(int); 137 static inline struct block *gen_true(void); 138 static inline struct block *gen_false(void); 139 static struct block *gen_linktype(int); 140 static struct block *gen_hostop(u_long, u_long, int, int, u_int, u_int); 141 static struct block *gen_ehostop(u_char *, int); 142 #ifdef FDDI 143 static struct block *gen_fhostop(u_char *, int); 144 #endif 145 static struct block *gen_dnhostop(u_long, int, u_int); 146 static struct block *gen_host(u_long, u_long, int, int); 147 static struct block *gen_gateway(u_char *, u_long **, int, int); 148 static struct block *gen_ipfrag(void); 149 static struct block *gen_portatom(int, long); 150 struct block *gen_portop(int, int, int); 151 static struct block *gen_port(int, int, int); 152 static int lookup_proto(char *, int); 153 static struct block *gen_proto(int, int, int); 154 static u_long net_mask(u_long *); 155 static u_long net_mask(u_long *); 156 static struct slist *xfer_to_x(struct arth *); 157 static struct slist *xfer_to_a(struct arth *); 158 static struct block *gen_len(int, int); 159 160 static void * 161 newchunk(n) 162 u_int n; 163 { 164 struct chunk *cp; 165 int k, size; 166 167 /* XXX Round up to nearest long. */ 168 n = (n + sizeof(long) - 1) & ~(sizeof(long) - 1); 169 170 cp = &chunks[cur_chunk]; 171 if (n > cp->n_left) { 172 ++cp, k = ++cur_chunk; 173 if (k >= NCHUNKS) 174 bpf_error("out of memory"); 175 size = CHUNK0SIZE << k; 176 cp->m = (void *)malloc(size); 177 memset((char *)cp->m, 0, size); 178 cp->n_left = size; 179 if (n > size) 180 bpf_error("out of memory"); 181 } 182 cp->n_left -= n; 183 return (void *)((char *)cp->m + cp->n_left); 184 } 185 186 static void 187 freechunks() 188 { 189 int i; 190 191 for (i = 0; i < NCHUNKS; ++i) 192 if (chunks[i].m) 193 free(chunks[i].m); 194 } 195 196 /* 197 * A strdup whose allocations are freed after code generation is over. 198 */ 199 char * 200 sdup(s) 201 char *s; 202 { 203 int n = strlen(s) + 1; 204 char *cp = newchunk(n); 205 strcpy(cp, s); 206 return (cp); 207 } 208 209 static inline struct block * 210 new_block(code) 211 int code; 212 { 213 struct block *p; 214 215 p = (struct block *)newchunk(sizeof(*p)); 216 p->s.code = code; 217 p->head = p; 218 219 return p; 220 } 221 222 static inline struct slist * 223 new_stmt(code) 224 int code; 225 { 226 struct slist *p; 227 228 p = (struct slist *)newchunk(sizeof(*p)); 229 p->s.code = code; 230 231 return p; 232 } 233 234 static struct block * 235 gen_retblk(v) 236 int v; 237 { 238 struct block *b = new_block(BPF_RET|BPF_K); 239 240 b->s.k = v; 241 return b; 242 } 243 244 static inline void 245 syntax() 246 { 247 bpf_error("syntax error in filter expression"); 248 } 249 250 static u_long netmask; 251 static int snaplen; 252 253 int 254 pcap_compile(pcap_t *p, struct bpf_program *program, 255 char *buf, int optimize, u_long mask) 256 { 257 extern int n_errors; 258 int len; 259 260 bpf_pcap = p; 261 if (setjmp(top_ctx)) 262 return (-1); 263 264 netmask = mask; 265 snaplen = pcap_snapshot(p); 266 267 lex_init(buf ? buf : ""); 268 init_linktype(pcap_datalink(p)); 269 pcap_parse(); 270 271 if (n_errors) 272 syntax(); 273 274 if (root == NULL) 275 root = gen_retblk(snaplen); 276 277 if (optimize) { 278 bpf_optimize(&root); 279 if (root == NULL || 280 (root->s.code == (BPF_RET|BPF_K) && root->s.k == 0)) 281 bpf_error("expression rejects all packets"); 282 } 283 program->bf_insns = icode_to_fcode(root, &len); 284 program->bf_len = len; 285 286 freechunks(); 287 return (0); 288 } 289 290 /* 291 * Backpatch the blocks in 'list' to 'target'. The 'sense' field indicates 292 * which of the jt and jf fields has been resolved and which is a pointer 293 * back to another unresolved block (or nil). At least one of the fields 294 * in each block is already resolved. 295 */ 296 static void 297 backpatch(list, target) 298 struct block *list, *target; 299 { 300 struct block *next; 301 302 while (list) { 303 if (!list->sense) { 304 next = JT(list); 305 JT(list) = target; 306 } else { 307 next = JF(list); 308 JF(list) = target; 309 } 310 list = next; 311 } 312 } 313 314 /* 315 * Merge the lists in b0 and b1, using the 'sense' field to indicate 316 * which of jt and jf is the link. 317 */ 318 static void 319 merge(b0, b1) 320 struct block *b0, *b1; 321 { 322 register struct block **p = &b0; 323 324 /* Find end of list. */ 325 while (*p) 326 p = !((*p)->sense) ? &JT(*p) : &JF(*p); 327 328 /* Concatenate the lists. */ 329 *p = b1; 330 } 331 332 void 333 finish_parse(p) 334 struct block *p; 335 { 336 backpatch(p, gen_retblk(snaplen)); 337 p->sense = !p->sense; 338 backpatch(p, gen_retblk(0)); 339 root = p->head; 340 } 341 342 void 343 gen_and(b0, b1) 344 struct block *b0, *b1; 345 { 346 backpatch(b0, b1->head); 347 b0->sense = !b0->sense; 348 b1->sense = !b1->sense; 349 merge(b1, b0); 350 b1->sense = !b1->sense; 351 b1->head = b0->head; 352 } 353 354 void 355 gen_or(b0, b1) 356 struct block *b0, *b1; 357 { 358 b0->sense = !b0->sense; 359 backpatch(b0, b1->head); 360 b0->sense = !b0->sense; 361 merge(b1, b0); 362 b1->head = b0->head; 363 } 364 365 void 366 gen_not(b) 367 struct block *b; 368 { 369 b->sense = !b->sense; 370 } 371 372 static struct block * 373 gen_cmp(offset, size, v) 374 u_int offset, size; 375 long v; 376 { 377 struct slist *s; 378 struct block *b; 379 380 s = new_stmt(BPF_LD|BPF_ABS|size); 381 s->s.k = offset; 382 383 b = new_block(JMP(BPF_JEQ)); 384 b->stmts = s; 385 b->s.k = v; 386 387 return b; 388 } 389 390 static struct block * 391 gen_mcmp(offset, size, v, mask) 392 u_int offset, size; 393 long v; 394 u_long mask; 395 { 396 struct block *b = gen_cmp(offset, size, v); 397 struct slist *s; 398 399 if (mask != 0xffffffff) { 400 s = new_stmt(BPF_ALU|BPF_AND|BPF_K); 401 s->s.k = mask; 402 b->stmts->next = s; 403 } 404 return b; 405 } 406 407 static struct block * 408 gen_bcmp(offset, size, v) 409 u_int offset, size; 410 u_char *v; 411 { 412 struct block *b, *tmp; 413 414 b = NULL; 415 while (size >= 4) { 416 u_char *p = &v[size - 4]; 417 long w = (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]; 418 tmp = gen_cmp(offset + size - 4, BPF_W, w); 419 if (b != NULL) 420 gen_and(b, tmp); 421 b = tmp; 422 size -= 4; 423 } 424 while (size >= 2) { 425 u_char *p = &v[size - 2]; 426 long w = (p[0] << 8) | p[1]; 427 tmp = gen_cmp(offset + size - 2, BPF_H, w); 428 if (b != NULL) 429 gen_and(b, tmp); 430 b = tmp; 431 size -= 2; 432 } 433 if (size > 0) { 434 tmp = gen_cmp(offset, BPF_B, (long)v[0]); 435 if (b != NULL) 436 gen_and(b, tmp); 437 b = tmp; 438 } 439 return b; 440 } 441 442 /* 443 * Various code constructs need to know the layout of the data link 444 * layer. These variables give the necessary offsets. off_linktype 445 * is set to -1 for no encapsulation, in which case, IP is assumed. 446 */ 447 static u_int off_linktype; 448 static u_int off_nl; 449 static int linktype; 450 #ifdef FDDI 451 extern int fddipad; 452 #endif 453 454 static void 455 init_linktype(type) 456 int type; 457 { 458 linktype = type; 459 460 switch (type) { 461 462 case DLT_EN10MB: 463 off_linktype = 12; 464 off_nl = 14; 465 return; 466 467 case DLT_SLIP: 468 /* 469 * SLIP doesn't have a link level type. The 16 byte 470 * header is hacked into our SLIP driver. 471 */ 472 off_linktype = -1; 473 off_nl = 16; 474 return; 475 476 case DLT_NULL: 477 off_linktype = -1; 478 off_nl = 0; 479 return; 480 481 case DLT_PPP: 482 off_linktype = 2; 483 off_nl = 4; 484 return; 485 486 #ifdef FDDI 487 case DLT_FDDI: 488 /* 489 * FDDI doesn't really have a link-level type field. 490 * We assume that SSAP = SNAP is being used and pick 491 * out the encapsulated Ethernet type. 492 */ 493 off_linktype = 19 + fddipad; 494 off_nl = 21 + fddipad; 495 return; 496 #endif 497 498 case DLT_IEEE802: 499 off_linktype = 20; 500 off_nl = 22; 501 return; 502 } 503 bpf_error("unknown data link type 0x%x", linktype); 504 /* NOTREACHED */ 505 } 506 507 static struct block * 508 gen_uncond(rsense) 509 int rsense; 510 { 511 struct block *b; 512 struct slist *s; 513 514 s = new_stmt(BPF_LD|BPF_IMM); 515 s->s.k = !rsense; 516 b = new_block(JMP(BPF_JEQ)); 517 b->stmts = s; 518 519 return b; 520 } 521 522 static inline struct block * 523 gen_true() 524 { 525 return gen_uncond(1); 526 } 527 528 static inline struct block * 529 gen_false() 530 { 531 return gen_uncond(0); 532 } 533 534 static struct block * 535 gen_linktype(proto) 536 int proto; 537 { 538 switch (linktype) { 539 case DLT_SLIP: 540 if (proto == ETHERTYPE_IP) 541 return gen_true(); 542 else 543 return gen_false(); 544 545 case DLT_PPP: 546 if (proto == ETHERTYPE_IP) 547 proto = 0x0021; /* XXX - need ppp.h defs */ 548 break; 549 } 550 return gen_cmp(off_linktype, BPF_H, (long)proto); 551 } 552 553 static struct block * 554 gen_hostop(addr, mask, dir, proto, src_off, dst_off) 555 u_long addr; 556 u_long mask; 557 int dir, proto; 558 u_int src_off, dst_off; 559 { 560 struct block *b0, *b1; 561 u_int offset; 562 563 switch (dir) { 564 565 case Q_SRC: 566 offset = src_off; 567 break; 568 569 case Q_DST: 570 offset = dst_off; 571 break; 572 573 case Q_AND: 574 b0 = gen_hostop(addr, mask, Q_SRC, proto, src_off, dst_off); 575 b1 = gen_hostop(addr, mask, Q_DST, proto, src_off, dst_off); 576 gen_and(b0, b1); 577 return b1; 578 579 case Q_OR: 580 case Q_DEFAULT: 581 b0 = gen_hostop(addr, mask, Q_SRC, proto, src_off, dst_off); 582 b1 = gen_hostop(addr, mask, Q_DST, proto, src_off, dst_off); 583 gen_or(b0, b1); 584 return b1; 585 586 default: 587 abort(); 588 } 589 b0 = gen_linktype(proto); 590 b1 = gen_mcmp(offset, BPF_W, (long)addr, mask); 591 gen_and(b0, b1); 592 return b1; 593 } 594 595 static struct block * 596 gen_ehostop(eaddr, dir) 597 u_char *eaddr; 598 int dir; 599 { 600 struct block *b0, *b1; 601 602 switch (dir) { 603 case Q_SRC: 604 return gen_bcmp(6, 6, eaddr); 605 606 case Q_DST: 607 return gen_bcmp(0, 6, eaddr); 608 609 case Q_AND: 610 b0 = gen_ehostop(eaddr, Q_SRC); 611 b1 = gen_ehostop(eaddr, Q_DST); 612 gen_and(b0, b1); 613 return b1; 614 615 case Q_DEFAULT: 616 case Q_OR: 617 b0 = gen_ehostop(eaddr, Q_SRC); 618 b1 = gen_ehostop(eaddr, Q_DST); 619 gen_or(b0, b1); 620 return b1; 621 } 622 abort(); 623 /* NOTREACHED */ 624 } 625 626 #ifdef FDDI 627 /* 628 * Like gen_ehostop, but for DLT_FDDI 629 */ 630 static struct block * 631 gen_fhostop(eaddr, dir) 632 u_char *eaddr; 633 int dir; 634 { 635 struct block *b0, *b1; 636 637 switch (dir) { 638 case Q_SRC: 639 return gen_bcmp(6 + 1 + fddipad, 6, eaddr); 640 641 case Q_DST: 642 return gen_bcmp(0 + 1 + fddipad, 6, eaddr); 643 644 case Q_AND: 645 b0 = gen_fhostop(eaddr, Q_SRC); 646 b1 = gen_fhostop(eaddr, Q_DST); 647 gen_and(b0, b1); 648 return b1; 649 650 case Q_DEFAULT: 651 case Q_OR: 652 b0 = gen_fhostop(eaddr, Q_SRC); 653 b1 = gen_fhostop(eaddr, Q_DST); 654 gen_or(b0, b1); 655 return b1; 656 } 657 abort(); 658 /* NOTREACHED */ 659 } 660 #endif 661 662 /* 663 * This is quite tricky because there may be pad bytes in front of the 664 * DECNET header, and then there are two possible data packet formats that 665 * carry both src and dst addresses, plus 5 packet types in a format that 666 * carries only the src node, plus 2 types that use a different format and 667 * also carry just the src node. 668 * 669 * Yuck. 670 * 671 * Instead of doing those all right, we just look for data packets with 672 * 0 or 1 bytes of padding. If you want to look at other packets, that 673 * will require a lot more hacking. 674 * 675 * To add support for filtering on DECNET "areas" (network numbers) 676 * one would want to add a "mask" argument to this routine. That would 677 * make the filter even more inefficient, although one could be clever 678 * and not generate masking instructions if the mask is 0xFFFF. 679 */ 680 static struct block * 681 gen_dnhostop(addr, dir, base_off) 682 u_long addr; 683 int dir; 684 u_int base_off; 685 { 686 struct block *b0, *b1, *b2, *tmp; 687 u_int offset_lh; /* offset if long header is received */ 688 u_int offset_sh; /* offset if short header is received */ 689 690 switch (dir) { 691 692 case Q_DST: 693 offset_sh = 1; /* follows flags */ 694 offset_lh = 7; /* flgs,darea,dsubarea,HIORD */ 695 break; 696 697 case Q_SRC: 698 offset_sh = 3; /* follows flags, dstnode */ 699 offset_lh = 15; /* flgs,darea,dsubarea,did,sarea,ssub,HIORD */ 700 break; 701 702 case Q_AND: 703 /* Inefficient because we do our Calvinball dance twice */ 704 b0 = gen_dnhostop(addr, Q_SRC, base_off); 705 b1 = gen_dnhostop(addr, Q_DST, base_off); 706 gen_and(b0, b1); 707 return b1; 708 709 case Q_OR: 710 case Q_DEFAULT: 711 /* Inefficient because we do our Calvinball dance twice */ 712 b0 = gen_dnhostop(addr, Q_SRC, base_off); 713 b1 = gen_dnhostop(addr, Q_DST, base_off); 714 gen_or(b0, b1); 715 return b1; 716 717 default: 718 abort(); 719 } 720 b0 = gen_linktype(ETHERTYPE_DN); 721 /* Check for pad = 1, long header case */ 722 tmp = gen_mcmp(base_off + 2, BPF_H, 723 (long)ntohs(0x0681), (long)ntohs(0x07FF)); 724 b1 = gen_cmp(base_off + 2 + 1 + offset_lh, BPF_H, (long)ntohs(addr)); 725 gen_and(tmp, b1); 726 /* Check for pad = 0, long header case */ 727 tmp = gen_mcmp(base_off + 2, BPF_B, (long)0x06, (long)0x7); 728 b2 = gen_cmp(base_off + 2 + offset_lh, BPF_H, (long)ntohs(addr)); 729 gen_and(tmp, b2); 730 gen_or(b2, b1); 731 /* Check for pad = 1, short header case */ 732 tmp = gen_mcmp(base_off + 2, BPF_H, 733 (long)ntohs(0x0281), (long)ntohs(0x07FF)); 734 b2 = gen_cmp(base_off + 2 + 1 + offset_sh, BPF_H, (long)ntohs(addr)); 735 gen_and(tmp, b2); 736 gen_or(b2, b1); 737 /* Check for pad = 0, short header case */ 738 tmp = gen_mcmp(base_off + 2, BPF_B, (long)0x02, (long)0x7); 739 b2 = gen_cmp(base_off + 2 + offset_sh, BPF_H, (long)ntohs(addr)); 740 gen_and(tmp, b2); 741 gen_or(b2, b1); 742 743 /* Combine with test for linktype */ 744 gen_and(b0, b1); 745 return b1; 746 } 747 748 static struct block * 749 gen_host(addr, mask, proto, dir) 750 u_long addr; 751 u_long mask; 752 int proto; 753 int dir; 754 { 755 struct block *b0, *b1; 756 757 switch (proto) { 758 759 case Q_DEFAULT: 760 b0 = gen_host(addr, mask, Q_IP, dir); 761 b1 = gen_host(addr, mask, Q_ARP, dir); 762 gen_or(b0, b1); 763 b0 = gen_host(addr, mask, Q_RARP, dir); 764 gen_or(b1, b0); 765 return b0; 766 767 case Q_IP: 768 return gen_hostop(addr, mask, dir, ETHERTYPE_IP, 769 off_nl + 12, off_nl + 16); 770 771 case Q_RARP: 772 return gen_hostop(addr, mask, dir, ETHERTYPE_REVARP, 773 off_nl + 14, off_nl + 24); 774 775 case Q_ARP: 776 return gen_hostop(addr, mask, dir, ETHERTYPE_ARP, 777 off_nl + 14, off_nl + 24); 778 779 case Q_TCP: 780 bpf_error("'tcp' modifier applied to host"); 781 782 case Q_UDP: 783 bpf_error("'udp' modifier applied to host"); 784 785 case Q_ICMP: 786 bpf_error("'icmp' modifier applied to host"); 787 788 case Q_DECNET: 789 return gen_dnhostop(addr, dir, off_nl); 790 791 case Q_LAT: 792 bpf_error("LAT host filtering not implemented"); 793 794 case Q_MOPDL: 795 bpf_error("MOPDL host filtering not implemented"); 796 797 case Q_MOPRC: 798 bpf_error("MOPRC host filtering not implemented"); 799 800 default: 801 abort(); 802 } 803 /* NOTREACHED */ 804 } 805 806 static struct block * 807 gen_gateway(eaddr, alist, proto, dir) 808 u_char *eaddr; 809 u_long **alist; 810 int proto; 811 int dir; 812 { 813 struct block *b0, *b1, *tmp; 814 815 if (dir != 0) 816 bpf_error("direction applied to 'gateway'"); 817 818 switch (proto) { 819 case Q_DEFAULT: 820 case Q_IP: 821 case Q_ARP: 822 case Q_RARP: 823 if (linktype == DLT_EN10MB) 824 b0 = gen_ehostop(eaddr, Q_OR); 825 #ifdef FDDI 826 else if (linktype == DLT_FDDI) 827 b0 = gen_fhostop(eaddr, Q_OR); 828 #endif 829 else 830 bpf_error("'gateway' supported only on ethernet or FDDI"); 831 832 b1 = gen_host(**alist++, 0xffffffffL, proto, Q_OR); 833 while (*alist) { 834 tmp = gen_host(**alist++, 0xffffffffL, proto, Q_OR); 835 gen_or(b1, tmp); 836 b1 = tmp; 837 } 838 gen_not(b1); 839 gen_and(b0, b1); 840 return b1; 841 } 842 bpf_error("illegal modifier of 'gateway'"); 843 /* NOTREACHED */ 844 } 845 846 struct block * 847 gen_proto_abbrev(proto) 848 int proto; 849 { 850 struct block *b0, *b1; 851 852 switch (proto) { 853 854 case Q_TCP: 855 b0 = gen_linktype(ETHERTYPE_IP); 856 b1 = gen_cmp(off_nl + 9, BPF_B, (long)IPPROTO_TCP); 857 gen_and(b0, b1); 858 break; 859 860 case Q_UDP: 861 b0 = gen_linktype(ETHERTYPE_IP); 862 b1 = gen_cmp(off_nl + 9, BPF_B, (long)IPPROTO_UDP); 863 gen_and(b0, b1); 864 break; 865 866 case Q_ICMP: 867 b0 = gen_linktype(ETHERTYPE_IP); 868 b1 = gen_cmp(off_nl + 9, BPF_B, (long)IPPROTO_ICMP); 869 gen_and(b0, b1); 870 break; 871 872 case Q_IP: 873 b1 = gen_linktype(ETHERTYPE_IP); 874 break; 875 876 case Q_ARP: 877 b1 = gen_linktype(ETHERTYPE_ARP); 878 break; 879 880 case Q_RARP: 881 b1 = gen_linktype(ETHERTYPE_REVARP); 882 break; 883 884 case Q_LINK: 885 bpf_error("link layer applied in wrong context"); 886 887 case Q_DECNET: 888 b1 = gen_linktype(ETHERTYPE_DN); 889 break; 890 891 case Q_LAT: 892 b1 = gen_linktype(ETHERTYPE_LAT); 893 break; 894 895 case Q_MOPDL: 896 b1 = gen_linktype(ETHERTYPE_MOPDL); 897 break; 898 899 case Q_MOPRC: 900 b1 = gen_linktype(ETHERTYPE_MOPRC); 901 break; 902 903 default: 904 abort(); 905 } 906 return b1; 907 } 908 909 static struct block * 910 gen_ipfrag() 911 { 912 struct slist *s; 913 struct block *b; 914 915 /* not ip frag */ 916 s = new_stmt(BPF_LD|BPF_H|BPF_ABS); 917 s->s.k = off_nl + 6; 918 b = new_block(JMP(BPF_JSET)); 919 b->s.k = 0x1fff; 920 b->stmts = s; 921 gen_not(b); 922 923 return b; 924 } 925 926 static struct block * 927 gen_portatom(off, v) 928 int off; 929 long v; 930 { 931 struct slist *s; 932 struct block *b; 933 934 s = new_stmt(BPF_LDX|BPF_MSH|BPF_B); 935 s->s.k = off_nl; 936 937 s->next = new_stmt(BPF_LD|BPF_IND|BPF_H); 938 s->next->s.k = off_nl + off; 939 940 b = new_block(JMP(BPF_JEQ)); 941 b->stmts = s; 942 b->s.k = v; 943 944 return b; 945 } 946 947 struct block * 948 gen_portop(port, proto, dir) 949 int port, proto, dir; 950 { 951 struct block *b0, *b1, *tmp; 952 953 /* ip proto 'proto' */ 954 tmp = gen_cmp(off_nl + 9, BPF_B, (long)proto); 955 b0 = gen_ipfrag(); 956 gen_and(tmp, b0); 957 958 switch (dir) { 959 case Q_SRC: 960 b1 = gen_portatom(0, (long)port); 961 break; 962 963 case Q_DST: 964 b1 = gen_portatom(2, (long)port); 965 break; 966 967 case Q_OR: 968 case Q_DEFAULT: 969 tmp = gen_portatom(0, (long)port); 970 b1 = gen_portatom(2, (long)port); 971 gen_or(tmp, b1); 972 break; 973 974 case Q_AND: 975 tmp = gen_portatom(0, (long)port); 976 b1 = gen_portatom(2, (long)port); 977 gen_and(tmp, b1); 978 break; 979 980 default: 981 abort(); 982 } 983 gen_and(b0, b1); 984 985 return b1; 986 } 987 988 static struct block * 989 gen_port(port, ip_proto, dir) 990 int port; 991 int ip_proto; 992 int dir; 993 { 994 struct block *b0, *b1, *tmp; 995 996 /* ether proto ip */ 997 b0 = gen_linktype(ETHERTYPE_IP); 998 999 switch (ip_proto) { 1000 case IPPROTO_UDP: 1001 case IPPROTO_TCP: 1002 b1 = gen_portop(port, ip_proto, dir); 1003 break; 1004 1005 case PROTO_UNDEF: 1006 tmp = gen_portop(port, IPPROTO_TCP, dir); 1007 b1 = gen_portop(port, IPPROTO_UDP, dir); 1008 gen_or(tmp, b1); 1009 break; 1010 1011 default: 1012 abort(); 1013 } 1014 gen_and(b0, b1); 1015 return b1; 1016 } 1017 1018 static int 1019 lookup_proto(name, proto) 1020 char *name; 1021 int proto; 1022 { 1023 int v; 1024 1025 switch (proto) { 1026 case Q_DEFAULT: 1027 case Q_IP: 1028 v = pcap_nametoproto(name); 1029 if (v == PROTO_UNDEF) 1030 bpf_error("unknown ip proto '%s'", name); 1031 break; 1032 1033 case Q_LINK: 1034 /* XXX should look up h/w protocol type based on linktype */ 1035 v = pcap_nametoeproto(name); 1036 if (v == PROTO_UNDEF) 1037 bpf_error("unknown ether proto '%s'", name); 1038 break; 1039 1040 default: 1041 v = PROTO_UNDEF; 1042 break; 1043 } 1044 return v; 1045 } 1046 1047 static struct block * 1048 gen_proto(v, proto, dir) 1049 int v; 1050 int proto; 1051 int dir; 1052 { 1053 struct block *b0, *b1; 1054 1055 if (dir != Q_DEFAULT) 1056 bpf_error("direction applied to 'proto'"); 1057 1058 switch (proto) { 1059 case Q_DEFAULT: 1060 case Q_IP: 1061 b0 = gen_linktype(ETHERTYPE_IP); 1062 b1 = gen_cmp(off_nl + 9, BPF_B, (long)v); 1063 gen_and(b0, b1); 1064 return b1; 1065 1066 case Q_ARP: 1067 bpf_error("arp does not encapsulate another protocol"); 1068 /* NOTREACHED */ 1069 1070 case Q_RARP: 1071 bpf_error("rarp does not encapsulate another protocol"); 1072 /* NOTREACHED */ 1073 1074 case Q_DECNET: 1075 bpf_error("decnet encapsulation is not specifiable"); 1076 /* NOTREACHED */ 1077 1078 case Q_LAT: 1079 bpf_error("lat does not encapsulate another protocol"); 1080 /* NOTREACHED */ 1081 1082 case Q_MOPRC: 1083 bpf_error("moprc does not encapsulate another protocol"); 1084 /* NOTREACHED */ 1085 1086 case Q_MOPDL: 1087 bpf_error("mopdl does not encapsulate another protocol"); 1088 /* NOTREACHED */ 1089 1090 case Q_LINK: 1091 return gen_linktype(v); 1092 1093 case Q_UDP: 1094 bpf_error("'udp proto' is bogus"); 1095 /* NOTREACHED */ 1096 1097 case Q_TCP: 1098 bpf_error("'tcp proto' is bogus"); 1099 /* NOTREACHED */ 1100 1101 case Q_ICMP: 1102 bpf_error("'icmp proto' is bogus"); 1103 /* NOTREACHED */ 1104 1105 default: 1106 abort(); 1107 /* NOTREACHED */ 1108 } 1109 /* NOTREACHED */ 1110 } 1111 1112 /* 1113 * Left justify 'addr' and return its resulting network mask. 1114 */ 1115 static u_long 1116 net_mask(addr) 1117 u_long *addr; 1118 { 1119 register u_long m = 0xffffffff; 1120 1121 if (*addr) 1122 while ((*addr & 0xff000000) == 0) 1123 *addr <<= 8, m <<= 8; 1124 1125 return m; 1126 } 1127 1128 struct block * 1129 gen_scode(name, q) 1130 char *name; 1131 struct qual q; 1132 { 1133 int proto = q.proto; 1134 int dir = q.dir; 1135 u_char *eaddr; 1136 u_long mask, addr, **alist; 1137 struct block *b, *tmp; 1138 int port, real_proto; 1139 1140 switch (q.addr) { 1141 1142 case Q_NET: 1143 addr = pcap_nametonetaddr(name); 1144 if (addr == 0) 1145 bpf_error("unknown network '%s'", name); 1146 mask = net_mask(&addr); 1147 return gen_host(addr, mask, proto, dir); 1148 1149 case Q_DEFAULT: 1150 case Q_HOST: 1151 if (proto == Q_LINK) { 1152 switch (linktype) { 1153 case DLT_EN10MB: 1154 eaddr = pcap_ether_hostton(name); 1155 if (eaddr == NULL) 1156 bpf_error("unknown ether host '%s'", name); 1157 return gen_ehostop(eaddr, dir); 1158 1159 #ifdef FDDI 1160 case DLT_FDDI: 1161 eaddr = pcap_ether_hostton(name); 1162 if (eaddr == NULL) 1163 bpf_error("unknown FDDI host '%s'", name); 1164 return gen_fhostop(eaddr, dir); 1165 #endif 1166 default: 1167 bpf_error("only ethernet/FDDI supports link-level host name"); 1168 break; 1169 } 1170 } else if (proto == Q_DECNET) { 1171 unsigned short dn_addr = __pcap_nametodnaddr(name); 1172 /* 1173 * I don't think DECNET hosts can be multihomed, so 1174 * there is no need to build up a list of addresses 1175 */ 1176 return (gen_host(dn_addr, 0, proto, dir)); 1177 } else { 1178 alist = pcap_nametoaddr(name); 1179 if (alist == NULL || *alist == NULL) 1180 bpf_error("unknown host '%s'", name); 1181 b = gen_host(**alist++, 0xffffffffL, proto, dir); 1182 while (*alist) { 1183 tmp = gen_host(**alist++, 0xffffffffL, 1184 proto, dir); 1185 gen_or(b, tmp); 1186 b = tmp; 1187 } 1188 return b; 1189 } 1190 1191 case Q_PORT: 1192 if (proto != Q_DEFAULT && proto != Q_UDP && proto != Q_TCP) 1193 bpf_error("illegal qualifier of 'port'"); 1194 if (pcap_nametoport(name, &port, &real_proto) == 0) 1195 bpf_error("unknown port '%s'", name); 1196 if (proto == Q_UDP) { 1197 if (real_proto == IPPROTO_TCP) 1198 bpf_error("port '%s' is tcp", name); 1199 else 1200 /* override PROTO_UNDEF */ 1201 real_proto = IPPROTO_UDP; 1202 } 1203 if (proto == Q_TCP) { 1204 if (real_proto == IPPROTO_UDP) 1205 bpf_error("port '%s' is udp", name); 1206 else 1207 /* override PROTO_UNDEF */ 1208 real_proto = IPPROTO_TCP; 1209 } 1210 return gen_port(port, real_proto, dir); 1211 1212 case Q_GATEWAY: 1213 eaddr = pcap_ether_hostton(name); 1214 if (eaddr == NULL) 1215 bpf_error("unknown ether host: %s", name); 1216 1217 alist = pcap_nametoaddr(name); 1218 if (alist == NULL || *alist == NULL) 1219 bpf_error("unknown host '%s'", name); 1220 return gen_gateway(eaddr, alist, proto, dir); 1221 1222 case Q_PROTO: 1223 real_proto = lookup_proto(name, proto); 1224 if (real_proto >= 0) 1225 return gen_proto(real_proto, proto, dir); 1226 else 1227 bpf_error("unknown protocol: %s", name); 1228 1229 case Q_UNDEF: 1230 syntax(); 1231 /* NOTREACHED */ 1232 } 1233 abort(); 1234 /* NOTREACHED */ 1235 } 1236 1237 struct block * 1238 gen_ncode(v, q) 1239 u_long v; 1240 struct qual q; 1241 { 1242 u_long mask; 1243 int proto = q.proto; 1244 int dir = q.dir; 1245 1246 switch (q.addr) { 1247 1248 case Q_DEFAULT: 1249 case Q_HOST: 1250 case Q_NET: 1251 if (proto == Q_DECNET) 1252 return gen_host(v, 0, proto, dir); 1253 else if (proto == Q_LINK) { 1254 bpf_error("illegal link layer address"); 1255 } else { 1256 mask = net_mask(&v); 1257 return gen_host(v, mask, proto, dir); 1258 } 1259 1260 case Q_PORT: 1261 if (proto == Q_UDP) 1262 proto = IPPROTO_UDP; 1263 else if (proto == Q_TCP) 1264 proto = IPPROTO_TCP; 1265 else if (proto == Q_DEFAULT) 1266 proto = PROTO_UNDEF; 1267 else 1268 bpf_error("illegal qualifier of 'port'"); 1269 1270 return gen_port((int)v, proto, dir); 1271 1272 case Q_GATEWAY: 1273 bpf_error("'gateway' requires a name"); 1274 /* NOTREACHED */ 1275 1276 case Q_PROTO: 1277 return gen_proto((int)v, proto, dir); 1278 1279 case Q_UNDEF: 1280 syntax(); 1281 /* NOTREACHED */ 1282 1283 default: 1284 abort(); 1285 /* NOTREACHED */ 1286 } 1287 /* NOTREACHED */ 1288 } 1289 1290 struct block * 1291 gen_ecode(eaddr, q) 1292 u_char *eaddr; 1293 struct qual q; 1294 { 1295 if ((q.addr == Q_HOST || q.addr == Q_DEFAULT) && q.proto == Q_LINK) { 1296 if (linktype == DLT_EN10MB) 1297 return gen_ehostop(eaddr, (int)q.dir); 1298 #ifdef FDDI 1299 if (linktype == DLT_FDDI) 1300 return gen_fhostop(eaddr, (int)q.dir); 1301 #endif 1302 } 1303 bpf_error("ethernet address used in non-ether expression"); 1304 /* NOTREACHED */ 1305 } 1306 1307 void 1308 sappend(s0, s1) 1309 struct slist *s0, *s1; 1310 { 1311 /* 1312 * This is definitely not the best way to do this, but the 1313 * lists will rarely get long. 1314 */ 1315 while (s0->next) 1316 s0 = s0->next; 1317 s0->next = s1; 1318 } 1319 1320 static struct slist * 1321 xfer_to_x(a) 1322 struct arth *a; 1323 { 1324 struct slist *s; 1325 1326 s = new_stmt(BPF_LDX|BPF_MEM); 1327 s->s.k = a->regno; 1328 return s; 1329 } 1330 1331 static struct slist * 1332 xfer_to_a(a) 1333 struct arth *a; 1334 { 1335 struct slist *s; 1336 1337 s = new_stmt(BPF_LD|BPF_MEM); 1338 s->s.k = a->regno; 1339 return s; 1340 } 1341 1342 struct arth * 1343 gen_load(proto, index, size) 1344 int proto; 1345 struct arth *index; 1346 int size; 1347 { 1348 struct slist *s, *tmp; 1349 struct block *b; 1350 int regno = alloc_reg(); 1351 1352 free_reg(index->regno); 1353 switch (size) { 1354 1355 default: 1356 bpf_error("data size must be 1, 2, or 4"); 1357 1358 case 1: 1359 size = BPF_B; 1360 break; 1361 1362 case 2: 1363 size = BPF_H; 1364 break; 1365 1366 case 4: 1367 size = BPF_W; 1368 break; 1369 } 1370 switch (proto) { 1371 default: 1372 bpf_error("unsupported index operation"); 1373 1374 case Q_LINK: 1375 s = xfer_to_x(index); 1376 tmp = new_stmt(BPF_LD|BPF_IND|size); 1377 sappend(s, tmp); 1378 sappend(index->s, s); 1379 break; 1380 1381 case Q_IP: 1382 case Q_ARP: 1383 case Q_RARP: 1384 case Q_DECNET: 1385 case Q_LAT: 1386 case Q_MOPRC: 1387 case Q_MOPDL: 1388 /* XXX Note that we assume a fixed link link header here. */ 1389 s = xfer_to_x(index); 1390 tmp = new_stmt(BPF_LD|BPF_IND|size); 1391 tmp->s.k = off_nl; 1392 sappend(s, tmp); 1393 sappend(index->s, s); 1394 1395 b = gen_proto_abbrev(proto); 1396 if (index->b) 1397 gen_and(index->b, b); 1398 index->b = b; 1399 break; 1400 1401 case Q_TCP: 1402 case Q_UDP: 1403 case Q_ICMP: 1404 s = new_stmt(BPF_LDX|BPF_MSH|BPF_B); 1405 s->s.k = off_nl; 1406 sappend(s, xfer_to_a(index)); 1407 sappend(s, new_stmt(BPF_ALU|BPF_ADD|BPF_X)); 1408 sappend(s, new_stmt(BPF_MISC|BPF_TAX)); 1409 sappend(s, tmp = new_stmt(BPF_LD|BPF_IND|size)); 1410 tmp->s.k = off_nl; 1411 sappend(index->s, s); 1412 1413 gen_and(gen_proto_abbrev(proto), b = gen_ipfrag()); 1414 if (index->b) 1415 gen_and(index->b, b); 1416 index->b = b; 1417 break; 1418 } 1419 index->regno = regno; 1420 s = new_stmt(BPF_ST); 1421 s->s.k = regno; 1422 sappend(index->s, s); 1423 1424 return index; 1425 } 1426 1427 struct block * 1428 gen_relation(code, a0, a1, reversed) 1429 int code; 1430 struct arth *a0, *a1; 1431 int reversed; 1432 { 1433 struct slist *s0, *s1, *s2; 1434 struct block *b, *tmp; 1435 1436 s0 = xfer_to_x(a1); 1437 s1 = xfer_to_a(a0); 1438 s2 = new_stmt(BPF_ALU|BPF_SUB|BPF_X); 1439 b = new_block(JMP(code)); 1440 if (reversed) 1441 gen_not(b); 1442 1443 sappend(s1, s2); 1444 sappend(s0, s1); 1445 sappend(a1->s, s0); 1446 sappend(a0->s, a1->s); 1447 1448 b->stmts = a0->s; 1449 1450 free_reg(a0->regno); 1451 free_reg(a1->regno); 1452 1453 /* 'and' together protocol checks */ 1454 if (a0->b) { 1455 if (a1->b) { 1456 gen_and(a0->b, tmp = a1->b); 1457 } 1458 else 1459 tmp = a0->b; 1460 } else 1461 tmp = a1->b; 1462 1463 if (tmp) 1464 gen_and(tmp, b); 1465 1466 return b; 1467 } 1468 1469 struct arth * 1470 gen_loadlen() 1471 { 1472 int regno = alloc_reg(); 1473 struct arth *a = (struct arth *)newchunk(sizeof(*a)); 1474 struct slist *s; 1475 1476 s = new_stmt(BPF_LD|BPF_LEN); 1477 s->next = new_stmt(BPF_ST); 1478 s->next->s.k = regno; 1479 a->s = s; 1480 a->regno = regno; 1481 1482 return a; 1483 } 1484 1485 struct arth * 1486 gen_loadi(val) 1487 int val; 1488 { 1489 struct arth *a; 1490 struct slist *s; 1491 int reg; 1492 1493 a = (struct arth *)newchunk(sizeof(*a)); 1494 1495 reg = alloc_reg(); 1496 1497 s = new_stmt(BPF_LD|BPF_IMM); 1498 s->s.k = val; 1499 s->next = new_stmt(BPF_ST); 1500 s->next->s.k = reg; 1501 a->s = s; 1502 a->regno = reg; 1503 1504 return a; 1505 } 1506 1507 struct arth * 1508 gen_neg(a) 1509 struct arth *a; 1510 { 1511 struct slist *s; 1512 1513 s = xfer_to_a(a); 1514 sappend(a->s, s); 1515 s = new_stmt(BPF_ALU|BPF_NEG); 1516 s->s.k = 0; 1517 sappend(a->s, s); 1518 s = new_stmt(BPF_ST); 1519 s->s.k = a->regno; 1520 sappend(a->s, s); 1521 1522 return a; 1523 } 1524 1525 struct arth * 1526 gen_arth(code, a0, a1) 1527 int code; 1528 struct arth *a0, *a1; 1529 { 1530 struct slist *s0, *s1, *s2; 1531 1532 s0 = xfer_to_x(a1); 1533 s1 = xfer_to_a(a0); 1534 s2 = new_stmt(BPF_ALU|BPF_X|code); 1535 1536 sappend(s1, s2); 1537 sappend(s0, s1); 1538 sappend(a1->s, s0); 1539 sappend(a0->s, a1->s); 1540 1541 free_reg(a1->regno); 1542 1543 s0 = new_stmt(BPF_ST); 1544 a0->regno = s0->s.k = alloc_reg(); 1545 sappend(a0->s, s0); 1546 1547 return a0; 1548 } 1549 1550 /* 1551 * Here we handle simple allocation of the scratch registers. 1552 * If too many registers are alloc'd, the allocator punts. 1553 */ 1554 static int regused[BPF_MEMWORDS]; 1555 static int curreg; 1556 1557 /* 1558 * Return the next free register. 1559 */ 1560 static int 1561 alloc_reg() 1562 { 1563 int n = BPF_MEMWORDS; 1564 1565 while (--n >= 0) { 1566 if (regused[curreg]) 1567 curreg = (curreg + 1) % BPF_MEMWORDS; 1568 else { 1569 regused[curreg] = 1; 1570 return curreg; 1571 } 1572 } 1573 bpf_error("too many registers needed to evaluate expression"); 1574 /* NOTREACHED */ 1575 } 1576 1577 /* 1578 * Return a register to the table so it can 1579 * be used later. 1580 */ 1581 static void 1582 free_reg(n) 1583 int n; 1584 { 1585 regused[n] = 0; 1586 } 1587 1588 static struct block * 1589 gen_len(jmp, n) 1590 int jmp, n; 1591 { 1592 struct slist *s; 1593 struct block *b; 1594 1595 s = new_stmt(BPF_LD|BPF_LEN); 1596 s->next = new_stmt(BPF_ALU|BPF_SUB|BPF_K); 1597 s->next->s.k = n; 1598 b = new_block(JMP(jmp)); 1599 b->stmts = s; 1600 1601 return b; 1602 } 1603 1604 struct block * 1605 gen_greater(n) 1606 int n; 1607 { 1608 return gen_len(BPF_JGE, n); 1609 } 1610 1611 struct block * 1612 gen_less(n) 1613 int n; 1614 { 1615 struct block *b; 1616 1617 b = gen_len(BPF_JGT, n); 1618 gen_not(b); 1619 1620 return b; 1621 } 1622 1623 struct block * 1624 gen_byteop(op, idx, val) 1625 int op, idx, val; 1626 { 1627 struct block *b; 1628 struct slist *s; 1629 1630 switch (op) { 1631 default: 1632 abort(); 1633 1634 case '=': 1635 return gen_cmp((u_int)idx, BPF_B, (long)val); 1636 1637 case '<': 1638 b = gen_cmp((u_int)idx, BPF_B, (long)val); 1639 b->s.code = JMP(BPF_JGE); 1640 gen_not(b); 1641 return b; 1642 1643 case '>': 1644 b = gen_cmp((u_int)idx, BPF_B, (long)val); 1645 b->s.code = JMP(BPF_JGT); 1646 return b; 1647 1648 case '|': 1649 s = new_stmt(BPF_ALU|BPF_OR|BPF_K); 1650 break; 1651 1652 case '&': 1653 s = new_stmt(BPF_ALU|BPF_AND|BPF_K); 1654 break; 1655 } 1656 s->s.k = val; 1657 b = new_block(JMP(BPF_JEQ)); 1658 b->stmts = s; 1659 gen_not(b); 1660 1661 return b; 1662 } 1663 1664 struct block * 1665 gen_broadcast(proto) 1666 int proto; 1667 { 1668 u_long hostmask; 1669 struct block *b0, *b1, *b2; 1670 static u_char ebroadcast[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 1671 1672 switch (proto) { 1673 1674 case Q_DEFAULT: 1675 case Q_LINK: 1676 if (linktype == DLT_EN10MB) 1677 return gen_ehostop(ebroadcast, Q_DST); 1678 #ifdef FDDI 1679 if (linktype == DLT_FDDI) 1680 return gen_fhostop(ebroadcast, Q_DST); 1681 #endif 1682 bpf_error("not a broadcast link"); 1683 break; 1684 1685 case Q_IP: 1686 b0 = gen_linktype(ETHERTYPE_IP); 1687 hostmask = ~netmask; 1688 b1 = gen_mcmp(off_nl + 16, BPF_W, (long)0, hostmask); 1689 b2 = gen_mcmp(off_nl + 16, BPF_W, 1690 (long)(~0 & hostmask), hostmask); 1691 gen_or(b1, b2); 1692 gen_and(b0, b2); 1693 return b2; 1694 } 1695 bpf_error("only ether/ip broadcast filters supported"); 1696 } 1697 1698 struct block * 1699 gen_multicast(proto) 1700 int proto; 1701 { 1702 register struct block *b0, *b1; 1703 register struct slist *s; 1704 1705 switch (proto) { 1706 1707 case Q_DEFAULT: 1708 case Q_LINK: 1709 if (linktype == DLT_EN10MB) { 1710 /* ether[0] & 1 != 0 */ 1711 s = new_stmt(BPF_LD|BPF_B|BPF_ABS); 1712 s->s.k = 0; 1713 b0 = new_block(JMP(BPF_JSET)); 1714 b0->s.k = 1; 1715 b0->stmts = s; 1716 return b0; 1717 } 1718 1719 if (linktype == DLT_FDDI) { 1720 /* XXX TEST THIS: MIGHT NOT PORT PROPERLY XXX */ 1721 /* fddi[1] & 1 != 0 */ 1722 s = new_stmt(BPF_LD|BPF_B|BPF_ABS); 1723 s->s.k = 1; 1724 b0 = new_block(JMP(BPF_JSET)); 1725 b0->s.k = 1; 1726 b0->stmts = s; 1727 return b0; 1728 } 1729 /* Link not known to support multicasts */ 1730 break; 1731 1732 case Q_IP: 1733 b0 = gen_linktype(ETHERTYPE_IP); 1734 b1 = gen_cmp(off_nl + 16, BPF_B, (long)224); 1735 b1->s.code = JMP(BPF_JGE); 1736 gen_and(b0, b1); 1737 return b1; 1738 } 1739 bpf_error("only IP multicast filters supported on ethernet/FDDI"); 1740 } 1741 1742 /* 1743 * generate command for inbound/outbound. It's here so we can 1744 * make it link-type specific. 'dir' = 0 implies "inbound", 1745 * = 1 implies "outbound". 1746 */ 1747 struct block * 1748 gen_inbound(dir) 1749 int dir; 1750 { 1751 register struct block *b0; 1752 1753 b0 = gen_relation(BPF_JEQ, 1754 gen_load(Q_LINK, gen_loadi(0), 1), 1755 gen_loadi(0), 1756 dir); 1757 return (b0); 1758 } 1759