1 /* $NetBSD: gencode.c,v 1.2 1995/03/06 11:38:21 mycroft Exp $ */ 2 3 /* 4 * Copyright (c) 1990, 1991, 1992, 1993, 1994 5 * The Regents of the University of California. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that: (1) source code distributions 9 * retain the above copyright notice and this paragraph in its entirety, (2) 10 * distributions including binary code include the above copyright notice and 11 * this paragraph in its entirety in the documentation or other materials 12 * provided with the distribution, and (3) all advertising materials mentioning 13 * features or use of this software display the following acknowledgement: 14 * ``This product includes software developed by the University of California, 15 * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of 16 * the University nor the names of its contributors may be used to endorse 17 * or promote products derived from this software without specific prior 18 * written permission. 19 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED 20 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF 21 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
22 */ 23 #ifndef lint 24 static char rcsid[] = 25 "@(#) Header: gencode.c,v 1.55 94/06/20 19:07:53 leres Exp (LBL)"; 26 #endif 27 28 #include <sys/types.h> 29 #include <sys/socket.h> 30 #include <sys/time.h> 31 32 #include <net/if.h> 33 #include <net/bpf.h> 34 35 #include <netinet/in.h> 36 #include <netinet/if_ether.h> 37 38 #include <memory.h> 39 #include <pcap.h> 40 #include <pcap-namedb.h> 41 #include <setjmp.h> 42 #if __STDC__ 43 #include <stdarg.h> 44 #include <stdlib.h> 45 #else 46 #include <varargs.h> 47 #endif 48 49 #include "gencode.h" 50 51 #ifndef __GNUC__ 52 #define inline 53 #endif 54 55 #ifndef ETHERTYPE_REVARP 56 #define ETHERTYPE_REVARP 0x8035 57 #endif 58 #ifndef ETHERTYPE_MOPDL 59 #define ETHERTYPE_MOPDL 0x6001 60 #endif 61 #ifndef ETHERTYPE_MOPRC 62 #define ETHERTYPE_MOPRC 0x6002 63 #endif 64 #ifndef ETHERTYPE_DN 65 #define ETHERTYPE_DN 0x6003 66 #endif 67 #ifndef ETHERTYPE_LAT 68 #define ETHERTYPE_LAT 0x6004 69 #endif 70 71 #define JMP(c) ((c)|BPF_JMP|BPF_K) 72 73 static jmp_buf top_ctx; 74 static pcap_t *bpf_pcap; 75 76 /* VARARGS */ 77 volatile void 78 #if __STDC__ || defined(SOLARIS) 79 bpf_error(char *fmt, ...) 80 #else 81 bpf_error(fmt, va_alist) 82 char *fmt; 83 va_dcl 84 #endif 85 { 86 va_list ap; 87 88 #if __STDC__ 89 va_start(ap, fmt); 90 #else 91 va_start(ap); 92 #endif 93 if (bpf_pcap != NULL) 94 (void)vsprintf(pcap_geterr(bpf_pcap), fmt, ap); 95 va_end(ap); 96 longjmp(top_ctx, 1); 97 /* NOTREACHED */ 98 } 99 100 static void init_linktype(int); 101 102 static int alloc_reg(void); 103 static void free_reg(int); 104 105 static struct block *root; 106 107 /* 108 * We divy out chunks of memory rather than call malloc each time so 109 * we don't have to worry about leaking memory. It's probably 110 * not a big deal if all this memory was wasted but it this ever 111 * goes into a library that would probably not be a good idea. 
112 */ 113 #define NCHUNKS 16 114 #define CHUNK0SIZE 1024 115 struct chunk { 116 u_int n_left; 117 void *m; 118 }; 119 120 static struct chunk chunks[NCHUNKS]; 121 static int cur_chunk; 122 123 static void *newchunk(u_int); 124 static void freechunks(void); 125 static inline struct block *new_block(int); 126 static inline struct slist *new_stmt(int); 127 static struct block *gen_retblk(int); 128 static inline void syntax(void); 129 130 static void backpatch(struct block *, struct block *); 131 static void merge(struct block *, struct block *); 132 static struct block *gen_cmp(u_int, u_int, long); 133 static struct block *gen_mcmp(u_int, u_int, long, u_long); 134 static struct block *gen_bcmp(u_int, u_int, u_char *); 135 static struct block *gen_uncond(int); 136 static inline struct block *gen_true(void); 137 static inline struct block *gen_false(void); 138 static struct block *gen_linktype(int); 139 static struct block *gen_hostop(u_long, u_long, int, int, u_int, u_int); 140 static struct block *gen_ehostop(u_char *, int); 141 #ifdef FDDI 142 static struct block *gen_fhostop(u_char *, int); 143 #endif 144 static struct block *gen_dnhostop(u_long, int, u_int); 145 static struct block *gen_host(u_long, u_long, int, int); 146 static struct block *gen_gateway(u_char *, u_long **, int, int); 147 static struct block *gen_ipfrag(void); 148 static struct block *gen_portatom(int, long); 149 struct block *gen_portop(int, int, int); 150 static struct block *gen_port(int, int, int); 151 static int lookup_proto(char *, int); 152 static struct block *gen_proto(int, int, int); 153 static u_long net_mask(u_long *); 154 static u_long net_mask(u_long *); 155 static struct slist *xfer_to_x(struct arth *); 156 static struct slist *xfer_to_a(struct arth *); 157 static struct block *gen_len(int, int); 158 159 static void * 160 newchunk(n) 161 u_int n; 162 { 163 struct chunk *cp; 164 int k, size; 165 166 /* XXX Round up to nearest long. 
*/ 167 n = (n + sizeof(long) - 1) & ~(sizeof(long) - 1); 168 169 cp = &chunks[cur_chunk]; 170 if (n > cp->n_left) { 171 ++cp, k = ++cur_chunk; 172 if (k >= NCHUNKS) 173 bpf_error("out of memory"); 174 size = CHUNK0SIZE << k; 175 cp->m = (void *)malloc(size); 176 memset((char *)cp->m, 0, size); 177 cp->n_left = size; 178 if (n > size) 179 bpf_error("out of memory"); 180 } 181 cp->n_left -= n; 182 return (void *)((char *)cp->m + cp->n_left); 183 } 184 185 static void 186 freechunks() 187 { 188 int i; 189 190 for (i = 0; i < NCHUNKS; ++i) 191 if (chunks[i].m) 192 free(chunks[i].m); 193 } 194 195 /* 196 * A strdup whose allocations are freed after code generation is over. 197 */ 198 char * 199 sdup(s) 200 char *s; 201 { 202 int n = strlen(s) + 1; 203 char *cp = newchunk(n); 204 strcpy(cp, s); 205 return (cp); 206 } 207 208 static inline struct block * 209 new_block(code) 210 int code; 211 { 212 struct block *p; 213 214 p = (struct block *)newchunk(sizeof(*p)); 215 p->s.code = code; 216 p->head = p; 217 218 return p; 219 } 220 221 static inline struct slist * 222 new_stmt(code) 223 int code; 224 { 225 struct slist *p; 226 227 p = (struct slist *)newchunk(sizeof(*p)); 228 p->s.code = code; 229 230 return p; 231 } 232 233 static struct block * 234 gen_retblk(v) 235 int v; 236 { 237 struct block *b = new_block(BPF_RET|BPF_K); 238 239 b->s.k = v; 240 return b; 241 } 242 243 static inline void 244 syntax() 245 { 246 bpf_error("syntax error in filter expression"); 247 } 248 249 static u_long netmask; 250 static int snaplen; 251 252 int 253 pcap_compile(pcap_t *p, struct bpf_program *program, 254 char *buf, int optimize, u_long mask) 255 { 256 extern int n_errors; 257 int len; 258 259 bpf_pcap = p; 260 if (setjmp(top_ctx)) 261 return (-1); 262 263 netmask = mask; 264 snaplen = pcap_snapshot(p); 265 266 lex_init(buf ? 
buf : ""); 267 init_linktype(pcap_datalink(p)); 268 pcap_parse(); 269 270 if (n_errors) 271 syntax(); 272 273 if (root == NULL) 274 root = gen_retblk(snaplen); 275 276 if (optimize) { 277 bpf_optimize(&root); 278 if (root == NULL || 279 (root->s.code == (BPF_RET|BPF_K) && root->s.k == 0)) 280 bpf_error("expression rejects all packets"); 281 } 282 program->bf_insns = icode_to_fcode(root, &len); 283 program->bf_len = len; 284 285 freechunks(); 286 return (0); 287 } 288 289 /* 290 * Backpatch the blocks in 'list' to 'target'. The 'sense' field indicates 291 * which of the jt and jf fields has been resolved and which is a pointer 292 * back to another unresolved block (or nil). At least one of the fields 293 * in each block is already resolved. 294 */ 295 static void 296 backpatch(list, target) 297 struct block *list, *target; 298 { 299 struct block *next; 300 301 while (list) { 302 if (!list->sense) { 303 next = JT(list); 304 JT(list) = target; 305 } else { 306 next = JF(list); 307 JF(list) = target; 308 } 309 list = next; 310 } 311 } 312 313 /* 314 * Merge the lists in b0 and b1, using the 'sense' field to indicate 315 * which of jt and jf is the link. 316 */ 317 static void 318 merge(b0, b1) 319 struct block *b0, *b1; 320 { 321 register struct block **p = &b0; 322 323 /* Find end of list. */ 324 while (*p) 325 p = !((*p)->sense) ? &JT(*p) : &JF(*p); 326 327 /* Concatenate the lists. 
*/ 328 *p = b1; 329 } 330 331 void 332 finish_parse(p) 333 struct block *p; 334 { 335 backpatch(p, gen_retblk(snaplen)); 336 p->sense = !p->sense; 337 backpatch(p, gen_retblk(0)); 338 root = p->head; 339 } 340 341 void 342 gen_and(b0, b1) 343 struct block *b0, *b1; 344 { 345 backpatch(b0, b1->head); 346 b0->sense = !b0->sense; 347 b1->sense = !b1->sense; 348 merge(b1, b0); 349 b1->sense = !b1->sense; 350 b1->head = b0->head; 351 } 352 353 void 354 gen_or(b0, b1) 355 struct block *b0, *b1; 356 { 357 b0->sense = !b0->sense; 358 backpatch(b0, b1->head); 359 b0->sense = !b0->sense; 360 merge(b1, b0); 361 b1->head = b0->head; 362 } 363 364 void 365 gen_not(b) 366 struct block *b; 367 { 368 b->sense = !b->sense; 369 } 370 371 static struct block * 372 gen_cmp(offset, size, v) 373 u_int offset, size; 374 long v; 375 { 376 struct slist *s; 377 struct block *b; 378 379 s = new_stmt(BPF_LD|BPF_ABS|size); 380 s->s.k = offset; 381 382 b = new_block(JMP(BPF_JEQ)); 383 b->stmts = s; 384 b->s.k = v; 385 386 return b; 387 } 388 389 static struct block * 390 gen_mcmp(offset, size, v, mask) 391 u_int offset, size; 392 long v; 393 u_long mask; 394 { 395 struct block *b = gen_cmp(offset, size, v); 396 struct slist *s; 397 398 if (mask != 0xffffffff) { 399 s = new_stmt(BPF_ALU|BPF_AND|BPF_K); 400 s->s.k = mask; 401 b->stmts->next = s; 402 } 403 return b; 404 } 405 406 static struct block * 407 gen_bcmp(offset, size, v) 408 u_int offset, size; 409 u_char *v; 410 { 411 struct block *b, *tmp; 412 413 b = NULL; 414 while (size >= 4) { 415 u_char *p = &v[size - 4]; 416 long w = (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]; 417 tmp = gen_cmp(offset + size - 4, BPF_W, w); 418 if (b != NULL) 419 gen_and(b, tmp); 420 b = tmp; 421 size -= 4; 422 } 423 while (size >= 2) { 424 u_char *p = &v[size - 2]; 425 long w = (p[0] << 8) | p[1]; 426 tmp = gen_cmp(offset + size - 2, BPF_H, w); 427 if (b != NULL) 428 gen_and(b, tmp); 429 b = tmp; 430 size -= 2; 431 } 432 if (size > 0) { 433 tmp = 
gen_cmp(offset, BPF_B, (long)v[0]); 434 if (b != NULL) 435 gen_and(b, tmp); 436 b = tmp; 437 } 438 return b; 439 } 440 441 /* 442 * Various code constructs need to know the layout of the data link 443 * layer. These variables give the necessary offsets. off_linktype 444 * is set to -1 for no encapsulation, in which case, IP is assumed. 445 */ 446 static u_int off_linktype; 447 static u_int off_nl; 448 static int linktype; 449 #ifdef FDDI 450 extern int fddipad; 451 #endif 452 453 static void 454 init_linktype(type) 455 int type; 456 { 457 linktype = type; 458 459 switch (type) { 460 461 case DLT_EN10MB: 462 off_linktype = 12; 463 off_nl = 14; 464 return; 465 466 case DLT_SLIP: 467 /* 468 * SLIP doesn't have a link level type. The 16 byte 469 * header is hacked into our SLIP driver. 470 */ 471 off_linktype = -1; 472 off_nl = 16; 473 return; 474 475 case DLT_NULL: 476 off_linktype = -1; 477 off_nl = 0; 478 return; 479 480 case DLT_PPP: 481 off_linktype = 2; 482 off_nl = 4; 483 return; 484 485 #ifdef FDDI 486 case DLT_FDDI: 487 /* 488 * FDDI doesn't really have a link-level type field. 489 * We assume that SSAP = SNAP is being used and pick 490 * out the encapsulated Ethernet type. 
491 */ 492 off_linktype = 19 + fddipad; 493 off_nl = 21 + fddipad; 494 return; 495 #endif 496 497 case DLT_IEEE802: 498 off_linktype = 20; 499 off_nl = 22; 500 return; 501 } 502 bpf_error("unknown data link type 0x%x", linktype); 503 /* NOTREACHED */ 504 } 505 506 static struct block * 507 gen_uncond(rsense) 508 int rsense; 509 { 510 struct block *b; 511 struct slist *s; 512 513 s = new_stmt(BPF_LD|BPF_IMM); 514 s->s.k = !rsense; 515 b = new_block(JMP(BPF_JEQ)); 516 b->stmts = s; 517 518 return b; 519 } 520 521 static inline struct block * 522 gen_true() 523 { 524 return gen_uncond(1); 525 } 526 527 static inline struct block * 528 gen_false() 529 { 530 return gen_uncond(0); 531 } 532 533 static struct block * 534 gen_linktype(proto) 535 int proto; 536 { 537 switch (linktype) { 538 case DLT_SLIP: 539 if (proto == ETHERTYPE_IP) 540 return gen_true(); 541 else 542 return gen_false(); 543 544 case DLT_PPP: 545 if (proto == ETHERTYPE_IP) 546 proto = 0x0021; /* XXX - need ppp.h defs */ 547 break; 548 } 549 return gen_cmp(off_linktype, BPF_H, (long)proto); 550 } 551 552 static struct block * 553 gen_hostop(addr, mask, dir, proto, src_off, dst_off) 554 u_long addr; 555 u_long mask; 556 int dir, proto; 557 u_int src_off, dst_off; 558 { 559 struct block *b0, *b1; 560 u_int offset; 561 562 switch (dir) { 563 564 case Q_SRC: 565 offset = src_off; 566 break; 567 568 case Q_DST: 569 offset = dst_off; 570 break; 571 572 case Q_AND: 573 b0 = gen_hostop(addr, mask, Q_SRC, proto, src_off, dst_off); 574 b1 = gen_hostop(addr, mask, Q_DST, proto, src_off, dst_off); 575 gen_and(b0, b1); 576 return b1; 577 578 case Q_OR: 579 case Q_DEFAULT: 580 b0 = gen_hostop(addr, mask, Q_SRC, proto, src_off, dst_off); 581 b1 = gen_hostop(addr, mask, Q_DST, proto, src_off, dst_off); 582 gen_or(b0, b1); 583 return b1; 584 585 default: 586 abort(); 587 } 588 b0 = gen_linktype(proto); 589 b1 = gen_mcmp(offset, BPF_W, (long)addr, mask); 590 gen_and(b0, b1); 591 return b1; 592 } 593 594 static struct block 
* 595 gen_ehostop(eaddr, dir) 596 u_char *eaddr; 597 int dir; 598 { 599 struct block *b0, *b1; 600 601 switch (dir) { 602 case Q_SRC: 603 return gen_bcmp(6, 6, eaddr); 604 605 case Q_DST: 606 return gen_bcmp(0, 6, eaddr); 607 608 case Q_AND: 609 b0 = gen_ehostop(eaddr, Q_SRC); 610 b1 = gen_ehostop(eaddr, Q_DST); 611 gen_and(b0, b1); 612 return b1; 613 614 case Q_DEFAULT: 615 case Q_OR: 616 b0 = gen_ehostop(eaddr, Q_SRC); 617 b1 = gen_ehostop(eaddr, Q_DST); 618 gen_or(b0, b1); 619 return b1; 620 } 621 abort(); 622 /* NOTREACHED */ 623 } 624 625 #ifdef FDDI 626 /* 627 * Like gen_ehostop, but for DLT_FDDI 628 */ 629 static struct block * 630 gen_fhostop(eaddr, dir) 631 u_char *eaddr; 632 int dir; 633 { 634 struct block *b0, *b1; 635 636 switch (dir) { 637 case Q_SRC: 638 return gen_bcmp(6 + 1 + fddipad, 6, eaddr); 639 640 case Q_DST: 641 return gen_bcmp(0 + 1 + fddipad, 6, eaddr); 642 643 case Q_AND: 644 b0 = gen_fhostop(eaddr, Q_SRC); 645 b1 = gen_fhostop(eaddr, Q_DST); 646 gen_and(b0, b1); 647 return b1; 648 649 case Q_DEFAULT: 650 case Q_OR: 651 b0 = gen_fhostop(eaddr, Q_SRC); 652 b1 = gen_fhostop(eaddr, Q_DST); 653 gen_or(b0, b1); 654 return b1; 655 } 656 abort(); 657 /* NOTREACHED */ 658 } 659 #endif 660 661 /* 662 * This is quite tricky because there may be pad bytes in front of the 663 * DECNET header, and then there are two possible data packet formats that 664 * carry both src and dst addresses, plus 5 packet types in a format that 665 * carries only the src node, plus 2 types that use a different format and 666 * also carry just the src node. 667 * 668 * Yuck. 669 * 670 * Instead of doing those all right, we just look for data packets with 671 * 0 or 1 bytes of padding. If you want to look at other packets, that 672 * will require a lot more hacking. 673 * 674 * To add support for filtering on DECNET "areas" (network numbers) 675 * one would want to add a "mask" argument to this routine. 
That would 676 * make the filter even more inefficient, although one could be clever 677 * and not generate masking instructions if the mask is 0xFFFF. 678 */ 679 static struct block * 680 gen_dnhostop(addr, dir, base_off) 681 u_long addr; 682 int dir; 683 u_int base_off; 684 { 685 struct block *b0, *b1, *b2, *tmp; 686 u_int offset_lh; /* offset if long header is received */ 687 u_int offset_sh; /* offset if short header is received */ 688 689 switch (dir) { 690 691 case Q_DST: 692 offset_sh = 1; /* follows flags */ 693 offset_lh = 7; /* flgs,darea,dsubarea,HIORD */ 694 break; 695 696 case Q_SRC: 697 offset_sh = 3; /* follows flags, dstnode */ 698 offset_lh = 15; /* flgs,darea,dsubarea,did,sarea,ssub,HIORD */ 699 break; 700 701 case Q_AND: 702 /* Inefficient because we do our Calvinball dance twice */ 703 b0 = gen_dnhostop(addr, Q_SRC, base_off); 704 b1 = gen_dnhostop(addr, Q_DST, base_off); 705 gen_and(b0, b1); 706 return b1; 707 708 case Q_OR: 709 case Q_DEFAULT: 710 /* Inefficient because we do our Calvinball dance twice */ 711 b0 = gen_dnhostop(addr, Q_SRC, base_off); 712 b1 = gen_dnhostop(addr, Q_DST, base_off); 713 gen_or(b0, b1); 714 return b1; 715 716 default: 717 abort(); 718 } 719 b0 = gen_linktype(ETHERTYPE_DN); 720 /* Check for pad = 1, long header case */ 721 tmp = gen_mcmp(base_off + 2, BPF_H, 722 (long)ntohs(0x0681), (long)ntohs(0x07FF)); 723 b1 = gen_cmp(base_off + 2 + 1 + offset_lh, BPF_H, (long)ntohs(addr)); 724 gen_and(tmp, b1); 725 /* Check for pad = 0, long header case */ 726 tmp = gen_mcmp(base_off + 2, BPF_B, (long)0x06, (long)0x7); 727 b2 = gen_cmp(base_off + 2 + offset_lh, BPF_H, (long)ntohs(addr)); 728 gen_and(tmp, b2); 729 gen_or(b2, b1); 730 /* Check for pad = 1, short header case */ 731 tmp = gen_mcmp(base_off + 2, BPF_H, 732 (long)ntohs(0x0281), (long)ntohs(0x07FF)); 733 b2 = gen_cmp(base_off + 2 + 1 + offset_sh, BPF_H, (long)ntohs(addr)); 734 gen_and(tmp, b2); 735 gen_or(b2, b1); 736 /* Check for pad = 0, short header case */ 737 tmp 
= gen_mcmp(base_off + 2, BPF_B, (long)0x02, (long)0x7); 738 b2 = gen_cmp(base_off + 2 + offset_sh, BPF_H, (long)ntohs(addr)); 739 gen_and(tmp, b2); 740 gen_or(b2, b1); 741 742 /* Combine with test for linktype */ 743 gen_and(b0, b1); 744 return b1; 745 } 746 747 static struct block * 748 gen_host(addr, mask, proto, dir) 749 u_long addr; 750 u_long mask; 751 int proto; 752 int dir; 753 { 754 struct block *b0, *b1; 755 756 switch (proto) { 757 758 case Q_DEFAULT: 759 b0 = gen_host(addr, mask, Q_IP, dir); 760 b1 = gen_host(addr, mask, Q_ARP, dir); 761 gen_or(b0, b1); 762 b0 = gen_host(addr, mask, Q_RARP, dir); 763 gen_or(b1, b0); 764 return b0; 765 766 case Q_IP: 767 return gen_hostop(addr, mask, dir, ETHERTYPE_IP, 768 off_nl + 12, off_nl + 16); 769 770 case Q_RARP: 771 return gen_hostop(addr, mask, dir, ETHERTYPE_REVARP, 772 off_nl + 14, off_nl + 24); 773 774 case Q_ARP: 775 return gen_hostop(addr, mask, dir, ETHERTYPE_ARP, 776 off_nl + 14, off_nl + 24); 777 778 case Q_TCP: 779 bpf_error("'tcp' modifier applied to host"); 780 781 case Q_UDP: 782 bpf_error("'udp' modifier applied to host"); 783 784 case Q_ICMP: 785 bpf_error("'icmp' modifier applied to host"); 786 787 case Q_DECNET: 788 return gen_dnhostop(addr, dir, off_nl); 789 790 case Q_LAT: 791 bpf_error("LAT host filtering not implemented"); 792 793 case Q_MOPDL: 794 bpf_error("MOPDL host filtering not implemented"); 795 796 case Q_MOPRC: 797 bpf_error("MOPRC host filtering not implemented"); 798 799 default: 800 abort(); 801 } 802 /* NOTREACHED */ 803 } 804 805 static struct block * 806 gen_gateway(eaddr, alist, proto, dir) 807 u_char *eaddr; 808 u_long **alist; 809 int proto; 810 int dir; 811 { 812 struct block *b0, *b1, *tmp; 813 814 if (dir != 0) 815 bpf_error("direction applied to 'gateway'"); 816 817 switch (proto) { 818 case Q_DEFAULT: 819 case Q_IP: 820 case Q_ARP: 821 case Q_RARP: 822 if (linktype == DLT_EN10MB) 823 b0 = gen_ehostop(eaddr, Q_OR); 824 #ifdef FDDI 825 else if (linktype == DLT_FDDI) 826 b0 
= gen_fhostop(eaddr, Q_OR); 827 #endif 828 else 829 bpf_error("'gateway' supported only on ethernet or FDDI"); 830 831 b1 = gen_host(**alist++, 0xffffffffL, proto, Q_OR); 832 while (*alist) { 833 tmp = gen_host(**alist++, 0xffffffffL, proto, Q_OR); 834 gen_or(b1, tmp); 835 b1 = tmp; 836 } 837 gen_not(b1); 838 gen_and(b0, b1); 839 return b1; 840 } 841 bpf_error("illegal modifier of 'gateway'"); 842 /* NOTREACHED */ 843 } 844 845 struct block * 846 gen_proto_abbrev(proto) 847 int proto; 848 { 849 struct block *b0, *b1; 850 851 switch (proto) { 852 853 case Q_TCP: 854 b0 = gen_linktype(ETHERTYPE_IP); 855 b1 = gen_cmp(off_nl + 9, BPF_B, (long)IPPROTO_TCP); 856 gen_and(b0, b1); 857 break; 858 859 case Q_UDP: 860 b0 = gen_linktype(ETHERTYPE_IP); 861 b1 = gen_cmp(off_nl + 9, BPF_B, (long)IPPROTO_UDP); 862 gen_and(b0, b1); 863 break; 864 865 case Q_ICMP: 866 b0 = gen_linktype(ETHERTYPE_IP); 867 b1 = gen_cmp(off_nl + 9, BPF_B, (long)IPPROTO_ICMP); 868 gen_and(b0, b1); 869 break; 870 871 case Q_IP: 872 b1 = gen_linktype(ETHERTYPE_IP); 873 break; 874 875 case Q_ARP: 876 b1 = gen_linktype(ETHERTYPE_ARP); 877 break; 878 879 case Q_RARP: 880 b1 = gen_linktype(ETHERTYPE_REVARP); 881 break; 882 883 case Q_LINK: 884 bpf_error("link layer applied in wrong context"); 885 886 case Q_DECNET: 887 b1 = gen_linktype(ETHERTYPE_DN); 888 break; 889 890 case Q_LAT: 891 b1 = gen_linktype(ETHERTYPE_LAT); 892 break; 893 894 case Q_MOPDL: 895 b1 = gen_linktype(ETHERTYPE_MOPDL); 896 break; 897 898 case Q_MOPRC: 899 b1 = gen_linktype(ETHERTYPE_MOPRC); 900 break; 901 902 default: 903 abort(); 904 } 905 return b1; 906 } 907 908 static struct block * 909 gen_ipfrag() 910 { 911 struct slist *s; 912 struct block *b; 913 914 /* not ip frag */ 915 s = new_stmt(BPF_LD|BPF_H|BPF_ABS); 916 s->s.k = off_nl + 6; 917 b = new_block(JMP(BPF_JSET)); 918 b->s.k = 0x1fff; 919 b->stmts = s; 920 gen_not(b); 921 922 return b; 923 } 924 925 static struct block * 926 gen_portatom(off, v) 927 int off; 928 long v; 929 { 
930 struct slist *s; 931 struct block *b; 932 933 s = new_stmt(BPF_LDX|BPF_MSH|BPF_B); 934 s->s.k = off_nl; 935 936 s->next = new_stmt(BPF_LD|BPF_IND|BPF_H); 937 s->next->s.k = off_nl + off; 938 939 b = new_block(JMP(BPF_JEQ)); 940 b->stmts = s; 941 b->s.k = v; 942 943 return b; 944 } 945 946 struct block * 947 gen_portop(port, proto, dir) 948 int port, proto, dir; 949 { 950 struct block *b0, *b1, *tmp; 951 952 /* ip proto 'proto' */ 953 tmp = gen_cmp(off_nl + 9, BPF_B, (long)proto); 954 b0 = gen_ipfrag(); 955 gen_and(tmp, b0); 956 957 switch (dir) { 958 case Q_SRC: 959 b1 = gen_portatom(0, (long)port); 960 break; 961 962 case Q_DST: 963 b1 = gen_portatom(2, (long)port); 964 break; 965 966 case Q_OR: 967 case Q_DEFAULT: 968 tmp = gen_portatom(0, (long)port); 969 b1 = gen_portatom(2, (long)port); 970 gen_or(tmp, b1); 971 break; 972 973 case Q_AND: 974 tmp = gen_portatom(0, (long)port); 975 b1 = gen_portatom(2, (long)port); 976 gen_and(tmp, b1); 977 break; 978 979 default: 980 abort(); 981 } 982 gen_and(b0, b1); 983 984 return b1; 985 } 986 987 static struct block * 988 gen_port(port, ip_proto, dir) 989 int port; 990 int ip_proto; 991 int dir; 992 { 993 struct block *b0, *b1, *tmp; 994 995 /* ether proto ip */ 996 b0 = gen_linktype(ETHERTYPE_IP); 997 998 switch (ip_proto) { 999 case IPPROTO_UDP: 1000 case IPPROTO_TCP: 1001 b1 = gen_portop(port, ip_proto, dir); 1002 break; 1003 1004 case PROTO_UNDEF: 1005 tmp = gen_portop(port, IPPROTO_TCP, dir); 1006 b1 = gen_portop(port, IPPROTO_UDP, dir); 1007 gen_or(tmp, b1); 1008 break; 1009 1010 default: 1011 abort(); 1012 } 1013 gen_and(b0, b1); 1014 return b1; 1015 } 1016 1017 static int 1018 lookup_proto(name, proto) 1019 char *name; 1020 int proto; 1021 { 1022 int v; 1023 1024 switch (proto) { 1025 case Q_DEFAULT: 1026 case Q_IP: 1027 v = pcap_nametoproto(name); 1028 if (v == PROTO_UNDEF) 1029 bpf_error("unknown ip proto '%s'", name); 1030 break; 1031 1032 case Q_LINK: 1033 /* XXX should look up h/w protocol type based on 
linktype */ 1034 v = pcap_nametoeproto(name); 1035 if (v == PROTO_UNDEF) 1036 bpf_error("unknown ether proto '%s'", name); 1037 break; 1038 1039 default: 1040 v = PROTO_UNDEF; 1041 break; 1042 } 1043 return v; 1044 } 1045 1046 static struct block * 1047 gen_proto(v, proto, dir) 1048 int v; 1049 int proto; 1050 int dir; 1051 { 1052 struct block *b0, *b1; 1053 1054 if (dir != Q_DEFAULT) 1055 bpf_error("direction applied to 'proto'"); 1056 1057 switch (proto) { 1058 case Q_DEFAULT: 1059 case Q_IP: 1060 b0 = gen_linktype(ETHERTYPE_IP); 1061 b1 = gen_cmp(off_nl + 9, BPF_B, (long)v); 1062 gen_and(b0, b1); 1063 return b1; 1064 1065 case Q_ARP: 1066 bpf_error("arp does not encapsulate another protocol"); 1067 /* NOTREACHED */ 1068 1069 case Q_RARP: 1070 bpf_error("rarp does not encapsulate another protocol"); 1071 /* NOTREACHED */ 1072 1073 case Q_DECNET: 1074 bpf_error("decnet encapsulation is not specifiable"); 1075 /* NOTREACHED */ 1076 1077 case Q_LAT: 1078 bpf_error("lat does not encapsulate another protocol"); 1079 /* NOTREACHED */ 1080 1081 case Q_MOPRC: 1082 bpf_error("moprc does not encapsulate another protocol"); 1083 /* NOTREACHED */ 1084 1085 case Q_MOPDL: 1086 bpf_error("mopdl does not encapsulate another protocol"); 1087 /* NOTREACHED */ 1088 1089 case Q_LINK: 1090 return gen_linktype(v); 1091 1092 case Q_UDP: 1093 bpf_error("'udp proto' is bogus"); 1094 /* NOTREACHED */ 1095 1096 case Q_TCP: 1097 bpf_error("'tcp proto' is bogus"); 1098 /* NOTREACHED */ 1099 1100 case Q_ICMP: 1101 bpf_error("'icmp proto' is bogus"); 1102 /* NOTREACHED */ 1103 1104 default: 1105 abort(); 1106 /* NOTREACHED */ 1107 } 1108 /* NOTREACHED */ 1109 } 1110 1111 /* 1112 * Left justify 'addr' and return its resulting network mask. 
1113 */ 1114 static u_long 1115 net_mask(addr) 1116 u_long *addr; 1117 { 1118 register u_long m = 0xffffffff; 1119 1120 if (*addr) 1121 while ((*addr & 0xff000000) == 0) 1122 *addr <<= 8, m <<= 8; 1123 1124 return m; 1125 } 1126 1127 struct block * 1128 gen_scode(name, q) 1129 char *name; 1130 struct qual q; 1131 { 1132 int proto = q.proto; 1133 int dir = q.dir; 1134 u_char *eaddr; 1135 u_long mask, addr, **alist; 1136 struct block *b, *tmp; 1137 int port, real_proto; 1138 1139 switch (q.addr) { 1140 1141 case Q_NET: 1142 addr = pcap_nametonetaddr(name); 1143 if (addr == 0) 1144 bpf_error("unknown network '%s'", name); 1145 mask = net_mask(&addr); 1146 return gen_host(addr, mask, proto, dir); 1147 1148 case Q_DEFAULT: 1149 case Q_HOST: 1150 if (proto == Q_LINK) { 1151 switch (linktype) { 1152 case DLT_EN10MB: 1153 eaddr = pcap_ether_hostton(name); 1154 if (eaddr == NULL) 1155 bpf_error("unknown ether host '%s'", name); 1156 return gen_ehostop(eaddr, dir); 1157 1158 #ifdef FDDI 1159 case DLT_FDDI: 1160 eaddr = pcap_ether_hostton(name); 1161 if (eaddr == NULL) 1162 bpf_error("unknown FDDI host '%s'", name); 1163 return gen_fhostop(eaddr, dir); 1164 #endif 1165 default: 1166 bpf_error("only ethernet/FDDI supports link-level host name"); 1167 break; 1168 } 1169 } else if (proto == Q_DECNET) { 1170 unsigned short dn_addr = __pcap_nametodnaddr(name); 1171 /* 1172 * I don't think DECNET hosts can be multihomed, so 1173 * there is no need to build up a list of addresses 1174 */ 1175 return (gen_host(dn_addr, 0, proto, dir)); 1176 } else { 1177 alist = pcap_nametoaddr(name); 1178 if (alist == NULL || *alist == NULL) 1179 bpf_error("unknown host '%s'", name); 1180 b = gen_host(**alist++, 0xffffffffL, proto, dir); 1181 while (*alist) { 1182 tmp = gen_host(**alist++, 0xffffffffL, 1183 proto, dir); 1184 gen_or(b, tmp); 1185 b = tmp; 1186 } 1187 return b; 1188 } 1189 1190 case Q_PORT: 1191 if (proto != Q_DEFAULT && proto != Q_UDP && proto != Q_TCP) 1192 bpf_error("illegal 
qualifier of 'port'"); 1193 if (pcap_nametoport(name, &port, &real_proto) == 0) 1194 bpf_error("unknown port '%s'", name); 1195 if (proto == Q_UDP) { 1196 if (real_proto == IPPROTO_TCP) 1197 bpf_error("port '%s' is tcp", name); 1198 else 1199 /* override PROTO_UNDEF */ 1200 real_proto = IPPROTO_UDP; 1201 } 1202 if (proto == Q_TCP) { 1203 if (real_proto == IPPROTO_UDP) 1204 bpf_error("port '%s' is udp", name); 1205 else 1206 /* override PROTO_UNDEF */ 1207 real_proto = IPPROTO_TCP; 1208 } 1209 return gen_port(port, real_proto, dir); 1210 1211 case Q_GATEWAY: 1212 eaddr = pcap_ether_hostton(name); 1213 if (eaddr == NULL) 1214 bpf_error("unknown ether host: %s", name); 1215 1216 alist = pcap_nametoaddr(name); 1217 if (alist == NULL || *alist == NULL) 1218 bpf_error("unknown host '%s'", name); 1219 return gen_gateway(eaddr, alist, proto, dir); 1220 1221 case Q_PROTO: 1222 real_proto = lookup_proto(name, proto); 1223 if (real_proto >= 0) 1224 return gen_proto(real_proto, proto, dir); 1225 else 1226 bpf_error("unknown protocol: %s", name); 1227 1228 case Q_UNDEF: 1229 syntax(); 1230 /* NOTREACHED */ 1231 } 1232 abort(); 1233 /* NOTREACHED */ 1234 } 1235 1236 struct block * 1237 gen_ncode(v, q) 1238 u_long v; 1239 struct qual q; 1240 { 1241 u_long mask; 1242 int proto = q.proto; 1243 int dir = q.dir; 1244 1245 switch (q.addr) { 1246 1247 case Q_DEFAULT: 1248 case Q_HOST: 1249 case Q_NET: 1250 if (proto == Q_DECNET) 1251 return gen_host(v, 0, proto, dir); 1252 else if (proto == Q_LINK) { 1253 bpf_error("illegal link layer address"); 1254 } else { 1255 mask = net_mask(&v); 1256 return gen_host(v, mask, proto, dir); 1257 } 1258 1259 case Q_PORT: 1260 if (proto == Q_UDP) 1261 proto = IPPROTO_UDP; 1262 else if (proto == Q_TCP) 1263 proto = IPPROTO_TCP; 1264 else if (proto == Q_DEFAULT) 1265 proto = PROTO_UNDEF; 1266 else 1267 bpf_error("illegal qualifier of 'port'"); 1268 1269 return gen_port((int)v, proto, dir); 1270 1271 case Q_GATEWAY: 1272 bpf_error("'gateway' requires a 
name"); 1273 /* NOTREACHED */ 1274 1275 case Q_PROTO: 1276 return gen_proto((int)v, proto, dir); 1277 1278 case Q_UNDEF: 1279 syntax(); 1280 /* NOTREACHED */ 1281 1282 default: 1283 abort(); 1284 /* NOTREACHED */ 1285 } 1286 /* NOTREACHED */ 1287 } 1288 1289 struct block * 1290 gen_ecode(eaddr, q) 1291 u_char *eaddr; 1292 struct qual q; 1293 { 1294 if ((q.addr == Q_HOST || q.addr == Q_DEFAULT) && q.proto == Q_LINK) { 1295 if (linktype == DLT_EN10MB) 1296 return gen_ehostop(eaddr, (int)q.dir); 1297 #ifdef FDDI 1298 if (linktype == DLT_FDDI) 1299 return gen_fhostop(eaddr, (int)q.dir); 1300 #endif 1301 } 1302 bpf_error("ethernet address used in non-ether expression"); 1303 /* NOTREACHED */ 1304 } 1305 1306 void 1307 sappend(s0, s1) 1308 struct slist *s0, *s1; 1309 { 1310 /* 1311 * This is definitely not the best way to do this, but the 1312 * lists will rarely get long. 1313 */ 1314 while (s0->next) 1315 s0 = s0->next; 1316 s0->next = s1; 1317 } 1318 1319 static struct slist * 1320 xfer_to_x(a) 1321 struct arth *a; 1322 { 1323 struct slist *s; 1324 1325 s = new_stmt(BPF_LDX|BPF_MEM); 1326 s->s.k = a->regno; 1327 return s; 1328 } 1329 1330 static struct slist * 1331 xfer_to_a(a) 1332 struct arth *a; 1333 { 1334 struct slist *s; 1335 1336 s = new_stmt(BPF_LD|BPF_MEM); 1337 s->s.k = a->regno; 1338 return s; 1339 } 1340 1341 struct arth * 1342 gen_load(proto, index, size) 1343 int proto; 1344 struct arth *index; 1345 int size; 1346 { 1347 struct slist *s, *tmp; 1348 struct block *b; 1349 int regno = alloc_reg(); 1350 1351 free_reg(index->regno); 1352 switch (size) { 1353 1354 default: 1355 bpf_error("data size must be 1, 2, or 4"); 1356 1357 case 1: 1358 size = BPF_B; 1359 break; 1360 1361 case 2: 1362 size = BPF_H; 1363 break; 1364 1365 case 4: 1366 size = BPF_W; 1367 break; 1368 } 1369 switch (proto) { 1370 default: 1371 bpf_error("unsupported index operation"); 1372 1373 case Q_LINK: 1374 s = xfer_to_x(index); 1375 tmp = new_stmt(BPF_LD|BPF_IND|size); 1376 sappend(s, 
tmp); 1377 sappend(index->s, s); 1378 break; 1379 1380 case Q_IP: 1381 case Q_ARP: 1382 case Q_RARP: 1383 case Q_DECNET: 1384 case Q_LAT: 1385 case Q_MOPRC: 1386 case Q_MOPDL: 1387 /* XXX Note that we assume a fixed link link header here. */ 1388 s = xfer_to_x(index); 1389 tmp = new_stmt(BPF_LD|BPF_IND|size); 1390 tmp->s.k = off_nl; 1391 sappend(s, tmp); 1392 sappend(index->s, s); 1393 1394 b = gen_proto_abbrev(proto); 1395 if (index->b) 1396 gen_and(index->b, b); 1397 index->b = b; 1398 break; 1399 1400 case Q_TCP: 1401 case Q_UDP: 1402 case Q_ICMP: 1403 s = new_stmt(BPF_LDX|BPF_MSH|BPF_B); 1404 s->s.k = off_nl; 1405 sappend(s, xfer_to_a(index)); 1406 sappend(s, new_stmt(BPF_ALU|BPF_ADD|BPF_X)); 1407 sappend(s, new_stmt(BPF_MISC|BPF_TAX)); 1408 sappend(s, tmp = new_stmt(BPF_LD|BPF_IND|size)); 1409 tmp->s.k = off_nl; 1410 sappend(index->s, s); 1411 1412 gen_and(gen_proto_abbrev(proto), b = gen_ipfrag()); 1413 if (index->b) 1414 gen_and(index->b, b); 1415 index->b = b; 1416 break; 1417 } 1418 index->regno = regno; 1419 s = new_stmt(BPF_ST); 1420 s->s.k = regno; 1421 sappend(index->s, s); 1422 1423 return index; 1424 } 1425 1426 struct block * 1427 gen_relation(code, a0, a1, reversed) 1428 int code; 1429 struct arth *a0, *a1; 1430 int reversed; 1431 { 1432 struct slist *s0, *s1, *s2; 1433 struct block *b, *tmp; 1434 1435 s0 = xfer_to_x(a1); 1436 s1 = xfer_to_a(a0); 1437 s2 = new_stmt(BPF_ALU|BPF_SUB|BPF_X); 1438 b = new_block(JMP(code)); 1439 if (reversed) 1440 gen_not(b); 1441 1442 sappend(s1, s2); 1443 sappend(s0, s1); 1444 sappend(a1->s, s0); 1445 sappend(a0->s, a1->s); 1446 1447 b->stmts = a0->s; 1448 1449 free_reg(a0->regno); 1450 free_reg(a1->regno); 1451 1452 /* 'and' together protocol checks */ 1453 if (a0->b) { 1454 if (a1->b) { 1455 gen_and(a0->b, tmp = a1->b); 1456 } 1457 else 1458 tmp = a0->b; 1459 } else 1460 tmp = a1->b; 1461 1462 if (tmp) 1463 gen_and(tmp, b); 1464 1465 return b; 1466 } 1467 1468 struct arth * 1469 gen_loadlen() 1470 { 1471 int regno = 
alloc_reg(); 1472 struct arth *a = (struct arth *)newchunk(sizeof(*a)); 1473 struct slist *s; 1474 1475 s = new_stmt(BPF_LD|BPF_LEN); 1476 s->next = new_stmt(BPF_ST); 1477 s->next->s.k = regno; 1478 a->s = s; 1479 a->regno = regno; 1480 1481 return a; 1482 } 1483 1484 struct arth * 1485 gen_loadi(val) 1486 int val; 1487 { 1488 struct arth *a; 1489 struct slist *s; 1490 int reg; 1491 1492 a = (struct arth *)newchunk(sizeof(*a)); 1493 1494 reg = alloc_reg(); 1495 1496 s = new_stmt(BPF_LD|BPF_IMM); 1497 s->s.k = val; 1498 s->next = new_stmt(BPF_ST); 1499 s->next->s.k = reg; 1500 a->s = s; 1501 a->regno = reg; 1502 1503 return a; 1504 } 1505 1506 struct arth * 1507 gen_neg(a) 1508 struct arth *a; 1509 { 1510 struct slist *s; 1511 1512 s = xfer_to_a(a); 1513 sappend(a->s, s); 1514 s = new_stmt(BPF_ALU|BPF_NEG); 1515 s->s.k = 0; 1516 sappend(a->s, s); 1517 s = new_stmt(BPF_ST); 1518 s->s.k = a->regno; 1519 sappend(a->s, s); 1520 1521 return a; 1522 } 1523 1524 struct arth * 1525 gen_arth(code, a0, a1) 1526 int code; 1527 struct arth *a0, *a1; 1528 { 1529 struct slist *s0, *s1, *s2; 1530 1531 s0 = xfer_to_x(a1); 1532 s1 = xfer_to_a(a0); 1533 s2 = new_stmt(BPF_ALU|BPF_X|code); 1534 1535 sappend(s1, s2); 1536 sappend(s0, s1); 1537 sappend(a1->s, s0); 1538 sappend(a0->s, a1->s); 1539 1540 free_reg(a1->regno); 1541 1542 s0 = new_stmt(BPF_ST); 1543 a0->regno = s0->s.k = alloc_reg(); 1544 sappend(a0->s, s0); 1545 1546 return a0; 1547 } 1548 1549 /* 1550 * Here we handle simple allocation of the scratch registers. 1551 * If too many registers are alloc'd, the allocator punts. 1552 */ 1553 static int regused[BPF_MEMWORDS]; 1554 static int curreg; 1555 1556 /* 1557 * Return the next free register. 
 */
static int
alloc_reg()
{
	int n = BPF_MEMWORDS;

	/*
	 * Round-robin scan of the BPF scratch-memory slots, starting
	 * from where the previous search left off; at most
	 * BPF_MEMWORDS probes before giving up.
	 */
	while (--n >= 0) {
		if (regused[curreg])
			curreg = (curreg + 1) % BPF_MEMWORDS;
		else {
			regused[curreg] = 1;
			return curreg;
		}
	}
	/* bpf_error longjmps out; this point is never reached. */
	bpf_error("too many registers needed to evaluate expression");
	/* NOTREACHED */
}

/*
 * Return a register to the table so it can
 * be used later.
 */
static void
free_reg(n)
	int n;
{
	regused[n] = 0;
}

/*
 * Build a block that compares the packet length against 'n' with
 * jump kind 'jmp': A <- len, A <- A - n, then conditional jump.
 */
static struct block *
gen_len(jmp, n)
	int jmp, n;
{
	struct slist *s;
	struct block *b;

	s = new_stmt(BPF_LD|BPF_LEN);
	s->next = new_stmt(BPF_ALU|BPF_SUB|BPF_K);
	s->next->s.k = n;
	b = new_block(JMP(jmp));
	b->stmts = s;

	return b;
}

/* Match packets whose length is >= n ("greater").  */
struct block *
gen_greater(n)
	int n;
{
	return gen_len(BPF_JGE, n);
}

/* Match packets whose length is <= n, i.e. NOT (len > n).  */
struct block *
gen_less(n)
	int n;
{
	struct block *b;

	b = gen_len(BPF_JGT, n);
	gen_not(b);

	return b;
}

/*
 * Byte-at-offset tests of the form "byte idx <op> val".
 * '=' / '<' / '>' compare the byte at 'idx' against 'val';
 * '|' / '&' OR/AND 'val' into the byte and test for a nonzero
 * result.  Unknown operators abort (parser should prevent this).
 */
struct block *
gen_byteop(op, idx, val)
	int op, idx, val;
{
	struct block *b;
	struct slist *s;

	switch (op) {
	default:
		abort();

	case '=':
		return gen_cmp((u_int)idx, BPF_B, (long)val);

	case '<':
		/* byte < val  ==  !(byte >= val) */
		b = gen_cmp((u_int)idx, BPF_B, (long)val);
		b->s.code = JMP(BPF_JGE);
		gen_not(b);
		return b;

	case '>':
		b = gen_cmp((u_int)idx, BPF_B, (long)val);
		b->s.code = JMP(BPF_JGT);
		return b;

	case '|':
		s = new_stmt(BPF_ALU|BPF_OR|BPF_K);
		break;

	case '&':
		s = new_stmt(BPF_ALU|BPF_AND|BPF_K);
		break;
	}
	/* (byte <op> val) != 0:  compare to zero, then negate */
	s->s.k = val;
	b = new_block(JMP(BPF_JEQ));
	b->stmts = s;
	gen_not(b);

	return b;
}

/*
 * Generate the "broadcast" test for 'proto'.  For link-level
 * broadcast, match the all-ones destination address; for IP,
 * match a destination whose host part (relative to the file-scope
 * 'netmask' global, not visible in this chunk) is all zeros
 * (old-style) or all ones.
 */
struct block *
gen_broadcast(proto)
	int proto;
{
	u_long hostmask;
	struct block *b0, *b1, *b2;
	static u_char ebroadcast[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	switch (proto) {

	case Q_DEFAULT:
	case Q_LINK:
		if (linktype == DLT_EN10MB)
			return gen_ehostop(ebroadcast, Q_DST);
#ifdef FDDI
		if (linktype == DLT_FDDI)
			return gen_fhostop(ebroadcast, Q_DST);
#endif
		bpf_error("not a broadcast link");
		break;

	case Q_IP:
		b0 = gen_linktype(ETHERTYPE_IP);
		hostmask = ~netmask;
		/* off_nl + 16 is the IP destination address field */
		b1 = gen_mcmp(off_nl + 16, BPF_W, (long)0, hostmask);
		b2 = gen_mcmp(off_nl + 16, BPF_W,
			      (long)(~0 & hostmask), hostmask);
		gen_or(b1, b2);
		gen_and(b0, b2);
		return b2;
	}
	bpf_error("only ether/ip broadcast filters supported");
}

/*
 * Generate the "multicast" test for 'proto'.  For ethernet/FDDI,
 * test the group bit of the destination address; for IP, test for
 * a destination in 224.0.0.0 and above (first octet >= 224).
 */
struct block *
gen_multicast(proto)
	int proto;
{
	register struct block *b0, *b1;
	register struct slist *s;

	switch (proto) {

	case Q_DEFAULT:
	case Q_LINK:
		if (linktype == DLT_EN10MB) {
			/* ether[0] & 1 != 0 */
			s = new_stmt(BPF_LD|BPF_B|BPF_ABS);
			s->s.k = 0;
			b0 = new_block(JMP(BPF_JSET));
			b0->s.k = 1;
			b0->stmts = s;
			return b0;
		}

		if (linktype == DLT_FDDI) {
			/* XXX TEST THIS: MIGHT NOT PORT PROPERLY XXX */
			/* fddi[1] & 1 != 0 */
			s = new_stmt(BPF_LD|BPF_B|BPF_ABS);
			s->s.k = 1;
			b0 = new_block(JMP(BPF_JSET));
			b0->s.k = 1;
			b0->stmts = s;
			return b0;
		}
		/* Link not known to support multicasts */
		break;

	case Q_IP:
		b0 = gen_linktype(ETHERTYPE_IP);
		/* off_nl + 16 is the IP destination address field */
		b1 = gen_cmp(off_nl + 16, BPF_B, (long)224);
		b1->s.code = JMP(BPF_JGE);
		gen_and(b0, b1);
		return b1;
	}
	bpf_error("only IP multicast filters supported on ethernet/FDDI");
}

/*
 * generate command for inbound/outbound.  It's here so we can
 * make it link-type specific.  'dir' = 0 implies "inbound",
 * = 1 implies "outbound".
1745 */ 1746 struct block * 1747 gen_inbound(dir) 1748 int dir; 1749 { 1750 register struct block *b0; 1751 1752 b0 = gen_relation(BPF_JEQ, 1753 gen_load(Q_LINK, gen_loadi(0), 1), 1754 gen_loadi(0), 1755 dir); 1756 return (b0); 1757 } 1758