/*	$OpenBSD: gencode.c,v 1.8 1998/06/11 00:01:18 provos Exp $	*/

/*
 * Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that: (1) source code distributions
 * retain the above copyright notice and this paragraph in its entirety, (2)
 * distributions including binary code include the above copyright notice and
 * this paragraph in its entirety in the documentation or other materials
 * provided with the distribution, and (3) all advertising materials mentioning
 * features or use of this software display the following acknowledgement:
 * ``This product includes software developed by the University of California,
 * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
 * the University nor the names of its contributors may be used to endorse
 * or promote products derived from this software without specific prior
 * written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#ifndef lint
static char rcsid[] =
    "@(#) Header: gencode.c,v 1.81 96/06/19 23:09:09 leres Exp (LBL)";
#endif

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/time.h>

#ifdef __STDC__
struct mbuf;
struct rtentry;
#endif

#include <net/if.h>
#include <net/bpf.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <stdlib.h>
#include <memory.h>
#include <pcap.h>
#include <pcap-namedb.h>
#include <setjmp.h>
#ifdef __STDC__
#include <stdarg.h>
#else
#include <varargs.h>
#endif

#ifdef HAVE_OS_PROTO_H
#include "os-proto.h"
#endif

#include "pcap-int.h"

#include "gencode.h"

#ifndef ETHERTYPE_REVARP
#define ETHERTYPE_REVARP	0x8035
#endif
#ifndef ETHERTYPE_MOPDL
#define ETHERTYPE_MOPDL		0x6001
#endif
#ifndef ETHERTYPE_MOPRC
#define ETHERTYPE_MOPRC		0x6002
#endif
#ifndef ETHERTYPE_DN
#define ETHERTYPE_DN		0x6003
#endif
#ifndef ETHERTYPE_LAT
#define ETHERTYPE_LAT		0x6004
#endif

#define JMP(c) ((c)|BPF_JMP|BPF_K)

/* Locals */
static jmp_buf top_ctx;
static pcap_t *bpf_pcap;

/* XXX */
#ifdef PCAP_FDDIPAD
int pcap_fddipad = PCAP_FDDIPAD;
#else
int pcap_fddipad;
#endif

/* VARARGS */
__dead void
#ifdef __STDC__
bpf_error(const char *fmt, ...)
#else
bpf_error(fmt, va_alist)
	const char *fmt;
	va_dcl
#endif
{
	va_list ap;

#ifdef __STDC__
	va_start(ap, fmt);
#else
	va_start(ap);
#endif
	if (bpf_pcap != NULL)
		(void)vsprintf(pcap_geterr(bpf_pcap), fmt, ap);
	va_end(ap);
	longjmp(top_ctx, 1);
	/* NOTREACHED */
}

static void init_linktype(int);

static int alloc_reg(void);
static void free_reg(int);

static struct block *root;

/*
 * We divvy out chunks of memory rather than call malloc each time so
 * we don't have to worry about leaking memory.  It's probably
 * not a big deal if all this memory was wasted, but if this ever
 * goes into a library that would probably not be a good idea.
 */
#define NCHUNKS 16
#define CHUNK0SIZE 1024
struct chunk {
	u_int n_left;
	void *m;
};

static struct chunk chunks[NCHUNKS];
static int cur_chunk;

static void *newchunk(u_int);
static void freechunks(void);
static __inline struct block *new_block(int);
static __inline struct slist *new_stmt(int);
static struct block *gen_retblk(int);
static __inline void syntax(void);

static void backpatch(struct block *, struct block *);
static void merge(struct block *, struct block *);
static struct block *gen_cmp(u_int, u_int, bpf_int32);
static struct block *gen_mcmp(u_int, u_int, bpf_int32, bpf_u_int32);
static struct block *gen_bcmp(u_int, u_int, u_char *);
static struct block *gen_uncond(int);
static __inline struct block *gen_true(void);
static __inline struct block *gen_false(void);
static struct block *gen_linktype(int);
static struct block *gen_hostop(bpf_u_int32, bpf_u_int32, int, int, u_int, u_int);
static struct block *gen_ehostop(u_char *, int);
static struct block *gen_fhostop(u_char *, int);
static struct block *gen_dnhostop(bpf_u_int32, int, u_int);
static struct block *gen_host(bpf_u_int32, bpf_u_int32, int, int);
static struct block *gen_gateway(u_char *, bpf_u_int32 **, int, int);
static struct block *gen_ipfrag(void);
static struct block *gen_portatom(int, bpf_int32);
struct block *gen_portop(int, int, int);
static struct block *gen_port(int, int, int);
static int lookup_proto(char *, int);
static struct block *gen_proto(int, int, int);
static bpf_u_int32 net_mask(bpf_u_int32 *);
static struct slist *xfer_to_x(struct arth *);
static struct slist *xfer_to_a(struct arth *);
static struct block *gen_len(int, int);

static void *
newchunk(n)
	u_int n;
{
	struct chunk *cp;
	int k, size;

	/* XXX Round up to nearest long. */
	n = (n + sizeof(long) - 1) & ~(sizeof(long) - 1);

	cp = &chunks[cur_chunk];
	if (n > cp->n_left) {
		++cp, k = ++cur_chunk;
		if (k >= NCHUNKS)
			bpf_error("out of memory");
		size = CHUNK0SIZE << k;
		cp->m = (void *)malloc(size);
		if (cp->m == NULL)
			bpf_error("out of memory");
		memset((char *)cp->m, 0, size);
		cp->n_left = size;
		if (n > size)
			bpf_error("out of memory");
	}
	cp->n_left -= n;
	return (void *)((char *)cp->m + cp->n_left);
}

static void
freechunks()
{
	int i;

	cur_chunk = 0;
	for (i = 0; i < NCHUNKS; ++i)
		if (chunks[i].m != NULL) {
			free(chunks[i].m);
			chunks[i].m = NULL;
		}
}

/*
 * A strdup whose allocations are freed after code generation is over.
 */
char *
sdup(s)
	char *s;
{
	int n = strlen(s) + 1;
	char *cp = newchunk(n);
	strcpy(cp, s);
	return (cp);
}

static __inline struct block *
new_block(code)
	int code;
{
	struct block *p;

	p = (struct block *)newchunk(sizeof(*p));
	p->s.code = code;
	p->head = p;

	return p;
}

static __inline struct slist *
new_stmt(code)
	int code;
{
	struct slist *p;

	p = (struct slist *)newchunk(sizeof(*p));
	p->s.code = code;

	return p;
}

static struct block *
gen_retblk(v)
	int v;
{
	struct block *b = new_block(BPF_RET|BPF_K);

	b->s.k = v;
	return b;
}

static __inline void
syntax()
{
	bpf_error("syntax error in filter expression");
}

static bpf_u_int32 netmask;
static int snaplen;

int
pcap_compile(pcap_t *p, struct bpf_program *program,
	     char *buf, int optimize, bpf_u_int32 mask)
{
	extern int n_errors;
	int len;

	n_errors = 0;
	root = NULL;
	bpf_pcap = p;
	if (setjmp(top_ctx)) {
		freechunks();
		return (-1);
	}

	netmask = mask;
	snaplen = pcap_snapshot(p);

	lex_init(buf ? buf : "");
	init_linktype(pcap_datalink(p));
	(void)pcap_parse();

	if (n_errors)
		syntax();

	if (root == NULL)
		root = gen_retblk(snaplen);

	if (optimize) {
		bpf_optimize(&root);
		if (root == NULL ||
		    (root->s.code == (BPF_RET|BPF_K) && root->s.k == 0))
			bpf_error("expression rejects all packets");
	}
	program->bf_insns = icode_to_fcode(root, &len);
	program->bf_len = len;

	freechunks();
	return (0);
}
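
/*
 * A minimal sketch of how a caller typically drives the entry point
 * above: compile a filter expression with pcap_compile() and install
 * the result with pcap_setfilter().  The interface name and the filter
 * string below are placeholders for illustration only.
 *
 *	char errbuf[PCAP_ERRBUF_SIZE];
 *	bpf_u_int32 net, mask;
 *	struct bpf_program fprog;
 *	pcap_t *p;
 *
 *	if (pcap_lookupnet("le0", &net, &mask, errbuf) < 0)
 *		mask = 0;
 *	p = pcap_open_live("le0", 68, 1, 1000, errbuf);
 *	if (p == NULL)
 *		errx(1, "pcap_open_live: %s", errbuf);
 *	if (pcap_compile(p, &fprog, "ip host 10.0.0.1", 1, mask) < 0 ||
 *	    pcap_setfilter(p, &fprog) < 0)
 *		errx(1, "%s", pcap_geterr(p));
 */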

/*
 * Backpatch the blocks in 'list' to 'target'.  The 'sense' field indicates
 * which of the jt and jf fields has been resolved and which is a pointer
 * back to another unresolved block (or nil).  At least one of the fields
 * in each block is already resolved.
 */
static void
backpatch(list, target)
	struct block *list, *target;
{
	struct block *next;

	while (list) {
		if (!list->sense) {
			next = JT(list);
			JT(list) = target;
		} else {
			next = JF(list);
			JF(list) = target;
		}
		list = next;
	}
}

/*
 * Merge the lists in b0 and b1, using the 'sense' field to indicate
 * which of jt and jf is the link.
 */
static void
merge(b0, b1)
	struct block *b0, *b1;
{
	register struct block **p = &b0;

	/* Find end of list. */
	while (*p)
		p = !((*p)->sense) ? &JT(*p) : &JF(*p);

	/* Concatenate the lists. */
	*p = b1;
}

void
finish_parse(p)
	struct block *p;
{
	backpatch(p, gen_retblk(snaplen));
	p->sense = !p->sense;
	backpatch(p, gen_retblk(0));
	root = p->head;
}

void
gen_and(b0, b1)
	struct block *b0, *b1;
{
	backpatch(b0, b1->head);
	b0->sense = !b0->sense;
	b1->sense = !b1->sense;
	merge(b1, b0);
	b1->sense = !b1->sense;
	b1->head = b0->head;
}

void
gen_or(b0, b1)
	struct block *b0, *b1;
{
	b0->sense = !b0->sense;
	backpatch(b0, b1->head);
	b0->sense = !b0->sense;
	merge(b1, b0);
	b1->head = b0->head;
}

void
gen_not(b)
	struct block *b;
{
	b->sense = !b->sense;
}

static struct block *
gen_cmp(offset, size, v)
	u_int offset, size;
	bpf_int32 v;
{
	struct slist *s;
	struct block *b;

	s = new_stmt(BPF_LD|BPF_ABS|size);
	s->s.k = offset;

	b = new_block(JMP(BPF_JEQ));
	b->stmts = s;
	b->s.k = v;

	return b;
}

static struct block *
gen_mcmp(offset, size, v, mask)
	u_int offset, size;
	bpf_int32 v;
	bpf_u_int32 mask;
{
	struct block *b = gen_cmp(offset, size, v);
	struct slist *s;

	if (mask != 0xffffffff) {
		s = new_stmt(BPF_ALU|BPF_AND|BPF_K);
		s->s.k = mask;
		b->stmts->next = s;
	}
	return b;
}

static struct block *
gen_bcmp(offset, size, v)
	u_int offset, size;
	u_char *v;
{
	struct block *b, *tmp;

	b = NULL;
	while (size >= 4) {
		u_char *p = &v[size - 4];
		bpf_int32 w = (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
		tmp = gen_cmp(offset + size - 4, BPF_W, w);
		if (b != NULL)
			gen_and(b, tmp);
		b = tmp;
		size -= 4;
	}
	while (size >= 2) {
		u_char *p = &v[size - 2];
		bpf_int32 w = (p[0] << 8) | p[1];
		tmp = gen_cmp(offset + size - 2, BPF_H, w);
		if (b != NULL)
			gen_and(b, tmp);
		b = tmp;
		size -= 2;
	}
	if (size > 0) {
		tmp = gen_cmp(offset, BPF_B, (bpf_int32)v[0]);
		if (b != NULL)
			gen_and(b, tmp);
		b = tmp;
	}
	return b;
}

/*
 * Various code constructs need to know the layout of the data link
 * layer.  These variables give the necessary offsets.  off_linktype
 * is set to -1 for no encapsulation, in which case, IP is assumed.
 */
static u_int off_linktype;
static u_int off_nl;
static int linktype;

static void
init_linktype(type)
	int type;
{
	linktype = type;

	switch (type) {

	case DLT_EN10MB:
		off_linktype = 12;
		off_nl = 14;
		return;

	case DLT_SLIP:
		/*
		 * SLIP doesn't have a link level type.  The 16 byte
		 * header is hacked into our SLIP driver.
		 */
		off_linktype = -1;
		off_nl = 16;
		return;

	case DLT_NULL:
		off_linktype = -1;
		off_nl = 0;
		return;

	case DLT_LOOP:
		off_linktype = -1;
		off_nl = 4;
		return;

	case DLT_ENC:
		off_linktype = -1;
		off_nl = 12;
		return;

	case DLT_PPP:
		off_linktype = 2;
		off_nl = 4;
		return;

	case DLT_FDDI:
		/*
		 * FDDI doesn't really have a link-level type field.
		 * We assume that SSAP = SNAP is being used and pick
		 * out the encapsulated Ethernet type.
		 */
		off_linktype = 19;
#ifdef PCAP_FDDIPAD
		off_linktype += pcap_fddipad;
#endif
		off_nl = 21;
#ifdef PCAP_FDDIPAD
		off_nl += pcap_fddipad;
#endif
		return;

	case DLT_IEEE802:
		off_linktype = 20;
		off_nl = 22;
		return;

	case DLT_ATM_RFC1483:
		/*
		 * assume routed, non-ISO PDUs
		 * (i.e., LLC = 0xAA-AA-03, OUI = 0x00-00-00)
		 */
		off_linktype = 6;
		off_nl = 8;
		return;
	}
	bpf_error("unknown data link type 0x%x", linktype);
	/* NOTREACHED */
}

static struct block *
gen_uncond(rsense)
	int rsense;
{
	struct block *b;
	struct slist *s;

	s = new_stmt(BPF_LD|BPF_IMM);
	s->s.k = !rsense;
	b = new_block(JMP(BPF_JEQ));
	b->stmts = s;

	return b;
}

static __inline struct block *
gen_true()
{
	return gen_uncond(1);
}

static __inline struct block *
gen_false()
{
	return gen_uncond(0);
}

static struct block *
gen_linktype(proto)
	int proto;
{
	switch (linktype) {
	case DLT_SLIP:
		if (proto == ETHERTYPE_IP)
			return gen_true();
		else
			return gen_false();

	case DLT_PPP:
		if (proto == ETHERTYPE_IP)
			proto = 0x0021;		/* XXX - need ppp.h defs */
		break;

	case DLT_ENC:
	case DLT_LOOP:
	case DLT_NULL:
		/* XXX */
		if (proto == ETHERTYPE_IP)
			return (gen_cmp(0, BPF_W, htonl((bpf_int32) AF_INET)));
		else
			return gen_false();
	}
	return gen_cmp(off_linktype, BPF_H, (bpf_int32)proto);
}

static struct block *
gen_hostop(addr, mask, dir, proto, src_off, dst_off)
	bpf_u_int32 addr;
	bpf_u_int32 mask;
	int dir, proto;
	u_int src_off, dst_off;
{
	struct block *b0, *b1;
	u_int offset;

	switch (dir) {

	case Q_SRC:
		offset = src_off;
		break;

	case Q_DST:
		offset = dst_off;
		break;

	case Q_AND:
		b0 = gen_hostop(addr, mask, Q_SRC, proto, src_off, dst_off);
		b1 = gen_hostop(addr, mask, Q_DST, proto, src_off, dst_off);
		gen_and(b0, b1);
		return b1;

	case Q_OR:
	case Q_DEFAULT:
		b0 = gen_hostop(addr, mask, Q_SRC, proto, src_off, dst_off);
		b1 = gen_hostop(addr, mask, Q_DST, proto, src_off, dst_off);
		gen_or(b0, b1);
		return b1;

	default:
		abort();
	}
	b0 = gen_linktype(proto);
	b1 = gen_mcmp(offset, BPF_W, (bpf_int32)addr, mask);
	gen_and(b0, b1);
	return b1;
}

static struct block *
gen_ehostop(eaddr, dir)
	u_char *eaddr;
	int dir;
{
	struct block *b0, *b1;

	switch (dir) {
	case Q_SRC:
		return gen_bcmp(6, 6, eaddr);

	case Q_DST:
		return gen_bcmp(0, 6, eaddr);

	case Q_AND:
		b0 = gen_ehostop(eaddr, Q_SRC);
		b1 = gen_ehostop(eaddr, Q_DST);
		gen_and(b0, b1);
		return b1;

	case Q_DEFAULT:
	case Q_OR:
		b0 = gen_ehostop(eaddr, Q_SRC);
		b1 = gen_ehostop(eaddr, Q_DST);
		gen_or(b0, b1);
		return b1;
	}
	abort();
	/* NOTREACHED */
}

/*
 * Like gen_ehostop, but for DLT_FDDI
 */
static struct block *
gen_fhostop(eaddr, dir)
	u_char *eaddr;
	int dir;
{
	struct block *b0, *b1;

	switch (dir) {
	case Q_SRC:
#ifdef PCAP_FDDIPAD
		return gen_bcmp(6 + 1 + pcap_fddipad, 6, eaddr);
#else
		return gen_bcmp(6 + 1, 6, eaddr);
#endif

	case Q_DST:
#ifdef PCAP_FDDIPAD
		return gen_bcmp(0 + 1 + pcap_fddipad, 6, eaddr);
#else
		return gen_bcmp(0 + 1, 6, eaddr);
#endif

	case Q_AND:
		b0 = gen_fhostop(eaddr, Q_SRC);
		b1 = gen_fhostop(eaddr, Q_DST);
		gen_and(b0, b1);
		return b1;

	case Q_DEFAULT:
	case Q_OR:
		b0 = gen_fhostop(eaddr, Q_SRC);
		b1 = gen_fhostop(eaddr, Q_DST);
		gen_or(b0, b1);
		return b1;
	}
	abort();
	/* NOTREACHED */
}

/*
 * This is quite tricky because there may be pad bytes in front of the
 * DECNET header, and then there are two possible data packet formats that
 * carry both src and dst addresses, plus 5 packet types in a format that
 * carries only the src node, plus 2 types that use a different format and
 * also carry just the src node.
 *
 * Yuck.
 *
 * Instead of doing those all right, we just look for data packets with
 * 0 or 1 bytes of padding.  If you want to look at other packets, that
 * will require a lot more hacking.
 *
 * To add support for filtering on DECNET "areas" (network numbers)
 * one would want to add a "mask" argument to this routine.  That would
 * make the filter even more inefficient, although one could be clever
 * and not generate masking instructions if the mask is 0xFFFF.
 */
static struct block *
gen_dnhostop(addr, dir, base_off)
	bpf_u_int32 addr;
	int dir;
	u_int base_off;
{
	struct block *b0, *b1, *b2, *tmp;
	u_int offset_lh;	/* offset if long header is received */
	u_int offset_sh;	/* offset if short header is received */

	switch (dir) {

	case Q_DST:
		offset_sh = 1;	/* follows flags */
		offset_lh = 7;	/* flgs,darea,dsubarea,HIORD */
		break;

	case Q_SRC:
		offset_sh = 3;	/* follows flags, dstnode */
		offset_lh = 15;	/* flgs,darea,dsubarea,did,sarea,ssub,HIORD */
		break;

	case Q_AND:
		/* Inefficient because we do our Calvinball dance twice */
		b0 = gen_dnhostop(addr, Q_SRC, base_off);
		b1 = gen_dnhostop(addr, Q_DST, base_off);
		gen_and(b0, b1);
		return b1;

	case Q_OR:
	case Q_DEFAULT:
		/* Inefficient because we do our Calvinball dance twice */
		b0 = gen_dnhostop(addr, Q_SRC, base_off);
		b1 = gen_dnhostop(addr, Q_DST, base_off);
		gen_or(b0, b1);
		return b1;

	default:
		abort();
	}
	b0 = gen_linktype(ETHERTYPE_DN);
	/* Check for pad = 1, long header case */
	tmp = gen_mcmp(base_off + 2, BPF_H,
	    (bpf_int32)ntohs(0x0681), (bpf_int32)ntohs(0x07FF));
	b1 = gen_cmp(base_off + 2 + 1 + offset_lh,
	    BPF_H, (bpf_int32)ntohs(addr));
	gen_and(tmp, b1);
	/* Check for pad = 0, long header case */
	tmp = gen_mcmp(base_off + 2, BPF_B, (bpf_int32)0x06, (bpf_int32)0x7);
	b2 = gen_cmp(base_off + 2 + offset_lh, BPF_H, (bpf_int32)ntohs(addr));
	gen_and(tmp, b2);
	gen_or(b2, b1);
	/* Check for pad = 1, short header case */
	tmp = gen_mcmp(base_off + 2, BPF_H,
	    (bpf_int32)ntohs(0x0281), (bpf_int32)ntohs(0x07FF));
	b2 = gen_cmp(base_off + 2 + 1 + offset_sh,
	    BPF_H, (bpf_int32)ntohs(addr));
	gen_and(tmp, b2);
	gen_or(b2, b1);
	/* Check for pad = 0, short header case */
	tmp = gen_mcmp(base_off + 2, BPF_B, (bpf_int32)0x02, (bpf_int32)0x7);
	b2 = gen_cmp(base_off + 2 + offset_sh, BPF_H, (bpf_int32)ntohs(addr));
	gen_and(tmp, b2);
	gen_or(b2, b1);

	/* Combine with test for linktype */
	gen_and(b0, b1);
	return b1;
}

static struct block *
gen_host(addr, mask, proto, dir)
	bpf_u_int32 addr;
	bpf_u_int32 mask;
	int proto;
	int dir;
{
	struct block *b0, *b1;

	switch (proto) {

	case Q_DEFAULT:
		b0 = gen_host(addr, mask, Q_IP, dir);
		b1 = gen_host(addr, mask, Q_ARP, dir);
		gen_or(b0, b1);
		b0 = gen_host(addr, mask, Q_RARP, dir);
		gen_or(b1, b0);
		return b0;

	case Q_IP:
		return gen_hostop(addr, mask, dir, ETHERTYPE_IP,
		    off_nl + 12, off_nl + 16);

	case Q_RARP:
		return gen_hostop(addr, mask, dir, ETHERTYPE_REVARP,
		    off_nl + 14, off_nl + 24);

	case Q_ARP:
		return gen_hostop(addr, mask, dir, ETHERTYPE_ARP,
		    off_nl + 14, off_nl + 24);

	case Q_TCP:
		bpf_error("'tcp' modifier applied to host");

	case Q_UDP:
		bpf_error("'udp' modifier applied to host");

	case Q_ICMP:
		bpf_error("'icmp' modifier applied to host");

	case Q_IGMP:
		bpf_error("'igmp' modifier applied to host");

	case Q_DECNET:
		return gen_dnhostop(addr, dir, off_nl);

	case Q_LAT:
		bpf_error("LAT host filtering not implemented");

	case Q_MOPDL:
		bpf_error("MOPDL host filtering not implemented");

	case Q_MOPRC:
		bpf_error("MOPRC host filtering not implemented");

	default:
		abort();
	}
	/* NOTREACHED */
}

static struct block *
gen_gateway(eaddr, alist, proto, dir)
	u_char *eaddr;
	bpf_u_int32 **alist;
	int proto;
	int dir;
{
	struct block *b0, *b1, *tmp;

	if (dir != 0)
		bpf_error("direction applied to 'gateway'");

	switch (proto) {
	case Q_DEFAULT:
	case Q_IP:
	case Q_ARP:
	case Q_RARP:
		if (linktype == DLT_EN10MB)
			b0 = gen_ehostop(eaddr, Q_OR);
		else if (linktype == DLT_FDDI)
			b0 = gen_fhostop(eaddr, Q_OR);
		else
			bpf_error(
			    "'gateway' supported only on ethernet or FDDI");

		b1 = gen_host(**alist++, 0xffffffffL, proto, Q_OR);
		while (*alist) {
			tmp = gen_host(**alist++, 0xffffffffL, proto, Q_OR);
			gen_or(b1, tmp);
			b1 = tmp;
		}
		gen_not(b1);
		gen_and(b0, b1);
		return b1;
	}
	bpf_error("illegal modifier of 'gateway'");
	/* NOTREACHED */
}

struct block *
gen_proto_abbrev(proto)
	int proto;
{
	struct block *b0, *b1;

	switch (proto) {

	case Q_TCP:
		b0 = gen_linktype(ETHERTYPE_IP);
		b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)IPPROTO_TCP);
		gen_and(b0, b1);
		break;

	case Q_UDP:
		b0 = gen_linktype(ETHERTYPE_IP);
		b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)IPPROTO_UDP);
		gen_and(b0, b1);
		break;

	case Q_ICMP:
		b0 = gen_linktype(ETHERTYPE_IP);
		b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)IPPROTO_ICMP);
		gen_and(b0, b1);
		break;

	case Q_IGMP:
		b0 = gen_linktype(ETHERTYPE_IP);
		b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)2);
		gen_and(b0, b1);
		break;

	case Q_IP:
		b1 = gen_linktype(ETHERTYPE_IP);
		break;

	case Q_ARP:
		b1 = gen_linktype(ETHERTYPE_ARP);
		break;

	case Q_RARP:
		b1 = gen_linktype(ETHERTYPE_REVARP);
		break;

	case Q_LINK:
		bpf_error("link layer applied in wrong context");

	case Q_DECNET:
		b1 = gen_linktype(ETHERTYPE_DN);
		break;

	case Q_LAT:
		b1 = gen_linktype(ETHERTYPE_LAT);
		break;

	case Q_MOPDL:
		b1 = gen_linktype(ETHERTYPE_MOPDL);
		break;

	case Q_MOPRC:
		b1 = gen_linktype(ETHERTYPE_MOPRC);
		break;

	default:
		abort();
	}
	return b1;
}

static struct block *
gen_ipfrag()
{
	struct slist *s;
	struct block *b;

	/* not ip frag */
	s = new_stmt(BPF_LD|BPF_H|BPF_ABS);
	s->s.k = off_nl + 6;
	b = new_block(JMP(BPF_JSET));
	b->s.k = 0x1fff;
	b->stmts = s;
	gen_not(b);

	return b;
}

static struct block *
gen_portatom(off, v)
	int off;
	bpf_int32 v;
{
	struct slist *s;
	struct block *b;

	s = new_stmt(BPF_LDX|BPF_MSH|BPF_B);
	s->s.k = off_nl;

	s->next = new_stmt(BPF_LD|BPF_IND|BPF_H);
	s->next->s.k = off_nl + off;

	b = new_block(JMP(BPF_JEQ));
	b->stmts = s;
	b->s.k = v;

	return b;
}

struct block *
gen_portop(port, proto, dir)
	int port, proto, dir;
{
	struct block *b0, *b1, *tmp;

	/* ip proto 'proto' */
	tmp = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)proto);
	b0 = gen_ipfrag();
	gen_and(tmp, b0);

	switch (dir) {
	case Q_SRC:
		b1 = gen_portatom(0, (bpf_int32)port);
		break;

	case Q_DST:
		b1 = gen_portatom(2, (bpf_int32)port);
		break;

	case Q_OR:
	case Q_DEFAULT:
		tmp = gen_portatom(0, (bpf_int32)port);
		b1 = gen_portatom(2, (bpf_int32)port);
		gen_or(tmp, b1);
		break;

	case Q_AND:
		tmp = gen_portatom(0, (bpf_int32)port);
		b1 = gen_portatom(2, (bpf_int32)port);
		gen_and(tmp, b1);
		break;

	default:
		abort();
	}
	gen_and(b0, b1);

	return b1;
}

static struct block *
gen_port(port, ip_proto, dir)
	int port;
	int ip_proto;
	int dir;
{
	struct block *b0, *b1, *tmp;

	/* ether proto ip */
	b0 = gen_linktype(ETHERTYPE_IP);

	switch (ip_proto) {
	case IPPROTO_UDP:
	case IPPROTO_TCP:
		b1 = gen_portop(port, ip_proto, dir);
		break;

	case PROTO_UNDEF:
		tmp = gen_portop(port, IPPROTO_TCP, dir);
		b1 = gen_portop(port, IPPROTO_UDP, dir);
		gen_or(tmp, b1);
		break;

	default:
		abort();
	}
	gen_and(b0, b1);
	return b1;
}

static int
lookup_proto(name, proto)
	char *name;
	int proto;
{
	int v;

	switch (proto) {
	case Q_DEFAULT:
	case Q_IP:
		v = pcap_nametoproto(name);
		if (v == PROTO_UNDEF)
			bpf_error("unknown ip proto '%s'", name);
		break;

	case Q_LINK:
		/* XXX should look up h/w protocol type based on linktype */
		v = pcap_nametoeproto(name);
		if (v == PROTO_UNDEF)
			bpf_error("unknown ether proto '%s'", name);
		break;

	default:
		v = PROTO_UNDEF;
		break;
	}
	return v;
}

static struct block *
gen_proto(v, proto, dir)
	int v;
	int proto;
	int dir;
{
	struct block *b0, *b1;

	if (dir != Q_DEFAULT)
		bpf_error("direction applied to 'proto'");

	switch (proto) {
	case Q_DEFAULT:
	case Q_IP:
		b0 = gen_linktype(ETHERTYPE_IP);
		b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)v);
		gen_and(b0, b1);
		return b1;

	case Q_ARP:
		bpf_error("arp does not encapsulate another protocol");
		/* NOTREACHED */

	case Q_RARP:
		bpf_error("rarp does not encapsulate another protocol");
		/* NOTREACHED */

	case Q_DECNET:
		bpf_error("decnet encapsulation is not specifiable");
		/* NOTREACHED */

	case Q_LAT:
		bpf_error("lat does not encapsulate another protocol");
		/* NOTREACHED */

	case Q_MOPRC:
		bpf_error("moprc does not encapsulate another protocol");
		/* NOTREACHED */

	case Q_MOPDL:
		bpf_error("mopdl does not encapsulate another protocol");
		/* NOTREACHED */

	case Q_LINK:
		return gen_linktype(v);

	case Q_UDP:
		bpf_error("'udp proto' is bogus");
		/* NOTREACHED */

	case Q_TCP:
		bpf_error("'tcp proto' is bogus");
		/* NOTREACHED */

	case Q_ICMP:
		bpf_error("'icmp proto' is bogus");
		/* NOTREACHED */

	case Q_IGMP:
		bpf_error("'igmp proto' is bogus");
		/* NOTREACHED */

	default:
		abort();
		/* NOTREACHED */
	}
	/* NOTREACHED */
}

/*
 * Left justify 'addr' and return its resulting network mask.
 */
static bpf_u_int32
net_mask(addr)
	bpf_u_int32 *addr;
{
	register bpf_u_int32 m = 0xffffffff;

	if (*addr)
		while ((*addr & 0xff000000) == 0)
			*addr <<= 8, m <<= 8;

	return m;
}

struct block *
gen_scode(name, q)
	char *name;
	struct qual q;
{
	int proto = q.proto;
	int dir = q.dir;
	u_char *eaddr;
	bpf_u_int32 mask, addr, **alist;
	struct block *b, *tmp;
	int port, real_proto;

	switch (q.addr) {

	case Q_NET:
		addr = pcap_nametonetaddr(name);
		if (addr == 0)
			bpf_error("unknown network '%s'", name);
		mask = net_mask(&addr);
		return gen_host(addr, mask, proto, dir);

	case Q_DEFAULT:
	case Q_HOST:
		if (proto == Q_LINK) {
			switch (linktype) {

			case DLT_EN10MB:
				eaddr = pcap_ether_hostton(name);
				if (eaddr == NULL)
					bpf_error(
					    "unknown ether host '%s'", name);
				return gen_ehostop(eaddr, dir);

			case DLT_FDDI:
				eaddr = pcap_ether_hostton(name);
				if (eaddr == NULL)
					bpf_error(
					    "unknown FDDI host '%s'", name);
				return gen_fhostop(eaddr, dir);

			default:
				bpf_error(
			    "only ethernet/FDDI supports link-level host name");
				break;
			}
		} else if (proto == Q_DECNET) {
			unsigned short dn_addr = __pcap_nametodnaddr(name);
			/*
			 * I don't think DECNET hosts can be multihomed, so
			 * there is no need to build up a list of addresses
			 */
			return (gen_host(dn_addr, 0, proto, dir));
		} else {
			alist = pcap_nametoaddr(name);
			if (alist == NULL || *alist == NULL)
				bpf_error("unknown host '%s'", name);
			b = gen_host(**alist++, 0xffffffffL, proto, dir);
			while (*alist) {
				tmp = gen_host(**alist++, 0xffffffffL,
				    proto, dir);
				gen_or(b, tmp);
				b = tmp;
			}
			return b;
		}

	case Q_PORT:
		if (proto != Q_DEFAULT && proto != Q_UDP && proto != Q_TCP)
			bpf_error("illegal qualifier of 'port'");
		if (pcap_nametoport(name, &port, &real_proto) == 0)
			bpf_error("unknown port '%s'", name);
		if (proto == Q_UDP) {
			if (real_proto == IPPROTO_TCP)
				bpf_error("port '%s' is tcp", name);
			else
				/* override PROTO_UNDEF */
				real_proto = IPPROTO_UDP;
		}
		if (proto == Q_TCP) {
			if (real_proto == IPPROTO_UDP)
				bpf_error("port '%s' is udp", name);
			else
				/* override PROTO_UNDEF */
				real_proto = IPPROTO_TCP;
		}
		return gen_port(port, real_proto, dir);

	case Q_GATEWAY:
		eaddr = pcap_ether_hostton(name);
		if (eaddr == NULL)
			bpf_error("unknown ether host: %s", name);

		alist = pcap_nametoaddr(name);
		if (alist == NULL || *alist == NULL)
			bpf_error("unknown host '%s'", name);
		return gen_gateway(eaddr, alist, proto, dir);

	case Q_PROTO:
		real_proto = lookup_proto(name, proto);
		if (real_proto >= 0)
			return gen_proto(real_proto, proto, dir);
		else
			bpf_error("unknown protocol: %s", name);

	case Q_UNDEF:
		syntax();
		/* NOTREACHED */
	}
	abort();
	/* NOTREACHED */
}

struct block *
gen_ncode(v, q)
	bpf_u_int32 v;
	struct qual q;
{
	bpf_u_int32 mask;
	int proto = q.proto;
	int dir = q.dir;

	switch (q.addr) {

	case Q_DEFAULT:
	case Q_HOST:
	case Q_NET:
		if (proto == Q_DECNET)
			return gen_host(v, 0, proto, dir);
		else if (proto == Q_LINK) {
			bpf_error("illegal link layer address");
		} else {
			mask = net_mask(&v);
			return gen_host(v, mask, proto, dir);
		}

	case Q_PORT:
		if (proto == Q_UDP)
			proto = IPPROTO_UDP;
		else if (proto == Q_TCP)
			proto = IPPROTO_TCP;
		else if (proto == Q_DEFAULT)
			proto = PROTO_UNDEF;
		else
			bpf_error("illegal qualifier of 'port'");

		return gen_port((int)v, proto, dir);

	case Q_GATEWAY:
		bpf_error("'gateway' requires a name");
		/* NOTREACHED */

	case Q_PROTO:
		return gen_proto((int)v, proto, dir);

	case Q_UNDEF:
		syntax();
		/* NOTREACHED */

	default:
		abort();
		/* NOTREACHED */
	}
	/* NOTREACHED */
}

struct block *
gen_ecode(eaddr, q)
	u_char *eaddr;
	struct qual q;
{
	if ((q.addr == Q_HOST || q.addr == Q_DEFAULT) && q.proto == Q_LINK) {
		if (linktype == DLT_EN10MB)
			return gen_ehostop(eaddr, (int)q.dir);
		if (linktype == DLT_FDDI)
			return gen_fhostop(eaddr, (int)q.dir);
	}
	bpf_error("ethernet address used in non-ether expression");
	/* NOTREACHED */
}

void
sappend(s0, s1)
	struct slist *s0, *s1;
{
	/*
	 * This is definitely not the best way to do this, but the
	 * lists will rarely get long.
	 */
	while (s0->next)
		s0 = s0->next;
	s0->next = s1;
}

static struct slist *
xfer_to_x(a)
	struct arth *a;
{
	struct slist *s;

	s = new_stmt(BPF_LDX|BPF_MEM);
	s->s.k = a->regno;
	return s;
}

static struct slist *
xfer_to_a(a)
	struct arth *a;
{
	struct slist *s;

	s = new_stmt(BPF_LD|BPF_MEM);
	s->s.k = a->regno;
	return s;
}

struct arth *
gen_load(proto, index, size)
	int proto;
	struct arth *index;
	int size;
{
	struct slist *s, *tmp;
	struct block *b;
	int regno = alloc_reg();

	free_reg(index->regno);
	switch (size) {

	default:
		bpf_error("data size must be 1, 2, or 4");

	case 1:
		size = BPF_B;
		break;

	case 2:
		size = BPF_H;
		break;

	case 4:
		size = BPF_W;
		break;
	}
	switch (proto) {
	default:
		bpf_error("unsupported index operation");

	case Q_LINK:
		s = xfer_to_x(index);
		tmp = new_stmt(BPF_LD|BPF_IND|size);
		sappend(s, tmp);
		sappend(index->s, s);
		break;

	case Q_IP:
	case Q_ARP:
	case Q_RARP:
	case Q_DECNET:
	case Q_LAT:
	case Q_MOPRC:
	case Q_MOPDL:
		/* XXX Note that we assume a fixed link-layer header here. */
		s = xfer_to_x(index);
		tmp = new_stmt(BPF_LD|BPF_IND|size);
		tmp->s.k = off_nl;
		sappend(s, tmp);
		sappend(index->s, s);

		b = gen_proto_abbrev(proto);
		if (index->b)
			gen_and(index->b, b);
		index->b = b;
		break;

	case Q_TCP:
	case Q_UDP:
	case Q_ICMP:
	case Q_IGMP:
		s = new_stmt(BPF_LDX|BPF_MSH|BPF_B);
		s->s.k = off_nl;
		sappend(s, xfer_to_a(index));
		sappend(s, new_stmt(BPF_ALU|BPF_ADD|BPF_X));
		sappend(s, new_stmt(BPF_MISC|BPF_TAX));
		sappend(s, tmp = new_stmt(BPF_LD|BPF_IND|size));
		tmp->s.k = off_nl;
		sappend(index->s, s);

		gen_and(gen_proto_abbrev(proto), b = gen_ipfrag());
		if (index->b)
			gen_and(index->b, b);
		index->b = b;
		break;
	}
	index->regno = regno;
	s = new_stmt(BPF_ST);
	s->s.k = regno;
	sappend(index->s, s);

	return index;
}

struct block *
gen_relation(code, a0, a1, reversed)
	int code;
	struct arth *a0, *a1;
	int reversed;
{
	struct slist *s0, *s1, *s2;
	struct block *b, *tmp;

	s0 = xfer_to_x(a1);
	s1 = xfer_to_a(a0);
	s2 = new_stmt(BPF_ALU|BPF_SUB|BPF_X);
	b = new_block(JMP(code));
	if (code == BPF_JGT || code == BPF_JGE) {
		reversed = !reversed;
		b->s.k = 0x80000000;
	}
	if (reversed)
		gen_not(b);

	sappend(s1, s2);
	sappend(s0, s1);
	sappend(a1->s, s0);
	sappend(a0->s, a1->s);

	b->stmts = a0->s;

	free_reg(a0->regno);
	free_reg(a1->regno);

	/* 'and' together protocol checks */
	if (a0->b) {
		if (a1->b)
			gen_and(a0->b, tmp = a1->b);
		else
			tmp = a0->b;
	} else
		tmp = a1->b;

	if (tmp)
		gen_and(tmp, b);

	return b;
}

struct arth *
gen_loadlen()
{
	int regno = alloc_reg();
	struct arth *a = (struct arth *)newchunk(sizeof(*a));
	struct slist *s;

	s = new_stmt(BPF_LD|BPF_LEN);
	s->next = new_stmt(BPF_ST);
	s->next->s.k = regno;
	a->s = s;
	a->regno = regno;

	return a;
}

struct arth *
gen_loadi(val)
	int val;
{
	struct arth *a;
	struct slist *s;
	int reg;

	a = (struct arth *)newchunk(sizeof(*a));

	reg = alloc_reg();

	s = new_stmt(BPF_LD|BPF_IMM);
	s->s.k = val;
	s->next = new_stmt(BPF_ST);
	s->next->s.k = reg;
	a->s = s;
	a->regno = reg;

	return a;
}

struct arth *
gen_neg(a)
	struct arth *a;
{
	struct slist *s;

	s = xfer_to_a(a);
	sappend(a->s, s);
	s = new_stmt(BPF_ALU|BPF_NEG);
	s->s.k = 0;
	sappend(a->s, s);
	s = new_stmt(BPF_ST);
	s->s.k = a->regno;
	sappend(a->s, s);

	return a;
}

struct arth *
gen_arth(code, a0, a1)
	int code;
	struct arth *a0, *a1;
{
	struct slist *s0, *s1, *s2;

	s0 = xfer_to_x(a1);
	s1 = xfer_to_a(a0);
	s2 = new_stmt(BPF_ALU|BPF_X|code);

	sappend(s1, s2);
	sappend(s0, s1);
	sappend(a1->s, s0);
	sappend(a0->s, a1->s);

	free_reg(a1->regno);

	s0 = new_stmt(BPF_ST);
	a0->regno = s0->s.k = alloc_reg();
	sappend(a0->s, s0);

	return a0;
}

/*
 * Here we handle simple allocation of the scratch registers.
 * If too many registers are alloc'd, the allocator punts.
 */
static int regused[BPF_MEMWORDS];
static int curreg;

/*
 * Return the next free register.
 */
static int
alloc_reg()
{
	int n = BPF_MEMWORDS;

	while (--n >= 0) {
		if (regused[curreg])
			curreg = (curreg + 1) % BPF_MEMWORDS;
		else {
			regused[curreg] = 1;
			return curreg;
		}
	}
	bpf_error("too many registers needed to evaluate expression");
	/* NOTREACHED */
}

/*
 * Return a register to the table so it can
 * be used later.
 */
static void
free_reg(n)
	int n;
{
	regused[n] = 0;
}

static struct block *
gen_len(jmp, n)
	int jmp, n;
{
	struct slist *s;
	struct block *b;

	s = new_stmt(BPF_LD|BPF_LEN);
	b = new_block(JMP(jmp));
	b->stmts = s;
	b->s.k = n;

	return b;
}

struct block *
gen_greater(n)
	int n;
{
	return gen_len(BPF_JGE, n);
}

struct block *
gen_less(n)
	int n;
{
	struct block *b;

	b = gen_len(BPF_JGT, n);
	gen_not(b);

	return b;
}

struct block *
gen_byteop(op, idx, val)
	int op, idx, val;
{
	struct block *b;
	struct slist *s;

	switch (op) {
	default:
		abort();

	case '=':
		return gen_cmp((u_int)idx, BPF_B, (bpf_int32)val);

	case '<':
		b = gen_cmp((u_int)idx, BPF_B, (bpf_int32)val);
		b->s.code = JMP(BPF_JGE);
		gen_not(b);
		return b;

	case '>':
		b = gen_cmp((u_int)idx, BPF_B, (bpf_int32)val);
		b->s.code = JMP(BPF_JGT);
		return b;

	case '|':
		s = new_stmt(BPF_ALU|BPF_OR|BPF_K);
		break;

	case '&':
		s = new_stmt(BPF_ALU|BPF_AND|BPF_K);
		break;
	}
	s->s.k = val;
	b = new_block(JMP(BPF_JEQ));
	b->stmts = s;
	gen_not(b);

	return b;
}

struct block *
gen_broadcast(proto)
	int proto;
{
	bpf_u_int32 hostmask;
	struct block *b0, *b1, *b2;
	static u_char ebroadcast[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	switch (proto) {

	case Q_DEFAULT:
	case Q_LINK:
		if (linktype == DLT_EN10MB)
			return gen_ehostop(ebroadcast, Q_DST);
		if (linktype == DLT_FDDI)
			return gen_fhostop(ebroadcast, Q_DST);
		bpf_error("not a broadcast link");
		break;

	case Q_IP:
		b0 = gen_linktype(ETHERTYPE_IP);
		hostmask = ~netmask;
		b1 = gen_mcmp(off_nl + 16, BPF_W, (bpf_int32)0, hostmask);
		b2 = gen_mcmp(off_nl + 16, BPF_W,
		    (bpf_int32)(~0 & hostmask), hostmask);
		gen_or(b1, b2);
		gen_and(b0, b2);
		return b2;
	}
	bpf_error("only ether/ip broadcast filters supported");
}

struct block *
gen_multicast(proto)
	int proto;
{
	register struct block *b0, *b1;
	register struct slist *s;

	switch (proto) {

	case Q_DEFAULT:
	case Q_LINK:
		if (linktype == DLT_EN10MB) {
			/* ether[0] & 1 != 0 */
			s = new_stmt(BPF_LD|BPF_B|BPF_ABS);
			s->s.k = 0;
			b0 = new_block(JMP(BPF_JSET));
			b0->s.k = 1;
			b0->stmts = s;
			return b0;
		}

		if (linktype == DLT_FDDI) {
			/* XXX TEST THIS: MIGHT NOT PORT PROPERLY XXX */
			/* fddi[1] & 1 != 0 */
			s = new_stmt(BPF_LD|BPF_B|BPF_ABS);
			s->s.k = 1;
			b0 = new_block(JMP(BPF_JSET));
			b0->s.k = 1;
			b0->stmts = s;
			return b0;
		}
		/* Link not known to support multicasts */
		break;

	case Q_IP:
		b0 = gen_linktype(ETHERTYPE_IP);
		b1 = gen_cmp(off_nl + 16, BPF_B, (bpf_int32)224);
		b1->s.code = JMP(BPF_JGE);
		gen_and(b0, b1);
		return b1;
	}
	bpf_error("only IP multicast filters supported on ethernet/FDDI");
}

/*
 * Generate code for inbound/outbound.  It's here so we can
 * make it link-type specific.  'dir' = 0 implies "inbound",
 * = 1 implies "outbound".
 */
struct block *
gen_inbound(dir)
	int dir;
{
	register struct block *b0;

	b0 = gen_relation(BPF_JEQ,
	    gen_load(Q_LINK, gen_loadi(0), 1),
	    gen_loadi(0),
	    dir);
	return (b0);
}