/*
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that: (1) source code distributions
 * retain the above copyright notice and this paragraph in its entirety, (2)
 * distributions including binary code include the above copyright notice and
 * this paragraph in its entirety in the documentation or other materials
 * provided with the distribution, and (3) all advertising materials mentioning
 * features or use of this software display the following acknowledgement:
 * ``This product includes software developed by the University of California,
 * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
 * the University nor the names of its contributors may be used to endorse
 * or promote products derived from this software without specific prior
 * written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed in 4.3BSD Unix.
 */
#ifndef lint
static char rcsid[] =
	"$Header: bpf.c,v 1.23 91/01/30 18:22:13 mccanne Exp $";
#endif

#include "bpfilter.h"

#if (NBPFILTER > 0)

#ifndef __GNUC__
#define inline
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/buf.h>
#include <sys/dir.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/ioctl.h>
#include <sys/map.h>

#include <sys/file.h>
#ifdef sparc
#include <sys/stream.h>
#endif
#include <sys/tty.h>
#include <sys/uio.h>

#include <sys/protosw.h>
#include <sys/socket.h>
#include <net/if.h>

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <sys/errno.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>

#define PRINET	26		/* interruptible */

/*
 * The default read buffer size is patchable.
 */
int bpf_bufsize = MCLBYTES;

/*
 * 'bpf_iftab' is the driver state table per logical unit number
 * 'bpf_dtab' holds the descriptors, indexed by minor device #
 * 'bpf_units' is the number of attached units
 *
 * We really don't need NBPFILTER bpf_if entries, but this eliminates
 * the need to account for all possible drivers here.
 * This problem will go away when these structures are allocated dynamically.
 */
static struct bpf_if bpf_iftab[NBPFILTER];
static struct bpf_d bpf_dtab[NBPFILTER];
static u_int bpf_units = 0;

static void bpf_ifname();
static void catchpacket();
static int bpf_setif();
static int bpf_initd();

static int
bpf_movein(uio, linktype, mp, sockp)
	register struct uio *uio;
	int linktype;
	register struct mbuf **mp;
	register struct sockaddr *sockp;
{
	struct mbuf *m;
	int error;
	int len;
	int hlen;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
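	/*
	 * Descriptive note: for link types with a non-zero header length,
	 * the first 'hlen' bytes supplied by the user are copied into
	 * sockp->sa_data below rather than into the mbuf, and the mbuf
	 * data pointer is advanced so the driver can later prepend its
	 * own link-level header.
	 */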
	switch (linktype) {
	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = sizeof(struct ether_header);
		break;

	case DLT_FDDI:
		sockp->sa_family = AF_UNSPEC;
		/* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
		hlen = 24;
		break;

	default:
		return (EIO);
	}

	len = uio->uio_resid;
	if ((unsigned)len > MCLBYTES)
		return (EIO);

	MGET(m, M_WAIT, MT_DATA);
	if (m == 0)
		return (ENOBUFS);
	if (len > MLEN) {
		MCLGET(m, M_WAIT);
		if ((m->m_flags & M_EXT) == 0) {
			error = ENOBUFS;
			goto bad;
		}
	}
	m->m_len = len;
	*mp = m;
	/*
	 * Make room for link header.
	 */
	if (hlen) {
		m->m_len -= hlen;
		m->m_data += hlen; /* XXX */

		error = uiomove((caddr_t)sockp->sa_data, hlen, uio);
		if (error)
			goto bad;
	}
	error = uiomove(mtod(m, caddr_t), len - hlen, uio);
	if (!error)
		return (0);
 bad:
	m_freem(m);
	return (error);
}

/*
 * Attach 'd' to the bpf interface 'bp', i.e. make 'd' listen on 'bp'.
 * Must be called at splimp.
 */
static void
bpf_attachd(d, bp)
	struct bpf_d *d;
	struct bpf_if *bp;
{
	/* Point d at bp. */
	d->bd_bif = bp;

	/* Add d to bp's list of listeners. */
	d->bd_next = bp->bif_dlist;
	bp->bif_dlist = d;

	/*
	 * Let the driver know we're here (if it doesn't already).
	 */
	*bp->bif_driverp = bp;
}

static void
bpf_detachd(d)
	struct bpf_d *d;
{
	struct bpf_d **p;
	struct bpf_if *bp;

	bp = d->bd_bif;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		if (ifpromisc(bp->bif_ifp, 0))
			/*
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			panic("bpf_detachd: ifpromisc failed");
	}
	/* Remove 'd' from the interface's descriptor list. */
	p = &bp->bif_dlist;
	while (*p != d) {
		p = &(*p)->bd_next;
		if (*p == 0)
			panic("bpf_detachd: descriptor not in list");
	}
	*p = (*p)->bd_next;
	if (bp->bif_dlist == 0)
		/*
		 * Let the driver know that there are no more listeners.
		 */
		*d->bd_bif->bif_driverp = 0;
	d->bd_bif = 0;
}


/*
 * Mark a descriptor free by making it point to itself.
 * This is probably cheaper than marking with a constant since
 * the address should be in a register anyway.
 */
#define D_ISFREE(d) ((d) == (d)->bd_next)
#define D_MARKFREE(d) ((d)->bd_next = (d))
#define D_MARKUSED(d) ((d)->bd_next = 0)

/*
 * bpfopen - open ethernet device
 *
 * Errors:	ENXIO	- illegal minor device number
 *		EBUSY	- descriptor already in use
 */
/* ARGSUSED */
int
bpfopen(dev, flag)
	dev_t dev;
	int flag;
{
	int error, s;
	register struct bpf_d *d;

	if (minor(dev) >= NBPFILTER)
		return (ENXIO);

	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
	s = splimp();
	d = &bpf_dtab[minor(dev)];
	if (!D_ISFREE(d)) {
		splx(s);
		return (EBUSY);
	} else
		/* Mark "free" and do most initialization. */
		bzero((char *)d, sizeof(*d));
	splx(s);

	error = bpf_initd(d);
	if (error) {
		D_MARKFREE(d);
		return (error);
	}
	return (0);
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
bpfclose(dev, flag)
	dev_t dev;
	int flag;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	int s;

	s = splimp();
	if (d->bd_bif)
		bpf_detachd(d);
	splx(s);

	/* Free the buffer space. */
	if (d->bd_hbuf)
		free(d->bd_hbuf, M_DEVBUF);
	if (d->bd_fbuf)
		free(d->bd_fbuf, M_DEVBUF);
	free(d->bd_sbuf, M_DEVBUF);
	if (d->bd_filter)
		free((caddr_t)d->bd_filter, M_DEVBUF);

	D_MARKFREE(d);
}

/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = 0;
/*
 * bpfread - read next chunk of packets from buffers
 */
int
bpfread(dev, uio)
	dev_t dev;
	register struct uio *uio;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	int error;
	int s;

	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EIO);

	s = splimp();
	/*
	 * If the hold buffer is empty, then set a timer and sleep
	 * until either the timeout has occurred or enough packets have
	 * arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == 0) {
		if (d->bd_immediate && d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}
		error = tsleep((caddr_t)d, PRINET|PCATCH, "bpf", d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			splx(s);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				splx(s);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	splx(s);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = uiomove(d->bd_hbuf, d->bd_hlen, uio);

	s = splimp();
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = 0;
	splx(s);

	return (error);
}


/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
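/*
 * Descriptive note: bpf_wakeup() serves both blocking reads and select().
 * The wakeup() call rouses a reader sleeping in bpfread() (which tsleeps
 * on the descriptor address), while selwakeup() notifies the process
 * recorded in bd_selproc by bpfselect().
 */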
static inline void
bpf_wakeup(d)
	register struct bpf_d *d;
{
	wakeup((caddr_t)d);
	if (d->bd_selproc) {
		selwakeup(d->bd_selproc, (int)d->bd_selcoll);
		d->bd_selcoll = 0;
		d->bd_selproc = 0;
	}
}

int
bpfwrite(dev, uio)
	dev_t dev;
	struct uio *uio;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	struct ifnet *ifp;
	struct mbuf *m;
	int error, s;
	static struct sockaddr dst;

	if (d->bd_bif == 0)
		return (ENXIO);

	ifp = d->bd_bif->bif_ifp;

	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_resid > ifp->if_mtu)
		return (EMSGSIZE);

	error = bpf_movein(uio, (int)d->bd_bif->bif_devp.bdev_type, &m, &dst);
	if (error)
		return (error);

	s = splnet();
	error = (*ifp->if_output)(ifp, m, &dst);
	splx(s);
	/*
	 * The driver frees the mbuf.
	 */
	return (error);
}

/*
 * Reset a descriptor by flushing its packet buffer and clearing the receive
 * and drop counts.  Should be called at splimp.
 */
static void
reset_d(d)
	struct bpf_d *d;
{
	if (d->bd_hbuf) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = 0;
	}
	d->bd_slen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
}

/*
 * bpfioctl - packet filter control
 *
 *  FIONREAD		Check for read packet available.
 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
 *  BIOCGFLEN		Get max filter len.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSETF		Set ethernet read filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCDEVP		Get device parameters.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 */
/* ARGSUSED */
int
bpfioctl(dev, cmd, addr, flag)
	dev_t dev;
	int cmd;
	caddr_t addr;
	int flag;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	int s, error = 0;

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			s = splimp();
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			splx(s);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == 0)
				error = EINVAL;
			else {
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get max filter len.
	 */
	case BIOCGFLEN:
		*(u_int *)addr = BPF_MAXINSNS;
		break;
	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set ethernet read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, (struct bpf_program *)addr);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		s = splimp();
		reset_d(d);
		splx(s);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == 0) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		s = splimp();
		if (d->bd_promisc == 0) {
			d->bd_promisc = 1;
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
		}
		splx(s);
		break;
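
	/*
	 * Descriptive note: the bd_promisc flag above ensures each
	 * descriptor issues at most one "on" request to ifpromisc();
	 * the matching "off" request is made in bpf_detachd() when the
	 * descriptor detaches from the interface or is closed.
	 */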

	/*
	 * Get device parameters.
	 */
	case BIOCDEVP:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			*(struct bpf_devp *)addr = d->bd_bif->bif_devp;
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			bpf_ifname(d->bd_bif->bif_ifp, (struct ifreq *)addr);
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;
			u_long msec;

			/* Compute number of milliseconds. */
			msec = tv->tv_sec * 1000 + tv->tv_usec / 1000;
			/* Scale milliseconds to ticks.  Assume hard
			   clock has millisecond or greater resolution
			   (i.e. tick >= 1000).  For 10ms hardclock,
			   tick/1000 = 10, so rtout <- msec/10. */
			d->bd_rtout = msec / (tick / 1000);
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;
			u_long msec = d->bd_rtout;

			msec *= tick / 1000;
			tv->tv_sec = msec / 1000;
			tv->tv_usec = msec % 1000;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;
	}
	return (error);
}

/*
 * Set d's packet filter program to 'fp'.  If 'd' already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
int
bpf_setf(d, fp)
	struct bpf_d *d;
	struct bpf_program *fp;
{
	struct bpf_insn *fcode, *old;
	u_int flen, size;
	int s;

	old = d->bd_filter;
	if (fp->bf_insns == 0) {
		if (fp->bf_len != 0)
			return (EINVAL);
		s = splimp();
		d->bd_filter = 0;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_DEVBUF);
		return (0);
	}
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return (EINVAL);

	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *)malloc(size, M_DEVBUF, M_WAITOK);
	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size))
		return (EINVAL);

	if (bpf_validate(fcode, (int)flen)) {
		s = splimp();
		d->bd_filter = fcode;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_DEVBUF);

		return (0);
	}
	free((caddr_t)fcode, M_DEVBUF);
	return (EINVAL);
}

/*
 * Detach 'd' from its current interface (if attached at all) and attach to
 * the interface named 'name'.  Return ioctl error code or 0.
 */
static int
bpf_setif(d, ifr)
	struct bpf_d *d;
	struct ifreq *ifr;
{
	struct bpf_if *bp;
	char *cp;
	int unit, i, s;

	/*
	 * Separate string into name part and unit number.  Put a null
	 * byte at the end of the name part, and compute the number.
	 * If a unit number is unspecified, the default is 0,
	 * as initialized above.  XXX This should be common code.
	 */
	unit = 0;
	cp = ifr->ifr_name;
	cp[sizeof(ifr->ifr_name) - 1] = '\0';
	while (*cp++) {
		if (*cp >= '0' && *cp <= '9') {
			unit = *cp - '0';
			*cp++ = '\0';
			while (*cp)
				unit = 10 * unit + *cp++ - '0';
			break;
		}
	}
	/*
	 * Look through attached interfaces for the named one.
	 */
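	/*
	 * Illustrative example (not in the original comment): an ifr_name
	 * of "le0" has been split above into the name "le" and unit 0;
	 * both parts are matched against each attached ifnet below.
	 */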
	bp = bpf_iftab;
	for (i = 0; i < NBPFILTER; ++bp, ++i) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == 0 || unit != ifp->if_unit
		    || strcmp(ifp->if_name, ifr->ifr_name) != 0)
			continue;
		/*
		 * We found the requested interface.  If we're
		 * already attached to it, just flush the buffer.
		 * If it's not up, return an error.
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (ENETDOWN);
		s = splimp();
		if (bp != d->bd_bif) {
			if (d->bd_bif)
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);

			bpf_attachd(d, bp);
		}
		reset_d(d);
		splx(s);
		return (0);
	}
	/* Not found. */
	return (ENXIO);
}

/*
 * Look up the name of the 'ifp' interface and return it in 'ifr->ifr_name'.
 * We augment the ifp's base name with its unit number.
 */
static void
bpf_ifname(ifp, ifr)
	struct ifnet *ifp;
	struct ifreq *ifr;
{
	char *s = ifp->if_name;
	char *d = ifr->ifr_name;

	while (*d++ = *s++)
		;
	/* XXX Assume that unit number is less than 10. */
	/* Overwrite the copied null with the unit digit, then re-terminate. */
	d[-1] = ifp->if_unit + '0';
	*d = '\0';
}

/*
 * Support for select() system call.
 * Inspired by the code in tty.c for the same purpose.
 *
 * bpfselect - returns true iff the specified operation
 * will not block indefinitely.  Otherwise, return
 * false but make a note that a selwakeup() must be done.
 */
int
bpfselect(dev, rw, p)
	register dev_t dev;
	int rw;
	struct proc *p;
{
	register struct bpf_d *d;
	register int s;

	if (rw != FREAD)
		return (0);
	/*
	 * An imitation of the FIONREAD ioctl code.
	 */
	d = &bpf_dtab[minor(dev)];

	s = splimp();
	if (d->bd_slen != 0 || d->bd_hbuf && d->bd_hlen != 0) {
		/*
		 * There is data waiting.
		 */
		splx(s);
		return (1);
	}
	/*
	 * No data ready.  If there's already a select() waiting on this
	 * minor device then this is a collision.  This shouldn't happen
	 * because minors really should not be shared, but if a process
	 * forks while one of these is open, it is possible that both
	 * processes could select on the same descriptor.
	 */
	if (d->bd_selproc && d->bd_selproc->p_wchan == (caddr_t)&selwait)
		d->bd_selcoll = 1;
	else
		d->bd_selproc = p;

	splx(s);
	return (0);
}

/*
 * bpf_tap - incoming linkage from device drivers
 */
void
bpf_tap(arg, pkt, pktlen)
	caddr_t arg;
	register u_char *pkt;
	register u_int pktlen;
{
	struct bpf_if *bp;
	register struct bpf_d *d;
	register u_int slen;
	extern bcopy();
	/*
	 * Note that the ipl does not have to be raised at this point.
	 * The only problem that could arise here is that if two different
	 * interfaces shared any data.  This is not the case.
	 */
	bp = (struct bpf_if *)arg;
	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		if (d->bd_filter)
			slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
		else
			slen = (u_int)-1;

		if (slen != 0)
			catchpacket(d, pkt, pktlen, slen, (void (*)())bcopy);
	}
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
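/*
 * Descriptive note: this routine matches the copy-routine signature that
 * catchpacket() expects; when it is used, 'src' is really the head of an
 * mbuf chain handed in by bpf_mtap() below, not a flat buffer.
 */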
static void
bpf_m_copydata(src, dst, len)
	u_char *src;
	u_char *dst;
	register int len;
{
	register struct mbuf *m = (struct mbuf *)src;
	register unsigned count;

	while (len > 0) {
		if (m == 0)
			panic("bpf_m_copydata");
		count = MIN(m->m_len, len);
		(void)bcopy(mtod(m, caddr_t), (caddr_t)dst, count);
		len -= count;
		dst += count;
		m = m->m_next;
	}
}

/*
 * Length of ethernet and TCP/IP header with no IP options.
 */
#define BPF_MIN_SNAPLEN 50

/*
 * bpf_mtap - incoming linkage from device drivers, when packet
 * is in an mbuf chain
 */
void
bpf_mtap(arg, m0)
	caddr_t arg;
	struct mbuf *m0;
{
	struct bpf_if *bp = (struct bpf_if *)arg;
	struct bpf_d *d;
	u_char *cp;
	u_int slen, pktlen;
	int nbytes;
	struct mbuf *m;
	static u_char buf[BPF_MIN_SNAPLEN];

	if (m0->m_len >= BPF_MIN_SNAPLEN) {
		slen = m0->m_len;
		cp = mtod(m0, u_char *);
	}
	else {
		nbytes = BPF_MIN_SNAPLEN;
		cp = buf;
		m = m0;
		while (m && nbytes > 0) {
			slen = MIN(m->m_len, nbytes);
			bcopy(mtod(m, char *), (char *)cp, slen);
			cp += slen;
			nbytes -= slen;
			m = m->m_next;
		}
		if (nbytes > 0)
			/* Packet too small? */
			return;

		slen = BPF_MIN_SNAPLEN;
		cp = buf;
	}
	pktlen = 0;
	m = m0;
	while (m) {
		pktlen += m->m_len;
		m = m->m_next;
	}
	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		if (d->bd_filter)
			slen = bpf_filter(d->bd_filter, cp, pktlen, slen);
		else
			slen = (u_int)-1;
		if (slen != 0)
			catchpacket(d, (u_char *)m0, pktlen, slen,
				    bpf_m_copydata);
	}
}

/*
 * Move the packet data from interface memory ('pkt') into the
 * store buffer, waking up any pending reads when the buffer fills.
 * 'cpfn' is the routine called to do the actual data transfer.
 * 'bcopy' is passed in to copy contiguous chunks, while
 * 'bpf_m_copydata' is passed in to copy mbuf chains.  In the latter
 * case, 'pkt' is really an mbuf.
 */
static void
catchpacket(d, pkt, pktlen, snaplen, cpfn)
	register struct bpf_d *d;
	register u_char *pkt;
	register u_int pktlen, snaplen;
	register void (*cpfn)();
{
	register struct bpf_hdr *hp;
	register int totlen, curlen;
	register int hdrlen = d->bd_bif->bif_hdrlen;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	if (snaplen <= pktlen)
		totlen = snaplen + hdrlen;
	else {
		totlen = pktlen + hdrlen;
		if (totlen > d->bd_bufsize)
			totlen = d->bd_bufsize;
	}

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == 0) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		bpf_wakeup(d);
		curlen = 0;
	}
	else if (d->bd_immediate)
		/*
		 * Immediate mode is set.  A packet arrived so any
		 * reads should be woken up.
		 */
		bpf_wakeup(d);

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
#ifdef sun
	uniqtime(&hp->bh_tstamp);
#else
#ifdef hp300
	microtime(&hp->bh_tstamp);
#else
	hp->bh_tstamp = time;
#endif
#endif
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;
}

/*
 * Initialize all nonzero fields of a descriptor.
 */
static int
bpf_initd(d)
	register struct bpf_d *d;
{
	d->bd_bufsize = bpf_bufsize;
	d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
	if (d->bd_fbuf == 0)
		return (ENOBUFS);

	d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
	if (d->bd_sbuf == 0) {
		free(d->bd_fbuf, M_DEVBUF);
		return (ENOBUFS);
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	return (0);
}

/*
 * Register 'ifp' with bpf.  'devp' is the link-level device descriptor
 * and 'driverp' is a pointer to the 'struct bpf_if *' in the driver's softc.
 */
void
bpfattach(driverp, ifp, devp)
	caddr_t *driverp;
	struct ifnet *ifp;
	struct bpf_devp *devp;
{
	struct bpf_if *bp;
	int i;

	if (bpf_units >= NBPFILTER) {
		printf("bpf: too many interfaces: %s%d not attached\n",
		       ifp->if_name, ifp->if_unit);
		return;
	}
	bp = &bpf_iftab[bpf_units++];

	bp->bif_dlist = 0;
	bp->bif_driverp = (struct bpf_if **)driverp;
	bp->bif_ifp = ifp;
	bp->bif_devp = *devp;

	*bp->bif_driverp = 0;

	/*
	 * Compute the length of the bpf header.  This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
	 */
	i = devp->bdev_hdrlen;
	bp->bif_hdrlen = BPF_WORDALIGN(i + SIZEOF_BPF_HDR) - i;

	/*
	 * Mark all the descriptors free if this hasn't been done.
	 */
	if (!D_ISFREE(&bpf_dtab[0]))
		for (i = 0; i < NBPFILTER; ++i)
			D_MARKFREE(&bpf_dtab[i]);

	printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit);
}

/* XXX This routine belongs in net/if.c. */
/*
 * Set/clear promiscuous mode on interface ifp based on the truth value
 * of pswitch.  The calls are reference counted so that only the first
 * "on" request actually has an effect, as does the final "off" request.
 * Results are undefined if the "off" and "on" requests are not matched.
 */
int
ifpromisc(ifp, pswitch)
	struct ifnet *ifp;
	int pswitch;
{
	/*
	 * If the device is not configured up, we cannot put it in
	 * promiscuous mode.
	 */
	if ((ifp->if_flags & IFF_UP) == 0)
		return (ENETDOWN);

	if (pswitch) {
		if (ifp->if_pcount++ != 0)
			return (0);
		ifp->if_flags |= IFF_PROMISC;
	} else {
		if (--ifp->if_pcount > 0)
			return (0);
		ifp->if_flags &= ~IFF_PROMISC;
	}
	return ((*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)0));
}

#endif /* NBPFILTER > 0 */
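
/*
 * Illustrative sketch (not part of this file) of how a network driver is
 * expected to hook into this module, based on the bpfattach() and
 * bpf_tap() entry points above.  The driver, softc, and field names
 * ("xx", xx_softc, xs_if, xs_bpf, xx_devp) are hypothetical.
 *
 *	struct xx_softc {
 *		struct ifnet xs_if;	// generic interface state
 *		caddr_t	xs_bpf;		// filled in by bpfattach()
 *	} xx_softc[NXX];
 *
 *	// At attach time, register the interface along with its
 *	// link-level parameters (a struct bpf_devp whose bdev_type and
 *	// bdev_hdrlen describe the device, as used above):
 *	bpfattach(&sc->xs_bpf, &sc->xs_if, &xx_devp);
 *
 *	// In the receive interrupt path, hand each packet to the filter
 *	// machinery only when a listener is attached (xs_bpf non-zero):
 *	if (sc->xs_bpf)
 *		bpf_tap(sc->xs_bpf, (u_char *)buf, totlen);
 */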