/*
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that: (1) source code distributions
 * retain the above copyright notice and this paragraph in its entirety, (2)
 * distributions including binary code include the above copyright notice and
 * this paragraph in its entirety in the documentation or other materials
 * provided with the distribution, and (3) all advertising materials mentioning
 * features or use of this software display the following acknowledgement:
 * ``This product includes software developed by the University of California,
 * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
 * the University nor the names of its contributors may be used to endorse
 * or promote products derived from this software without specific prior
 * written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed in 4.3BSD Unix.
 */
#ifndef lint
static char rcsid[] =
    "$Header: bpf.c,v 1.23 91/01/30 18:22:13 mccanne Exp $";
#endif

#include "bpfilter.h"

#if (NBPFILTER > 0)

#ifndef __GNUC__
#define inline
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/buf.h>
#include <sys/dir.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/ioctl.h>
#include <sys/map.h>

#include <sys/file.h>
#ifdef sparc
#include <sys/stream.h>
#endif
#include <sys/tty.h>
#include <sys/uio.h>

#include <sys/protosw.h>
#include <sys/socket.h>
#include <net/if.h>

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <sys/errno.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>

#define PRINET	26		/* interruptible */

/*
 * The default read buffer size is patchable.
 */
int bpf_bufsize = MCLBYTES;

/*
 * bpf_iflist is the list of interfaces; each corresponds to an ifnet.
 * bpf_dtab holds the descriptors, indexed by minor device #.
 *
 * We really don't need NBPFILTER bpf_if entries, but this eliminates
 * the need to account for all possible drivers here.
 * This problem will go away when these structures are allocated dynamically.
 */
static struct bpf_if	*bpf_iflist;
static struct bpf_d	bpf_dtab[NBPFILTER];

static void	bpf_ifname();
static void	catchpacket();
static int	bpf_setif();
static int	bpf_initd();

static int
bpf_movein(uio, linktype, mp, sockp)
	register struct uio *uio;
	int linktype;
	register struct mbuf **mp;
	register struct sockaddr *sockp;
{
	struct mbuf *m;
	int error;
	int len;
	int hlen;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {
	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = sizeof(struct ether_header);
		break;

	case DLT_FDDI:
		sockp->sa_family = AF_UNSPEC;
		/* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
		hlen = 24;
		break;

	default:
		return (EIO);
	}

	len = uio->uio_resid;
	if ((unsigned)len > MCLBYTES)
		return (EIO);

	MGET(m, M_WAIT, MT_DATA);
	if (m == 0)
		return (ENOBUFS);
	if (len > MLEN) {
		MCLGET(m, M_WAIT);
		if ((m->m_flags & M_EXT) == 0) {
			error = ENOBUFS;
			goto bad;
		}
	}
	m->m_len = len;
	*mp = m;
	/*
	 * Make room for link header.
	 */
	if (hlen) {
		m->m_len -= hlen;
		m->m_data += hlen;	/* XXX */

		error = uiomove((caddr_t)sockp->sa_data, hlen, uio);
		if (error)
			goto bad;
	}
	error = uiomove(mtod(m, caddr_t), len - hlen, uio);
	if (!error)
		return (0);
bad:
	m_freem(m);
	return (error);
}

/*
 * Attach 'd' to the bpf interface 'bp', i.e. make 'd' listen on 'bp'.
 * Must be called at splimp.
 */
static void
bpf_attachd(d, bp)
	struct bpf_d *d;
	struct bpf_if *bp;
{
	/* Point d at bp. */
	d->bd_bif = bp;

	/* Add d to bp's list of listeners. */
	d->bd_next = bp->bif_dlist;
	bp->bif_dlist = d;

	/*
	 * Let the driver know we're here (if it doesn't already).
	 */
	*bp->bif_driverp = bp;
}

static void
bpf_detachd(d)
	struct bpf_d *d;
{
	struct bpf_d **p;
	struct bpf_if *bp;

	bp = d->bd_bif;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		if (ifpromisc(bp->bif_ifp, 0))
			/*
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			panic("bpf_detachd: ifpromisc failed");
	}
	/* Remove 'd' from the interface's descriptor list. */
	p = &bp->bif_dlist;
	while (*p != d) {
		p = &(*p)->bd_next;
		if (*p == 0)
			panic("bpf_detachd: descriptor not in list");
	}
	*p = (*p)->bd_next;
	if (bp->bif_dlist == 0)
		/*
		 * Let the driver know that there are no more listeners.
		 */
		*d->bd_bif->bif_driverp = 0;
	d->bd_bif = 0;
}


/*
 * Mark a descriptor free by making it point to itself.
 * This is probably cheaper than marking with a constant since
 * the address should be in a register anyway.
 */
#define D_ISFREE(d)	((d) == (d)->bd_next)
#define D_MARKFREE(d)	((d)->bd_next = (d))
#define D_MARKUSED(d)	((d)->bd_next = 0)

/*
 * bpfopen - open ethernet device
 *
 * Errors:	ENXIO	- illegal minor device number
 *		EBUSY	- too many files open
 */
/* ARGSUSED */
int
bpfopen(dev, flag)
	dev_t dev;
	int flag;
{
	int error, s;
	register struct bpf_d *d;

	if (minor(dev) >= NBPFILTER)
		return (ENXIO);

	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
	s = splimp();
	d = &bpf_dtab[minor(dev)];
	if (!D_ISFREE(d)) {
		splx(s);
		return (EBUSY);
	} else
		/* Mark "in use" and do most of the initialization. */
		bzero((char *)d, sizeof(*d));
	splx(s);

	error = bpf_initd(d);
	if (error) {
		D_MARKFREE(d);
		return (error);
	}
	return (0);
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
bpfclose(dev, flag)
	dev_t dev;
	int flag;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	int s;

	s = splimp();
	if (d->bd_bif)
		bpf_detachd(d);
	splx(s);

	/* Free the buffer space. */
	if (d->bd_hbuf)
		free(d->bd_hbuf, M_DEVBUF);
	if (d->bd_fbuf)
		free(d->bd_fbuf, M_DEVBUF);
	free(d->bd_sbuf, M_DEVBUF);
	if (d->bd_filter)
		free((caddr_t)d->bd_filter, M_DEVBUF);

	D_MARKFREE(d);
}

/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = 0;
/*
 * bpfread - read next chunk of packets from buffers
 */
int
bpfread(dev, uio)
	dev_t dev;
	register struct uio *uio;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	int error;
	int s;

	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	s = splimp();
	/*
	 * If the hold buffer is empty, then set a timer and sleep
	 * until either the timeout has occurred or enough packets have
	 * arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == 0) {
		if (d->bd_immediate && d->bd_slen != 0) {
			/*
			 * One or more packets arrived since the previous
			 * read, or while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}
		error = tsleep((caddr_t)d, PRINET|PCATCH, "bpf", d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			splx(s);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				splx(s);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	splx(s);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is the same size as
	 * the kernel buffers.
	 */
	error = uiomove(d->bd_hbuf, d->bd_hlen, uio);

	s = splimp();
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = 0;
	splx(s);

	return (error);
}
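
/*
 * Illustrative sketch (not part of this driver): how a user-level reader
 * might consume the buffer returned by bpfread() above.  The read must be
 * exactly the size reported by BIOCGBLEN, and the result is a sequence of
 * bpf_hdr records, each padded out to a BPF_WORDALIGN boundary by
 * catchpacket().  The file descriptor 'fd' is assumed to be an already
 * opened and configured bpf minor; this would be compiled as a separate
 * user program, not as part of the kernel.
 */
#ifdef notdef
#include <sys/types.h>
#include <sys/ioctl.h>
#include <net/bpf.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

void
dump_packets(fd)
	int fd;			/* an open, configured bpf descriptor */
{
	u_int bufsize;
	char *buf, *p;
	int cc;
	struct bpf_hdr *bh;

	if (ioctl(fd, BIOCGBLEN, (caddr_t)&bufsize) < 0) {
		perror("BIOCGBLEN");
		return;
	}
	if ((buf = malloc(bufsize)) == 0)
		return;
	/* One read returns a whole kernel buffer's worth of packets. */
	while ((cc = read(fd, buf, bufsize)) > 0) {
		for (p = buf; p < buf + cc; ) {
			bh = (struct bpf_hdr *)p;
			/* Captured bytes start at p + bh->bh_hdrlen. */
			printf("caplen %u datalen %u\n",
			    (u_int)bh->bh_caplen, (u_int)bh->bh_datalen);
			/* Each record is padded to a word boundary. */
			p += BPF_WORDALIGN(bh->bh_hdrlen + bh->bh_caplen);
		}
	}
	free(buf);
}
#endif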

/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static inline void
bpf_wakeup(d)
	register struct bpf_d *d;
{
	wakeup((caddr_t)d);
	if (d->bd_selproc) {
		selwakeup(d->bd_selproc, (int)d->bd_selcoll);
		d->bd_selcoll = 0;
		d->bd_selproc = 0;
	}
}

int
bpfwrite(dev, uio)
	dev_t dev;
	struct uio *uio;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	struct ifnet *ifp;
	struct mbuf *m;
	int error, s;
	static struct sockaddr dst;

	if (d->bd_bif == 0)
		return (ENXIO);

	ifp = d->bd_bif->bif_ifp;

	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_resid > ifp->if_mtu)
		return (EMSGSIZE);

	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst);
	if (error)
		return (error);

	s = splnet();
	error = (*ifp->if_output)(ifp, m, &dst);
	splx(s);
	/*
	 * The driver frees the mbuf.
	 */
	return (error);
}
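
/*
 * Illustrative sketch (not part of this driver): injecting a packet from
 * user level through bpfwrite() above.  The descriptor must already be
 * bound to an interface with BIOCSETIF, and for DLT_EN10MB the buffer
 * begins with a complete ether_header; bpfwrite() rejects anything larger
 * than the interface MTU.  The descriptor 'fd', the helper name, and the
 * choice of ETHERTYPE_IP are all hypothetical; user-level code, compiled
 * separately.
 */
#ifdef notdef
#include <sys/types.h>
#include <sys/socket.h>
#include <net/if.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <unistd.h>

int
send_frame(fd, dst, src, payload, len)
	int fd;
	u_char *dst, *src, *payload;
	int len;
{
	u_char frame[sizeof(struct ether_header) + ETHERMTU];
	struct ether_header *eh = (struct ether_header *)frame;

	if (len < 0 || len > (int)(sizeof(frame) - sizeof(*eh)))
		return (-1);

	/* Fill in the link-level header; bpf_movein() strips it off. */
	bcopy((char *)dst, (char *)eh->ether_dhost, 6);
	bcopy((char *)src, (char *)eh->ether_shost, 6);
	eh->ether_type = htons(ETHERTYPE_IP);
	bcopy((char *)payload, (char *)(frame + sizeof(*eh)), len);

	/* bpfwrite() enforces the MTU limit on the whole write. */
	return (write(fd, frame, sizeof(*eh) + len));
}
#endif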

/*
 * Reset a descriptor by flushing its packet buffer and clearing the receive
 * and drop counts.  Should be called at splimp.
 */
static void
reset_d(d)
	struct bpf_d *d;
{
	if (d->bd_hbuf) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = 0;
	}
	d->bd_slen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
}

/*
 *  FIONREAD		Check for read packet available.
 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
 *  BIOCGFLEN		Get max filter len.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSETF		Set ethernet read filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 */
/* ARGSUSED */
int
bpfioctl(dev, cmd, addr, flag)
	dev_t dev;
	int cmd;
	caddr_t addr;
	int flag;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	int s, error = 0;

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			s = splimp();
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			splx(s);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == 0)
				error = EINVAL;
			else {
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get max filter len.
	 */
	case BIOCGFLEN:
		*(u_int *)addr = BPF_MAXINSNS;
		break;
	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set ethernet read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, (struct bpf_program *)addr);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		s = splimp();
		reset_d(d);
		splx(s);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == 0) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		s = splimp();
		if (d->bd_promisc == 0) {
			d->bd_promisc = 1;
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
		}
		splx(s);
		break;

	/*
	 * Get link layer type.
	 */
	case BIOCGDLT:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			bpf_ifname(d->bd_bif->bif_ifp, (struct ifreq *)addr);
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;
			u_long msec;

			/* Compute number of milliseconds. */
			msec = tv->tv_sec * 1000 + tv->tv_usec / 1000;
			/*
			 * Scale milliseconds to ticks.  Assume the hard
			 * clock has millisecond or coarser resolution
			 * (i.e. tick >= 1000).  For a 10ms hardclock,
			 * tick/1000 = 10, so rtout <- msec/10.
			 */
			d->bd_rtout = msec / (tick / 1000);
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;
			u_long msec = d->bd_rtout;

			msec *= tick / 1000;
			tv->tv_sec = msec / 1000;
			tv->tv_usec = msec % 1000;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;
	}
	return (error);
}

/*
 * Set d's packet filter program to 'fp'.  If 'd' already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
int
bpf_setf(d, fp)
	struct bpf_d *d;
	struct bpf_program *fp;
{
	struct bpf_insn *fcode, *old;
	u_int flen, size;
	int s;

	old = d->bd_filter;
	if (fp->bf_insns == 0) {
		if (fp->bf_len != 0)
			return (EINVAL);
		s = splimp();
		d->bd_filter = 0;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_DEVBUF);
		return (0);
	}
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return (EINVAL);

	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *)malloc(size, M_DEVBUF, M_WAITOK);
	/*
	 * On a copyin error or an invalid program, fall through
	 * and free fcode before returning.
	 */
	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		s = splimp();
		d->bd_filter = fcode;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_DEVBUF);

		return (0);
	}
	free((caddr_t)fcode, M_DEVBUF);
	return (EINVAL);
}
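
/*
 * Illustrative sketch (not part of this driver): the kind of bpf_program a
 * user would hand to BIOCSETF, assuming the BPF_STMT()/BPF_JUMP() convenience
 * macros from net/bpf.h.  The program accepts only IP-over-ethernet; a
 * non-zero return from the filter is the number of bytes to save (see
 * bpf_tap() below), and zero rejects the packet.  The descriptor 'fd' and
 * the helper name are hypothetical; user-level code, compiled separately.
 */
#ifdef notdef
#include <sys/types.h>
#include <sys/ioctl.h>
#include <net/bpf.h>
#include <stdio.h>

static struct bpf_insn insns[] = {
	/* Load the ethertype halfword (offset 12 in the frame). */
	BPF_STMT(BPF_LD+BPF_H+BPF_ABS, 12),
	/* If it is IP (0x0800), fall through to accept; else skip 1. */
	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, 0x0800, 0, 1),
	/* Accept: save the whole packet. */
	BPF_STMT(BPF_RET+BPF_K, (u_int)-1),
	/* Reject. */
	BPF_STMT(BPF_RET+BPF_K, 0),
};

int
install_filter(fd)
	int fd;
{
	struct bpf_program prog;

	prog.bf_len = sizeof(insns) / sizeof(insns[0]);
	prog.bf_insns = insns;
	if (ioctl(fd, BIOCSETF, (caddr_t)&prog) < 0) {
		perror("BIOCSETF");
		return (-1);
	}
	return (0);
}
#endif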

/*
 * Detach 'd' from its current interface (if attached at all) and attach to
 * the interface named 'name'.  Return ioctl error code or 0.
 */
static int
bpf_setif(d, ifr)
	struct bpf_d *d;
	struct ifreq *ifr;
{
	struct bpf_if *bp;
	char *cp;
	int unit, s;

	/*
	 * Separate string into name part and unit number.  Put a null
	 * byte at the end of the name part, and compute the number.
	 * If a unit number is unspecified, the default is 0,
	 * as initialized above.  XXX This should be common code.
	 */
	unit = 0;
	cp = ifr->ifr_name;
	cp[sizeof(ifr->ifr_name) - 1] = '\0';
	while (*cp++) {
		if (*cp >= '0' && *cp <= '9') {
			unit = *cp - '0';
			*cp++ = '\0';
			while (*cp)
				unit = 10 * unit + *cp++ - '0';
			break;
		}
	}
	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == 0 || unit != ifp->if_unit
		    || strcmp(ifp->if_name, ifr->ifr_name) != 0)
			continue;
		/*
		 * We found the requested interface.  If we're
		 * already attached to it, just flush the buffer.
		 * If it's not up, return an error.
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (ENETDOWN);
		s = splimp();
		if (bp != d->bd_bif) {
			if (d->bd_bif)
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);

			bpf_attachd(d, bp);
		}
		reset_d(d);
		splx(s);
		return (0);
	}
	/* Not found. */
	return (ENXIO);
}

/*
 * Lookup the name of the 'ifp' interface and return it in 'ifr->ifr_name'.
 * We augment the ifp's base name with its unit number.
 */
static void
bpf_ifname(ifp, ifr)
	struct ifnet *ifp;
	struct ifreq *ifr;
{
	char *s = ifp->if_name;
	char *d = ifr->ifr_name;

	while (*d++ = *s++)
		;
	/* XXX Assume that unit number is less than 10. */
	d[-1] = ifp->if_unit + '0';	/* overwrite the copied NUL */
	*d = '\0';
}

/*
 * Support for select() system call
 * Inspired by the code in tty.c for the same purpose.
 *
 * bpfselect - returns true iff the specific operation
 *	will not block indefinitely.  Otherwise, return
 *	false but make a note that a selwakeup() must be done.
 */
int
bpfselect(dev, rw, p)
	register dev_t dev;
	int rw;
	struct proc *p;
{
	register struct bpf_d *d;
	register int s;

	if (rw != FREAD)
		return (0);
	/*
	 * An imitation of the FIONREAD ioctl code.
	 */
	d = &bpf_dtab[minor(dev)];

	s = splimp();
	if (d->bd_slen != 0 || (d->bd_hbuf && d->bd_hlen != 0)) {
		/*
		 * There is data waiting.
		 */
		splx(s);
		return (1);
	}
	/*
	 * No data ready.  If there's already a select() waiting on this
	 * minor device then this is a collision.  This shouldn't happen
	 * because minors really should not be shared, but if a process
	 * forks while one of these is open, it is possible that both
	 * processes could select on the same descriptor.
	 */
	if (d->bd_selproc && d->bd_selproc->p_wchan == (caddr_t)&selwait)
		d->bd_selcoll = 1;
	else
		d->bd_selproc = p;

	splx(s);
	return (0);
}

/*
 * bpf_tap - incoming linkage from device drivers
 */
void
bpf_tap(arg, pkt, pktlen)
	caddr_t arg;
	register u_char *pkt;
	register u_int pktlen;
{
	struct bpf_if *bp;
	register struct bpf_d *d;
	register u_int slen;
	/*
	 * Note that the ipl does not have to be raised at this point.
	 * The only problem that could arise here would be if two different
	 * interfaces shared any data, which is not the case.
	 */
	bp = (struct bpf_if *)arg;
	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		if (d->bd_filter == 0) {
			catchpacket(d, pkt, pktlen, (u_int)-1, bcopy);
			continue;
		}
		slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
		if (slen != 0)
			catchpacket(d, pkt, pktlen, slen, bcopy);
	}
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void
bpf_mcopy(src, dst, len)
	u_char *src;
	u_char *dst;
	register int len;
{
	register struct mbuf *m = (struct mbuf *)src;
	register unsigned count;

	while (len > 0) {
		if (m == 0)
			panic("bpf_mcopy");
		count = MIN(m->m_len, len);
		bcopy(mtod(m, caddr_t), (caddr_t)dst, count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
}

/*
 * Length of ethernet and TCP/IP header with no IP options.
 */
#define BPF_MIN_SNAPLEN 50

/*
 * bpf_mtap - incoming linkage from device drivers, when packet
 * is in an mbuf chain
 */
void
bpf_mtap(arg, m)
	caddr_t arg;
	struct mbuf *m;
{
	struct bpf_if *bp = (struct bpf_if *)arg;
	struct bpf_d *d;
	u_int pktlen, slen;
	struct mbuf *m0;
#ifdef notdef
	u_char *cp;
	int nbytes;
	static u_char buf[BPF_MIN_SNAPLEN];

	if (m->m_len >= BPF_MIN_SNAPLEN) {
		slen = m->m_len;
		cp = mtod(m, u_char *);
	}
	else {
		nbytes = BPF_MIN_SNAPLEN;
		cp = buf;
		m0 = m;
		while (m0 && nbytes > 0) {
			slen = MIN(m0->m_len, nbytes);
			bcopy(mtod(m0, char *), (char *)cp, slen);
			cp += slen;
			nbytes -= slen;
			m0 = m0->m_next;
		}
		if (nbytes > 0)
			/* Packet too small? */
			return;

		slen = BPF_MIN_SNAPLEN;
		cp = buf;
	}
#endif
	pktlen = 0;
	m0 = m;
	while (m0) {
		pktlen += m0->m_len;
		m0 = m0->m_next;
	}
	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		if (d->bd_filter == 0) {
			catchpacket(d, (u_char *)m, pktlen, (u_int)-1,
			    bpf_mcopy);
			continue;
		}
		slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
		if (slen != 0)
			catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy);
	}
}
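
/*
 * Illustrative sketch (not part of this driver): the driver side of the
 * tap.  A hypothetical ethernet driver keeps a caddr_t in its softc,
 * registers it with bpfattach() at attach time, and hands each received
 * frame to bpf_tap() (or bpf_mtap() for mbuf chains) only when a listener
 * is present - bpf_attachd() sets the pointer non-zero and bpf_detachd()
 * clears it.  All names beginning with 'xx' are made up.
 */
#ifdef notdef
/* Hypothetical driver softc; only the fields used here are shown. */
struct xx_softc {
	struct ifnet	xx_if;		/* network-visible interface */
	caddr_t		xx_bpf;		/* filled in/cleared by bpf */
};

/* Called once from the driver's attach routine. */
void
xx_bpfattach(sc)
	struct xx_softc *sc;
{
	bpfattach(&sc->xx_bpf, &sc->xx_if, DLT_EN10MB,
	    sizeof(struct ether_header));
}

/* Called from the receive interrupt, once per frame. */
void
xx_bpftap(sc, frame, len)
	struct xx_softc *sc;
	u_char *frame;
	u_int len;
{
	/*
	 * bpf keeps xx_bpf zero while no descriptor is listening;
	 * bpf_mtap(sc->xx_bpf, m) is the variant for mbuf chains.
	 */
	if (sc->xx_bpf)
		bpf_tap(sc->xx_bpf, frame, len);
}
#endif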

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer, waking up any pending reads once the buffer fills.
 * 'cpfn' is the routine called to do the actual data
 * transfer.  'bcopy' is passed in to copy contiguous chunks, while
 * 'bpf_mcopy' is passed in to copy mbuf chains.  In the latter
 * case, 'pkt' is really an mbuf.
 */
static void
catchpacket(d, pkt, pktlen, snaplen, cpfn)
	register struct bpf_d *d;
	register u_char *pkt;
	register u_int pktlen, snaplen;
	register void (*cpfn)();
{
	register struct bpf_hdr *hp;
	register int totlen, curlen;
	register int hdrlen = d->bd_bif->bif_hdrlen;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	if (snaplen <= pktlen)
		totlen = snaplen + hdrlen;
	else {
		totlen = pktlen + hdrlen;
		if (totlen > d->bd_bufsize)
			totlen = d->bd_bufsize;
	}

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == 0) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		bpf_wakeup(d);
		curlen = 0;
	}
	else if (d->bd_immediate)
		/*
		 * Immediate mode is set.  A packet arrived so any
		 * reads should be woken up.
		 */
		bpf_wakeup(d);

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
#ifdef sun
	uniqtime(&hp->bh_tstamp);
#else
#ifdef hp300
	microtime(&hp->bh_tstamp);
#else
	hp->bh_tstamp = time;
#endif
#endif
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;
}

/*
 * Initialize all nonzero fields of a descriptor.
 */
static int
bpf_initd(d)
	register struct bpf_d *d;
{
	d->bd_bufsize = bpf_bufsize;
	d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
	if (d->bd_fbuf == 0)
		return (ENOBUFS);

	d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
	if (d->bd_sbuf == 0) {
		free(d->bd_fbuf, M_DEVBUF);
		return (ENOBUFS);
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	return (0);
}

/*
 * Register 'ifp' with bpf.  'dlt' is the link layer type and 'hdrlen' is
 * the fixed length of the link-level header.  'driverp' is a pointer to the
 * 'struct bpf_if *' in the driver's softc.
 */
void
bpfattach(driverp, ifp, dlt, hdrlen)
	caddr_t *driverp;
	struct ifnet *ifp;
	u_int dlt, hdrlen;
{
	struct bpf_if *bp;
	int i;

	bp = (struct bpf_if *)malloc(sizeof(*bp), M_DEVBUF, M_DONTWAIT);
	if (bp == 0)
		panic("bpfattach");

	bp->bif_dlist = 0;
	bp->bif_driverp = (struct bpf_if **)driverp;
	bp->bif_ifp = ifp;
	bp->bif_dlt = dlt;

	bp->bif_next = bpf_iflist;
	bpf_iflist = bp;

	*bp->bif_driverp = 0;

	/*
	 * Compute the length of the bpf header.  This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
	 */
	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;

	/*
	 * Mark all the descriptors free if this hasn't been done.
	 */
	if (!D_ISFREE(&bpf_dtab[0]))
		for (i = 0; i < NBPFILTER; ++i)
			D_MARKFREE(&bpf_dtab[i]);

	printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit);
}

/* XXX This routine belongs in net/if.c. */
/*
 * Set/clear promiscuous mode on interface ifp based on the truth value
 * of pswitch.  The calls are reference counted so that only the first
 * "on" request actually has an effect, as does the final "off" request.
 * Results are undefined if the off and on requests are not matched.
 */
int
ifpromisc(ifp, pswitch)
	struct ifnet *ifp;
	int pswitch;
{
	/*
	 * If the device is not configured up, we cannot put it in
	 * promiscuous mode.
	 */
	if ((ifp->if_flags & IFF_UP) == 0)
		return (ENETDOWN);

	if (pswitch) {
		if (ifp->if_pcount++ != 0)
			return (0);
		ifp->if_flags |= IFF_PROMISC;
	} else {
		if (--ifp->if_pcount > 0)
			return (0);
		ifp->if_flags &= ~IFF_PROMISC;
	}
	return ((*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)0));
}

#endif /* NBPFILTER > 0 */
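
/*
 * Illustrative sketch (not part of this driver): minimal user-level setup
 * for the device interface above - open a free minor, bind it to an
 * interface with BIOCSETIF, and enable immediate mode.  The device path
 * "/dev/bpf0", the interface name, and the helper name are hypothetical;
 * user-level code, compiled separately.
 */
#ifdef notdef
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/bpf.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int
open_bpf(ifname)
	char *ifname;		/* e.g. "le0" */
{
	struct ifreq ifr;
	u_int on = 1;
	int fd;

	/* Each minor supports one open; EBUSY means try the next minor. */
	fd = open("/dev/bpf0", O_RDWR);
	if (fd < 0)
		return (-1);

	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
	if (ioctl(fd, BIOCSETIF, (caddr_t)&ifr) < 0 ||
	    ioctl(fd, BIOCIMMEDIATE, (caddr_t)&on) < 0) {
		close(fd);
		return (-1);
	}
	return (fd);
}
#endif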