1 /*- 2 * Copyright (c) 1990-1991 The Regents of the University of California. 3 * All rights reserved. 4 * 5 * This code is derived from the Stanford/CMU enet packet filter, 6 * (net/enet.c) distributed as part of 4.3BSD, and code contributed 7 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence 8 * Berkeley Laboratory. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the University of 21 * California, Berkeley and its contributors. 22 * 4. Neither the name of the University nor the names of its contributors 23 * may be used to endorse or promote products derived from this software 24 * without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 * 38 * @(#)bpf.c 7.5 (Berkeley) 7/15/91 39 * 40 * static char rcsid[] = 41 * "$Header: bpf.c,v 1.33 91/10/27 21:21:58 mccanne Exp $"; 42 */ 43 44 #include "bpfilter.h" 45 46 #if NBPFILTER > 0 47 48 #ifndef __GNUC__ 49 #define inline 50 #else 51 #define inline __inline__ 52 #endif 53 54 #include <sys/param.h> 55 #include <sys/systm.h> 56 #include <sys/mbuf.h> 57 #include <sys/buf.h> 58 #include <sys/dir.h> 59 #include <sys/proc.h> 60 #include <sys/user.h> 61 #include <sys/ioctl.h> 62 #include <sys/map.h> 63 64 #include <sys/file.h> 65 #if defined(sparc) && BSD < 199103 66 #include <sys/stream.h> 67 #endif 68 #include <sys/tty.h> 69 #include <sys/uio.h> 70 71 #include <sys/protosw.h> 72 #include <sys/socket.h> 73 #include <net/if.h> 74 75 #include <net/bpf.h> 76 #include <net/bpfdesc.h> 77 78 #include <sys/errno.h> 79 80 #include <netinet/in.h> 81 #include <netinet/if_ether.h> 82 #include <sys/kernel.h> 83 84 /* 85 * Older BSDs don't have kernel malloc. 86 */ 87 #if BSD < 199103 88 extern bcopy(); 89 static caddr_t bpf_alloc(); 90 #define malloc(size, type, canwait) bpf_alloc(size, canwait) 91 #define free(cp, type) m_free(*(struct mbuf **)(cp - 8)) 92 #define M_WAITOK M_WAIT 93 #define BPF_BUFSIZE (MCLBYTES-8) 94 #define ERESTART EINTR 95 #else 96 #define BPF_BUFSIZE 4096 97 #endif 98 99 #define PRINET 26 /* interruptible */ 100 101 /* 102 * The default read buffer size is patchable. 
103 */ 104 int bpf_bufsize = BPF_BUFSIZE; 105 106 /* 107 * bpf_iflist is the list of interfaces; each corresponds to an ifnet 108 * bpf_dtab holds the descriptors, indexed by minor device # 109 * 110 * We really don't need NBPFILTER bpf_if entries, but this eliminates 111 * the need to account for all possible drivers here. 112 * This problem will go away when these structures are allocated dynamically. 113 */ 114 static struct bpf_if *bpf_iflist; 115 static struct bpf_d bpf_dtab[NBPFILTER]; 116 117 static void bpf_ifname(); 118 static void catchpacket(); 119 static int bpf_setif(); 120 static int bpf_initd(); 121 122 static int 123 bpf_movein(uio, linktype, mp, sockp) 124 register struct uio *uio; 125 int linktype; 126 register struct mbuf **mp; 127 register struct sockaddr *sockp; 128 { 129 struct mbuf *m; 130 int error; 131 int len; 132 int hlen; 133 134 /* 135 * Build a sockaddr based on the data link layer type. 136 * We do this at this level because the ethernet header 137 * is copied directly into the data field of the sockaddr. 138 * In the case of SLIP, there is no header and the packet 139 * is forwarded as is. 140 * Also, we are careful to leave room at the front of the mbuf 141 * for the link level header. 142 */ 143 switch (linktype) { 144 case DLT_SLIP: 145 sockp->sa_family = AF_INET; 146 hlen = 0; 147 break; 148 149 case DLT_EN10MB: 150 sockp->sa_family = AF_UNSPEC; 151 /* XXX Would MAXLINKHDR be better? 
*/ 152 hlen = sizeof(struct ether_header); 153 break; 154 155 case DLT_FDDI: 156 sockp->sa_family = AF_UNSPEC; 157 /* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */ 158 hlen = 24; 159 break; 160 161 default: 162 return (EIO); 163 } 164 165 len = uio->uio_resid; 166 if ((unsigned)len > MCLBYTES) 167 return (EIO); 168 169 MGET(m, M_WAIT, MT_DATA); 170 if (m == 0) 171 return (ENOBUFS); 172 if (len > MLEN) { 173 #if BSD >= 199103 174 MCLGET(m, M_WAIT); 175 if ((m->m_flags & M_EXT) == 0) { 176 #else 177 MCLGET(m); 178 if (m->m_len == MCLBYTES) { 179 #endif 180 error = ENOBUFS; 181 goto bad; 182 } 183 } 184 m->m_len = len; 185 *mp = m; 186 /* 187 * Make room for link header. 188 */ 189 if (hlen) { 190 m->m_len -= hlen; 191 #if BSD >= 199103 192 m->m_data += hlen; /* XXX */ 193 #else 194 m->m_off += hlen; 195 #endif 196 error = uiomove((caddr_t)sockp->sa_data, hlen, uio); 197 if (error) 198 goto bad; 199 } 200 error = uiomove(mtod(m, caddr_t), len - hlen, uio); 201 if (!error) 202 return (0); 203 bad: 204 m_freem(m); 205 return (error); 206 } 207 208 /* 209 * Attach file to the bpf interface, i.e. make d listen on bp. 210 * Must be called at splimp. 211 */ 212 static void 213 bpf_attachd(d, bp) 214 struct bpf_d *d; 215 struct bpf_if *bp; 216 { 217 /* 218 * Point d at bp, and add d to the interface's list of listeners. 219 * Finally, point the driver's bpf cookie at the interface so 220 * it will divert packets to bpf. 221 */ 222 d->bd_bif = bp; 223 d->bd_next = bp->bif_dlist; 224 bp->bif_dlist = d; 225 226 *bp->bif_driverp = bp; 227 } 228 229 /* 230 * Detach a file from its interface. 231 */ 232 static void 233 bpf_detachd(d) 234 struct bpf_d *d; 235 { 236 struct bpf_d **p; 237 struct bpf_if *bp; 238 239 bp = d->bd_bif; 240 /* 241 * Check if this descriptor had requested promiscuous mode. 242 * If so, turn it off. 
243 */ 244 if (d->bd_promisc) { 245 d->bd_promisc = 0; 246 if (ifpromisc(bp->bif_ifp, 0)) 247 /* 248 * Something is really wrong if we were able to put 249 * the driver into promiscuous mode, but can't 250 * take it out. 251 */ 252 panic("bpf: ifpromisc failed"); 253 } 254 /* Remove d from the interface's descriptor list. */ 255 p = &bp->bif_dlist; 256 while (*p != d) { 257 p = &(*p)->bd_next; 258 if (*p == 0) 259 panic("bpf_detachd: descriptor not in list"); 260 } 261 *p = (*p)->bd_next; 262 if (bp->bif_dlist == 0) 263 /* 264 * Let the driver know that there are no more listeners. 265 */ 266 *d->bd_bif->bif_driverp = 0; 267 d->bd_bif = 0; 268 } 269 270 271 /* 272 * Mark a descriptor free by making it point to itself. 273 * This is probably cheaper than marking with a constant since 274 * the address should be in a register anyway. 275 */ 276 #define D_ISFREE(d) ((d) == (d)->bd_next) 277 #define D_MARKFREE(d) ((d)->bd_next = (d)) 278 #define D_MARKUSED(d) ((d)->bd_next = 0) 279 280 /* 281 * bpfopen - open ethernet device 282 * 283 * Errors: ENXIO - illegal minor device number 284 * EBUSY - too many files open 285 */ 286 /* ARGSUSED */ 287 int 288 bpfopen(dev, flag) 289 dev_t dev; 290 int flag; 291 { 292 int error, s; 293 register struct bpf_d *d; 294 295 if (minor(dev) >= NBPFILTER) 296 return (ENXIO); 297 298 /* 299 * Each minor can be opened by only one process. If the requested 300 * minor is in use, return EBUSY. 301 */ 302 s = splimp(); 303 d = &bpf_dtab[minor(dev)]; 304 if (!D_ISFREE(d)) { 305 splx(s); 306 return (EBUSY); 307 } else 308 /* Mark "free" and do most initialization. */ 309 bzero((char *)d, sizeof(*d)); 310 splx(s); 311 312 error = bpf_initd(d); 313 if (error) { 314 D_MARKFREE(d); 315 return (error); 316 } 317 return (0); 318 } 319 320 /* 321 * Close the descriptor by detaching it from its interface, 322 * deallocating its buffers, and marking it free. 
323 */ 324 /* ARGSUSED */ 325 bpfclose(dev, flag) 326 dev_t dev; 327 int flag; 328 { 329 register struct bpf_d *d = &bpf_dtab[minor(dev)]; 330 int s; 331 332 s = splimp(); 333 if (d->bd_bif) 334 bpf_detachd(d); 335 splx(s); 336 337 bpf_freed(d); 338 } 339 340 #if BSD < 199103 341 static 342 bpf_timeout(arg) 343 caddr_t arg; 344 { 345 struct bpf_d *d = (struct bpf_d *)arg; 346 d->bd_timedout = 1; 347 wakeup(arg); 348 } 349 350 static int 351 tsleep(cp, pri, s, t) 352 register caddr_t cp; 353 register int pri; 354 char *s; 355 register int t; 356 { 357 register struct bpf_d *d = (struct bpf_d *)cp; 358 register int error; 359 360 if (t != 0) { 361 d->bd_timedout = 0; 362 timeout(bpf_timeout, cp); 363 } 364 error = sleep(cp, pri); 365 if (t != 0) { 366 if (d->bd_timedout != 0) 367 return EWOULDBLOCK; 368 untimeout(bpf_timeout, cp); 369 } 370 return error; 371 } 372 #endif 373 374 /* 375 * Rotate the packet buffers in descriptor d. Move the store buffer 376 * into the hold slot, and the free buffer into the store slot. 377 * Zero the length of the new store buffer. 378 */ 379 #define ROTATE_BUFFERS(d) \ 380 (d)->bd_hbuf = (d)->bd_sbuf; \ 381 (d)->bd_hlen = (d)->bd_slen; \ 382 (d)->bd_sbuf = (d)->bd_fbuf; \ 383 (d)->bd_slen = 0; \ 384 (d)->bd_fbuf = 0; 385 /* 386 * bpfread - read next chunk of packets from buffers 387 */ 388 int 389 bpfread(dev, uio) 390 dev_t dev; 391 register struct uio *uio; 392 { 393 register struct bpf_d *d = &bpf_dtab[minor(dev)]; 394 int error; 395 int s; 396 397 /* 398 * Restrict application to use a buffer the same size as 399 * as kernel buffers. 400 */ 401 if (uio->uio_resid != d->bd_bufsize) 402 return (EINVAL); 403 404 s = splimp(); 405 /* 406 * If the hold buffer is empty, then set a timer and sleep 407 * until either the timeout has occurred or enough packets have 408 * arrived to fill the store buffer. 
409 */ 410 while (d->bd_hbuf == 0) { 411 if (d->bd_immediate && d->bd_slen != 0) { 412 /* 413 * A packet(s) either arrived since the previous 414 * read or arrived while we were asleep. 415 * Rotate the buffers and return what's here. 416 */ 417 ROTATE_BUFFERS(d); 418 break; 419 } 420 error = tsleep((caddr_t)d, PRINET|PCATCH, "bpf", d->bd_rtout); 421 if (error == EINTR || error == ERESTART) { 422 splx(s); 423 return (error); 424 } 425 if (error == EWOULDBLOCK) { 426 /* 427 * On a timeout, return what's in the buffer, 428 * which may be nothing. If there is something 429 * in the store buffer, we can rotate the buffers. 430 */ 431 if (d->bd_hbuf) 432 /* 433 * We filled up the buffer in between 434 * getting the timeout and arriving 435 * here, so we don't need to rotate. 436 */ 437 break; 438 439 if (d->bd_slen == 0) { 440 splx(s); 441 return (0); 442 } 443 ROTATE_BUFFERS(d); 444 break; 445 } 446 } 447 /* 448 * At this point, we know we have something in the hold slot. 449 */ 450 splx(s); 451 452 /* 453 * Move data from hold buffer into user space. 454 * We know the entire buffer is transferred since 455 * we checked above that the read buffer is bpf_bufsize bytes. 456 */ 457 #if BSD >= 199103 458 error = uiomove(d->bd_hbuf, d->bd_hlen, uio); 459 #else 460 error = uiomove(d->bd_hbuf, d->bd_hlen, UIO_READ, uio); 461 #endif 462 s = splimp(); 463 d->bd_fbuf = d->bd_hbuf; 464 d->bd_hbuf = 0; 465 splx(s); 466 467 return (error); 468 } 469 470 471 /* 472 * If there are processes sleeping on this descriptor, wake them up. 
473 */ 474 static inline void 475 bpf_wakeup(d) 476 register struct bpf_d *d; 477 { 478 wakeup((caddr_t)d); 479 if (d->bd_selproc) { 480 selwakeup(d->bd_selproc, (int)d->bd_selcoll); 481 d->bd_selcoll = 0; 482 d->bd_selproc = 0; 483 } 484 } 485 486 int 487 bpfwrite(dev, uio) 488 dev_t dev; 489 struct uio *uio; 490 { 491 register struct bpf_d *d = &bpf_dtab[minor(dev)]; 492 struct ifnet *ifp; 493 struct mbuf *m; 494 int error, s; 495 static struct sockaddr dst; 496 497 if (d->bd_bif == 0) 498 return (ENXIO); 499 500 ifp = d->bd_bif->bif_ifp; 501 502 if (uio->uio_resid == 0) 503 return (0); 504 if (uio->uio_resid > ifp->if_mtu) 505 return (EMSGSIZE); 506 507 error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst); 508 if (error) 509 return (error); 510 511 s = splnet(); 512 #if BSD >= 199103 513 error = (*ifp->if_output)(ifp, m, &dst, (struct rtenty *)0); 514 #else 515 error = (*ifp->if_output)(ifp, m, &dst); 516 #endif 517 splx(s); 518 /* 519 * The driver frees the mbuf. 520 */ 521 return (error); 522 } 523 524 /* 525 * Reset a descriptor by flushing its packet bufferand clearing the receive 526 * and drop counts. Should be called at splimp. 527 */ 528 static void 529 reset_d(d) 530 struct bpf_d *d; 531 { 532 if (d->bd_hbuf) { 533 /* Free the hold buffer. */ 534 d->bd_fbuf = d->bd_hbuf; 535 d->bd_hbuf = 0; 536 } 537 d->bd_slen = 0; 538 d->bd_rcount = 0; 539 d->bd_dcount = 0; 540 } 541 542 /* 543 * FIONREAD Check for read packet available. 544 * SIOCGIFADDR Get interface address - convenient hook to driver. 545 * BIOCGBLEN Get buffer len [for read()]. 546 * BIOCSETF Set ethernet read filter. 547 * BIOCFLUSH Flush read packet buffer. 548 * BIOCPROMISC Put interface into promiscuous mode. 549 * BIOCGDLT Get link layer type. 550 * BIOCGETIF Get interface name. 551 * BIOCSETIF Set interface. 552 * BIOCSRTIMEOUT Set read timeout. 553 * BIOCGRTIMEOUT Get read timeout. 554 * BIOCGSTATS Get packet stats. 555 * BIOCIMMEDIATE Set immediate mode. 
 */
/* ARGSUSED */
int
bpfioctl(dev, cmd, addr, flag)
	dev_t dev;
	int cmd;
	caddr_t addr;
	int flag;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	int s, error = 0;

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			/* Count bytes in both the store and hold buffers. */
			s = splimp();
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			splx(s);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == 0)
				error = EINVAL;
			else {
				/* Pass the request straight to the driver. */
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, (struct bpf_program *)addr);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		s = splimp();
		reset_d(d);
		splx(s);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == 0) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		s = splimp();
		/* Only the first request per descriptor takes effect. */
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		splx(s);
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			bpf_ifname(d->bd_bif->bif_ifp, (struct ifreq *)addr);
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;
			u_long msec;

			/* Compute number of milliseconds. */
			msec = tv->tv_sec * 1000 + tv->tv_usec / 1000;
			/* Scale milliseconds to ticks.  Assume hard
			   clock has millisecond or greater resolution
			   (i.e. tick >= 1000).  For 10ms hardclock,
			   tick/1000 = 10, so rtout<-msec/10.
			   NOTE(review): divides by zero if tick < 1000
			   -- confirm that assumption holds everywhere. */
			d->bd_rtout = msec / (tick / 1000);
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;
			u_long msec = d->bd_rtout;

			/* Inverse of the BIOCSRTIMEOUT conversion. */
			msec *= tick / 1000;
			tv->tv_sec = msec / 1000;
			tv->tv_usec = msec % 1000;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;
	}
	return (error);
}

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
731 */ 732 int 733 bpf_setf(d, fp) 734 struct bpf_d *d; 735 struct bpf_program *fp; 736 { 737 struct bpf_insn *fcode, *old; 738 u_int flen, size; 739 int s; 740 741 old = d->bd_filter; 742 if (fp->bf_insns == 0) { 743 if (fp->bf_len != 0) 744 return (EINVAL); 745 s = splimp(); 746 d->bd_filter = 0; 747 reset_d(d); 748 splx(s); 749 if (old != 0) 750 free((caddr_t)old, M_DEVBUF); 751 return (0); 752 } 753 flen = fp->bf_len; 754 if (flen > BPF_MAXINSNS) 755 return (EINVAL); 756 757 size = flen * sizeof(*fp->bf_insns); 758 fcode = (struct bpf_insn *)malloc(size, M_DEVBUF, M_WAITOK); 759 if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 && 760 bpf_validate(fcode, (int)flen)) { 761 s = splimp(); 762 d->bd_filter = fcode; 763 reset_d(d); 764 splx(s); 765 if (old != 0) 766 free((caddr_t)old, M_DEVBUF); 767 768 return (0); 769 } 770 free((caddr_t)fcode, M_DEVBUF); 771 return (EINVAL); 772 } 773 774 /* 775 * Detach a file from its current interface (if attached at all) and attach 776 * to the interface indicated by the name stored in ifr. 777 * Return an errno or 0. 778 */ 779 static int 780 bpf_setif(d, ifr) 781 struct bpf_d *d; 782 struct ifreq *ifr; 783 { 784 struct bpf_if *bp; 785 char *cp; 786 int unit, s; 787 788 /* 789 * Separate string into name part and unit number. Put a null 790 * byte at the end of the name part, and compute the number. 791 * If the a unit number is unspecified, the default is 0, 792 * as initialized above. XXX This should be common code. 793 */ 794 unit = 0; 795 cp = ifr->ifr_name; 796 cp[sizeof(ifr->ifr_name) - 1] = '\0'; 797 while (*cp++) { 798 if (*cp >= '0' && *cp <= '9') { 799 unit = *cp - '0'; 800 *cp++ = '\0'; 801 while (*cp) 802 unit = 10 * unit + *cp++ - '0'; 803 break; 804 } 805 } 806 /* 807 * Look through attached interfaces for the named one. 
808 */ 809 for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) { 810 struct ifnet *ifp = bp->bif_ifp; 811 812 if (ifp == 0 || unit != ifp->if_unit 813 || strcmp(ifp->if_name, ifr->ifr_name) != 0) 814 continue; 815 /* 816 * We found the requested interface. If we're 817 * already attached to it, just flush the buffer. 818 * If it's not up, return an error. 819 */ 820 if ((ifp->if_flags & IFF_UP) == 0) 821 return (ENETDOWN); 822 s = splimp(); 823 if (bp != d->bd_bif) { 824 if (d->bd_bif) 825 /* 826 * Detach if attached to something else. 827 */ 828 bpf_detachd(d); 829 830 bpf_attachd(d, bp); 831 } 832 reset_d(d); 833 splx(s); 834 return (0); 835 } 836 /* Not found. */ 837 return (ENXIO); 838 } 839 840 /* 841 * Convert an interface name plus unit number of an ifp to a single 842 * name which is returned in the ifr. 843 */ 844 static void 845 bpf_ifname(ifp, ifr) 846 struct ifnet *ifp; 847 struct ifreq *ifr; 848 { 849 char *s = ifp->if_name; 850 char *d = ifr->ifr_name; 851 852 while (*d++ = *s++) 853 ; 854 /* XXX Assume that unit number is less than 10. */ 855 *d++ = ifp->if_unit + '0'; 856 *d = '\0'; 857 } 858 859 /* 860 * The new select interface passes down the proc pointer; the old select 861 * stubs had to grab it out of the user struct. This glue allows either case. 862 */ 863 #if BSD >= 199103 864 #define bpf_select bpfselect 865 #else 866 int 867 bpfselect(dev, rw) 868 register dev_t dev; 869 int rw; 870 { 871 bpf_select(dev, rw, u.u_procp); 872 } 873 #endif 874 875 /* 876 * Support for select() system call 877 * Inspired by the code in tty.c for the same purpose. 878 * 879 * bpfselect - returns true iff the specific operation 880 * will not block indefinitely. Otherwise, return 881 * false but make a note that a selwakeup() must be done. 
 */
int
bpf_select(dev, rw, p)
	register dev_t dev;
	int rw;
	struct proc *p;
{
	register struct bpf_d *d;
	register int s;

	/* Only reads can block; writes/exceptions never select true here. */
	if (rw != FREAD)
		return (0);
	/*
	 * An imitation of the FIONREAD ioctl code.
	 */
	d = &bpf_dtab[minor(dev)];

	s = splimp();
	if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0)) {
		/*
		 * There is data waiting.
		 */
		splx(s);
		return (1);
	}
	/*
	 * No data ready.  If there's already a select() waiting on this
	 * minor device then this is a collision.  This shouldn't happen
	 * because minors really should not be shared, but if a process
	 * forks while one of these is open, it is possible that both
	 * processes could select on the same descriptor.
	 */
	if (d->bd_selproc && d->bd_selproc->p_wchan == (caddr_t)&selwait)
		d->bd_selcoll = 1;
	else
		d->bd_selproc = p;

	splx(s);
	return (0);
}

/*
 * bpf_tap - incoming linkage from device drivers
 *
 * Run the filter of every descriptor listening on this interface over
 * the contiguous packet (pkt, pktlen); deliver to those that match.
 */
void
bpf_tap(arg, pkt, pktlen)
	caddr_t arg;
	register u_char *pkt;
	register u_int pktlen;
{
	struct bpf_if *bp;
	register struct bpf_d *d;
	register u_int slen;
	/*
	 * Note that the ipl does not have to be raised at this point.
	 * The only problem that could arise here is that if two different
	 * interfaces shared any data.  This is not the case.
	 */
	bp = (struct bpf_if *)arg;
	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		/* slen is the snapshot length the filter wants captured. */
		slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
		if (slen != 0)
			catchpacket(d, pkt, pktlen, slen, bcopy);
	}
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void
bpf_mcopy(src, dst, len)
	u_char *src;
	u_char *dst;
	register int len;
{
	/* src is really the head of an mbuf chain (see catchpacket). */
	register struct mbuf *m = (struct mbuf *)src;
	register unsigned count;

	while (len > 0) {
		if (m == 0)
			panic("bpf_mcopy");
		count = MIN(m->m_len, len);
		bcopy(mtod(m, caddr_t), (caddr_t)dst, count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
}

/*
 * bpf_mtap - incoming linkage from device drivers, when packet
 * is in an mbuf chain
 */
void
bpf_mtap(arg, m)
	caddr_t arg;
	struct mbuf *m;
{
	struct bpf_if *bp = (struct bpf_if *)arg;
	struct bpf_d *d;
	u_int pktlen, slen;
	struct mbuf *m0;

	/* Total packet length is the sum of the chain's fragments. */
	pktlen = 0;
	for (m0 = m; m0 != 0; m0 = m0->m_next)
		pktlen += m0->m_len;

	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		/* A buflen of 0 tells bpf_filter the packet is an mbuf chain. */
		slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
		if (slen != 0)
			catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy);
	}
}

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer, rotating buffers and waking pending reads as needed.
 * (Despite the old comment, this returns nothing; it calls bpf_wakeup
 * itself when a listener should be woken.)  "cpfn" is the routine
 * called to do the actual data transfer.  bcopy is passed in to copy
 * contiguous chunks, while bpf_mcopy is passed in to copy mbuf chains.
 * In the latter case, pkt is really an mbuf.
 */
static void
catchpacket(d, pkt, pktlen, snaplen, cpfn)
	register struct bpf_d *d;
	register u_char *pkt;
	register u_int pktlen, snaplen;
	register void (*cpfn)();
{
	register struct bpf_hdr *hp;
	register int totlen, curlen;
	register int hdrlen = d->bd_bif->bif_hdrlen;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + MIN(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == 0) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		bpf_wakeup(d);
		curlen = 0;
	}
	else if (d->bd_immediate)
		/*
		 * Immediate mode is set.  A packet arrived so any
		 * reads should be woken up.
		 */
		bpf_wakeup(d);

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
#ifdef sun
	uniqtime(&hp->bh_tstamp);
#else
#if BSD >= 199103
	microtime(&hp->bh_tstamp);
#else
	hp->bh_tstamp = time;
#endif
#endif
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;
}

/*
 * Initialize all nonzero fields of a descriptor.
 * (The rest were zeroed by bzero in bpfopen.)
 */
static int
bpf_initd(d)
	register struct bpf_d *d;
{
	d->bd_bufsize = bpf_bufsize;
	d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
	if (d->bd_fbuf == 0)
		return (ENOBUFS);

	d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
	if (d->bd_sbuf == 0) {
		/* Don't leak the first buffer if the second alloc fails. */
		free(d->bd_fbuf, M_DEVBUF);
		return (ENOBUFS);
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	return (0);
}

/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
bpf_freed(d)
	register struct bpf_d *d;
{
	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it hasn't yet been marked
	 * free.
	 */
	if (d->bd_hbuf)
		free(d->bd_hbuf, M_DEVBUF);
	if (d->bd_fbuf)
		free(d->bd_fbuf, M_DEVBUF);
	free(d->bd_sbuf, M_DEVBUF);
	if (d->bd_filter)
		free((caddr_t)d->bd_filter, M_DEVBUF);

	D_MARKFREE(d);
}

/*
 * Attach an interface to bpf.  driverp is a pointer to a (struct bpf_if *)
 * in the driver's softc; dlt is the link layer type; hdrlen is the fixed
 * size of the link header (variable length headers not yet supported).
 */
void
bpfattach(driverp, ifp, dlt, hdrlen)
	caddr_t *driverp;
	struct ifnet *ifp;
	u_int dlt, hdrlen;
{
	struct bpf_if *bp;
	int i;
#if BSD < 199103
	static struct bpf_if bpf_ifs[NBPFILTER];
	static int bpfifno;

	/* No kernel malloc on old systems; carve from a static pool. */
	bp = (bpfifno < NBPFILTER) ? &bpf_ifs[bpfifno++] : 0;
#else
	bp = (struct bpf_if *)malloc(sizeof(*bp), M_DEVBUF, M_DONTWAIT);
#endif
	if (bp == 0)
		panic("bpfattach");

	bp->bif_dlist = 0;
	bp->bif_driverp = (struct bpf_if **)driverp;
	bp->bif_ifp = ifp;
	bp->bif_dlt = dlt;

	bp->bif_next = bpf_iflist;
	bpf_iflist = bp;

	/* No listeners yet, so don't divert packets to bpf. */
	*bp->bif_driverp = 0;

	/*
	 * Compute the length of the bpf header.  This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
	 */
	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;

	/*
	 * Mark all the descriptors free if this hasn't been done.
	 * (A freshly zeroed bss entry has bd_next == 0, i.e. "in use",
	 * so the first attach initializes the whole table.)
	 */
	if (!D_ISFREE(&bpf_dtab[0]))
		for (i = 0; i < NBPFILTER; ++i)
			D_MARKFREE(&bpf_dtab[i]);

	printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit);
}

#if BSD >= 199103
/* XXX This routine belongs in net/if.c. */
/*
 * Set/clear promiscuous mode on interface ifp based on the truth value
 * of pswitch.  The calls are reference counted so that only the first
 * on request actually has an effect, as does the final off request.
 * Results are undefined if the off and on requests are not matched.
 */
int
ifpromisc(ifp, pswitch)
	struct ifnet *ifp;
	int pswitch;
{
	struct ifreq ifr;
	/*
	 * If the device is not configured up, we cannot put it in
	 * promiscuous mode.
	 */
	if ((ifp->if_flags & IFF_UP) == 0)
		return (ENETDOWN);

	if (pswitch) {
		if (ifp->if_pcount++ != 0)
			return (0);
		ifp->if_flags |= IFF_PROMISC;
	} else {
		if (--ifp->if_pcount > 0)
			return (0);
		ifp->if_flags &= ~IFF_PROMISC;
	}
	/* Push the new flag settings down to the driver. */
	ifr.ifr_flags = ifp->if_flags;
	return ((*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr));
}
#endif

#if BSD < 199103
/*
 * Allocate some memory for bpf.  This is temporary SunOS support, and
 * is admittedly a gross hack.
 * If resources unavailable, return 0.
 */
static caddr_t
bpf_alloc(size, canwait)
	register int size;
	register int canwait;
{
	register struct mbuf *m;

	/* 8 bytes at the front are reserved for the back pointer below. */
	if ((unsigned)size > (MCLBYTES-8))
		return 0;

	MGET(m, canwait, MT_DATA);
	if (m == 0)
		return 0;
	if ((unsigned)size > (MLEN-8)) {
		MCLGET(m);
		if (m->m_len != MCLBYTES) {
			m_freem(m);
			return 0;
		}
	}
	/*
	 * Stash a pointer to the mbuf at the front of the buffer so the
	 * free() macro above can recover it, then hand the caller the
	 * area just past the stashed pointer.
	 */
	*mtod(m, struct mbuf **) = m;
	return mtod(m, caddr_t) + 8;
}
#endif
#endif