/*-
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne of Lawrence Berkeley Laboratory.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)bpf.c	7.5 (Berkeley) 07/15/91
 *
 * static char rcsid[] =
 * "$Header: bpf.c,v 1.23 91/01/30 18:22:13 mccanne Exp $";
 */

#include "bpfilter.h"

#if (NBPFILTER > 0)

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/buf.h>
#include <sys/dir.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/ioctl.h>
#include <sys/map.h>

#include <sys/file.h>
#ifdef sparc
#include <sys/stream.h>
#endif
#include <sys/tty.h>
#include <sys/uio.h>

#include <sys/protosw.h>
#include <sys/socket.h>
#include <net/if.h>

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <sys/errno.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>

#define PRINET	26		/* interruptible */

/*
 * The default read buffer size is patchable.
 */
int bpf_bufsize = MCLBYTES;

/*
 * bpf_iflist is the list of interfaces; each corresponds to an ifnet.
 * bpf_dtab holds the descriptors, indexed by minor device #.
 *
 * We really don't need NBPFILTER bpf_if entries, but this eliminates
 * the need to account for all possible drivers here.
 * This problem will go away when these structures are allocated dynamically.
 */
static struct bpf_if	*bpf_iflist;
static struct bpf_d	bpf_dtab[NBPFILTER];

static void	bpf_ifname();
static void	catchpacket();
static int	bpf_setif();
static int	bpf_initd();

static int
bpf_movein(uio, linktype, mp, sockp)
	register struct uio *uio;
	int linktype;
	register struct mbuf **mp;
	register struct sockaddr *sockp;
{
	struct mbuf *m;
	int error;
	int len;
	int hlen;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {
	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = sizeof(struct ether_header);
		break;

	case DLT_FDDI:
		sockp->sa_family = AF_UNSPEC;
		/* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
		hlen = 24;
		break;

	default:
		return (EIO);
	}

	len = uio->uio_resid;
	if ((unsigned)len > MCLBYTES)
		return (EIO);

	MGET(m, M_WAIT, MT_DATA);
	if (m == 0)
		return (ENOBUFS);
	if (len > MLEN) {
		MCLGET(m, M_WAIT);
		if ((m->m_flags & M_EXT) == 0) {
			error = ENOBUFS;
			goto bad;
		}
	}
	m->m_len = len;
	*mp = m;
	/*
	 * Make room for link header.
	 */
	if (hlen) {
		m->m_len -= hlen;
		m->m_data += hlen; /* XXX */

		error = uiomove((caddr_t)sockp->sa_data, hlen, uio);
		if (error)
			goto bad;
	}
	error = uiomove(mtod(m, caddr_t), len - hlen, uio);
	if (!error)
		return (0);
 bad:
	m_freem(m);
	return (error);
}

/*
 * Attach 'd' to the bpf interface 'bp', i.e. make 'd' listen on 'bp'.
 * Must be called at splimp.
 */
static void
bpf_attachd(d, bp)
	struct bpf_d *d;
	struct bpf_if *bp;
{
	/* Point d at bp. */
	d->bd_bif = bp;

	/* Add d to bp's list of listeners. */
	d->bd_next = bp->bif_dlist;
	bp->bif_dlist = d;

	/*
	 * Let the driver know we're here (if it doesn't already).
	 */
	*bp->bif_driverp = bp;
}

static void
bpf_detachd(d)
	struct bpf_d *d;
{
	struct bpf_d **p;
	struct bpf_if *bp;

	bp = d->bd_bif;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		if (ifpromisc(bp->bif_ifp, 0))
			/*
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			panic("bpf_detachd: ifpromisc failed");
	}
	/* Remove 'd' from the interface's descriptor list. */
	p = &bp->bif_dlist;
	while (*p != d) {
		p = &(*p)->bd_next;
		if (*p == 0)
			panic("bpf_detachd: descriptor not in list");
	}
	*p = (*p)->bd_next;
	if (bp->bif_dlist == 0)
		/*
		 * Let the driver know that there are no more listeners.
		 */
		*d->bd_bif->bif_driverp = 0;
	d->bd_bif = 0;
}

/*
 * Mark a descriptor free by making it point to itself.
 * This is probably cheaper than marking with a constant since
 * the address should be in a register anyway.
 */
#define D_ISFREE(d)	((d) == (d)->bd_next)
#define D_MARKFREE(d)	((d)->bd_next = (d))
#define D_MARKUSED(d)	((d)->bd_next = 0)

/*
 * bpfopen - open ethernet device
 *
 * Errors: ENXIO - illegal minor device number
 *	   EBUSY - too many files open
 */
/* ARGSUSED */
int
bpfopen(dev, flag)
	dev_t dev;
	int flag;
{
	int error, s;
	register struct bpf_d *d;

	if (minor(dev) >= NBPFILTER)
		return (ENXIO);

	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
	s = splimp();
	d = &bpf_dtab[minor(dev)];
	if (!D_ISFREE(d)) {
		splx(s);
		return (EBUSY);
	} else
		/* Mark "in use" and do most initialization. */
		bzero((char *)d, sizeof(*d));
	splx(s);

	error = bpf_initd(d);
	if (error) {
		D_MARKFREE(d);
		return (error);
	}
	return (0);
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
bpfclose(dev, flag)
	dev_t dev;
	int flag;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	int s;

	s = splimp();
	if (d->bd_bif)
		bpf_detachd(d);
	splx(s);

	/* Free the buffer space. */
	if (d->bd_hbuf)
		free(d->bd_hbuf, M_DEVBUF);
	if (d->bd_fbuf)
		free(d->bd_fbuf, M_DEVBUF);
	free(d->bd_sbuf, M_DEVBUF);
	if (d->bd_filter)
		free((caddr_t)d->bd_filter, M_DEVBUF);

	D_MARKFREE(d);
}
/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = 0;
/*
 * bpfread - read next chunk of packets from buffers
 */
int
bpfread(dev, uio)
	dev_t dev;
	register struct uio *uio;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	int error;
	int s;

	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	s = splimp();
	/*
	 * If the hold buffer is empty, then set a timer and sleep
	 * until either the timeout has occurred or enough packets have
	 * arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == 0) {
		if (d->bd_immediate && d->bd_slen != 0) {
			/*
			 * One or more packets either arrived since the
			 * previous read or arrived while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}
		error = tsleep((caddr_t)d, PRINET|PCATCH, "bpf", d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			splx(s);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				splx(s);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	splx(s);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = uiomove(d->bd_hbuf, d->bd_hlen, uio);

	s = splimp();
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = 0;
	splx(s);

	return (error);
}

/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static inline void
bpf_wakeup(d)
	register struct bpf_d *d;
{
	wakeup((caddr_t)d);
	if (d->bd_selproc) {
		selwakeup(d->bd_selproc, (int)d->bd_selcoll);
		d->bd_selcoll = 0;
		d->bd_selproc = 0;
	}
}

int
bpfwrite(dev, uio)
	dev_t dev;
	struct uio *uio;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	struct ifnet *ifp;
	struct mbuf *m;
	int error, s;
	static struct sockaddr dst;

	if (d->bd_bif == 0)
		return (ENXIO);

	ifp = d->bd_bif->bif_ifp;

	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_resid > ifp->if_mtu)
		return (EMSGSIZE);

	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst);
	if (error)
		return (error);

	s = splnet();
	error = (*ifp->if_output)(ifp, m, &dst);
	splx(s);
	/*
	 * The driver frees the mbuf.
	 */
	return (error);
}
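/*
 * Illustrative userland sketch (not part of the kernel proper): how a
 * process might inject a frame through bpfwrite() above.  Per bpf_movein(),
 * on a DLT_EN10MB interface the first sizeof(struct ether_header) bytes of
 * the write are taken as the link-level header, and the total length must
 * not exceed the interface MTU.  The device path "/dev/bpf0" and interface
 * name "le0" are assumptions for the example, not something this file
 * defines.
 *
 *	#include <sys/types.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <net/bpf.h>
 *	#include <netinet/in.h>
 *	#include <netinet/if_ether.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int
 *	inject(void)
 *	{
 *		struct ifreq ifr;
 *		u_char frame[sizeof(struct ether_header) + 64];
 *		struct ether_header *eh = (struct ether_header *)frame;
 *		int fd;
 *
 *		fd = open("/dev/bpf0", O_RDWR);
 *		if (fd < 0)
 *			return (-1);
 *		strncpy(ifr.ifr_name, "le0", sizeof(ifr.ifr_name));
 *		if (ioctl(fd, BIOCSETIF, (caddr_t)&ifr) < 0)
 *			return (-1);
 *		memset(frame, 0, sizeof(frame));
 *		memset(eh->ether_dhost, 0xff, sizeof(eh->ether_dhost));
 *		eh->ether_type = htons(ETHERTYPE_IP);
 *		return (write(fd, (char *)frame, sizeof(frame)));
 *	}
 */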
/*
 * Reset a descriptor by flushing its packet buffer and clearing the receive
 * and drop counts.  Should be called at splimp.
 */
static void
reset_d(d)
	struct bpf_d *d;
{
	if (d->bd_hbuf) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = 0;
	}
	d->bd_slen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
}

/*
 *  FIONREAD		Check for read packet available.
 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
 *  BIOCGFLEN		Get max filter len.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSETF		Set ethernet read filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 */
/* ARGSUSED */
int
bpfioctl(dev, cmd, addr, flag)
	dev_t dev;
	int cmd;
	caddr_t addr;
	int flag;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	int s, error = 0;

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			s = splimp();
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			splx(s);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == 0)
				error = EINVAL;
			else {
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get max filter len.
	 */
	case BIOCGFLEN:
		*(u_int *)addr = BPF_MAXINSNS;
		break;
	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set ethernet read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, (struct bpf_program *)addr);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		s = splimp();
		reset_d(d);
		splx(s);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == 0) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		s = splimp();
		if (d->bd_promisc == 0) {
			d->bd_promisc = 1;
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
		}
		splx(s);
		break;

	/*
	 * Get link layer type.
	 */
	case BIOCGDLT:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			bpf_ifname(d->bd_bif->bif_ifp, (struct ifreq *)addr);
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;
			u_long msec;

			/* Compute number of milliseconds. */
			msec = tv->tv_sec * 1000 + tv->tv_usec / 1000;
			/*
			 * Scale milliseconds to ticks.  Assume hardclock
			 * has millisecond or greater resolution
			 * (i.e. tick >= 1000).  For a 10ms hardclock,
			 * tick/1000 = 10, so rtout <- msec/10.
			 */
			d->bd_rtout = msec / (tick / 1000);
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;
			u_long msec = d->bd_rtout;

			msec *= tick / 1000;
			tv->tv_sec = msec / 1000;
			tv->tv_usec = msec % 1000;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;
	}
	return (error);
}
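/*
 * Illustrative userland sketch (not part of the kernel proper): how a
 * process might drive the ioctls above and consume the buffer format
 * produced by catchpacket() below.  bpfread() requires the read size to
 * equal the kernel buffer size, so it is fetched with BIOCGBLEN; each
 * packet in the returned buffer carries a bpf_hdr, and successive packets
 * are spaced with BPF_WORDALIGN.  The device path "/dev/bpf0", the
 * interface name "le0", and the consumer routine process() are assumptions
 * for the example.
 *
 *	#include <sys/types.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <net/bpf.h>
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	extern void process(u_char *, u_int);
 *
 *	int
 *	capture(void)
 *	{
 *		struct ifreq ifr;
 *		struct bpf_hdr *hp;
 *		u_int bufsize, on = 1;
 *		u_char *buf, *p;
 *		int fd, cc;
 *
 *		fd = open("/dev/bpf0", O_RDONLY);
 *		if (fd < 0)
 *			return (-1);
 *		strncpy(ifr.ifr_name, "le0", sizeof(ifr.ifr_name));
 *		if (ioctl(fd, BIOCSETIF, (caddr_t)&ifr) < 0 ||
 *		    ioctl(fd, BIOCIMMEDIATE, (caddr_t)&on) < 0 ||
 *		    ioctl(fd, BIOCGBLEN, (caddr_t)&bufsize) < 0)
 *			return (-1);
 *		buf = (u_char *)malloc(bufsize);
 *		while ((cc = read(fd, (char *)buf, bufsize)) > 0) {
 *			p = buf;
 *			while (p < buf + cc) {
 *				hp = (struct bpf_hdr *)p;
 *				process((u_char *)hp + hp->bh_hdrlen,
 *				    (u_int)hp->bh_caplen);
 *				p += BPF_WORDALIGN(hp->bh_hdrlen +
 *				    hp->bh_caplen);
 *			}
 *		}
 *		return (0);
 *	}
 */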
/*
 * Set d's packet filter program to 'fp'.  If 'd' already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
int
bpf_setf(d, fp)
	struct bpf_d *d;
	struct bpf_program *fp;
{
	struct bpf_insn *fcode, *old;
	u_int flen, size;
	int s;

	old = d->bd_filter;
	if (fp->bf_insns == 0) {
		if (fp->bf_len != 0)
			return (EINVAL);
		s = splimp();
		d->bd_filter = 0;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_DEVBUF);
		return (0);
	}
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return (EINVAL);

	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *)malloc(size, M_DEVBUF, M_WAITOK);
	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		s = splimp();
		d->bd_filter = fcode;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_DEVBUF);

		return (0);
	}
	free((caddr_t)fcode, M_DEVBUF);
	return (EINVAL);
}
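/*
 * Illustrative sketch (not part of the kernel proper): a filter program of
 * the sort a process would hand to BIOCSETF, and hence to bpf_setf() above.
 * The first instruction loads the halfword at offset 12 (the Ethernet type
 * field); the jump accepts the packet if the type is ETHERTYPE_IP and
 * otherwise falls through to the reject.  The value returned by the filter
 * becomes the snapshot length, so (u_int)-1 means "capture the whole
 * packet" and 0 means "drop it".  BPF_STMT, BPF_JUMP, struct bpf_program
 * and the opcodes come from net/bpf.h; ETHERTYPE_IP comes from
 * netinet/if_ether.h.  The variables fd and error are assumed to exist in
 * the caller.
 *
 *	struct bpf_insn insns[] = {
 *		BPF_STMT(BPF_LD+BPF_H+BPF_ABS, 12),
 *		BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, ETHERTYPE_IP, 0, 1),
 *		BPF_STMT(BPF_RET+BPF_K, (u_int)-1),
 *		BPF_STMT(BPF_RET+BPF_K, 0),
 *	};
 *	struct bpf_program prog = {
 *		sizeof(insns) / sizeof(insns[0]),
 *		insns
 *	};
 *
 *	error = ioctl(fd, BIOCSETF, (caddr_t)&prog);
 */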
/*
 * Detach 'd' from its current interface (if attached at all) and attach to
 * the interface named 'name'.  Return ioctl error code or 0.
 */
static int
bpf_setif(d, ifr)
	struct bpf_d *d;
	struct ifreq *ifr;
{
	struct bpf_if *bp;
	char *cp;
	int unit, s;

	/*
	 * Separate string into name part and unit number.  Put a null
	 * byte at the end of the name part, and compute the number.
	 * If the unit number is unspecified, the default is 0,
	 * as initialized above.  XXX This should be common code.
	 */
	unit = 0;
	cp = ifr->ifr_name;
	cp[sizeof(ifr->ifr_name) - 1] = '\0';
	while (*cp++) {
		if (*cp >= '0' && *cp <= '9') {
			unit = *cp - '0';
			*cp++ = '\0';
			while (*cp)
				unit = 10 * unit + *cp++ - '0';
			break;
		}
	}
	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == 0 || unit != ifp->if_unit
		    || strcmp(ifp->if_name, ifr->ifr_name) != 0)
			continue;
		/*
		 * We found the requested interface.  If we're
		 * already attached to it, just flush the buffer.
		 * If it's not up, return an error.
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (ENETDOWN);
		s = splimp();
		if (bp != d->bd_bif) {
			if (d->bd_bif)
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);

			bpf_attachd(d, bp);
		}
		reset_d(d);
		splx(s);
		return (0);
	}
	/* Not found. */
	return (ENXIO);
}

/*
 * Lookup the name of the 'ifp' interface and return it in 'ifr->ifr_name'.
 * We augment the ifp's base name with its unit number.
 */
static void
bpf_ifname(ifp, ifr)
	struct ifnet *ifp;
	struct ifreq *ifr;
{
	char *s = ifp->if_name;
	char *d = ifr->ifr_name;

	while (*d++ = *s++)
		;
	/* XXX Assume that unit number is less than 10. */
	*d++ = ifp->if_unit + '0';
	*d = '\0';
}

/*
 * Support for select() system call
 * Inspired by the code in tty.c for the same purpose.
 *
 * bpfselect - returns true iff the specific operation
 *	will not block indefinitely.  Otherwise, return
 *	false but make a note that a selwakeup() must be done.
 */
int
bpfselect(dev, rw, p)
	register dev_t dev;
	int rw;
	struct proc *p;
{
	register struct bpf_d *d;
	register int s;

	if (rw != FREAD)
		return (0);
	/*
	 * An imitation of the FIONREAD ioctl code.
	 */
	d = &bpf_dtab[minor(dev)];

	s = splimp();
	if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0)) {
		/*
		 * There is data waiting.
		 */
		splx(s);
		return (1);
	}
	/*
	 * No data ready.  If there's already a select() waiting on this
	 * minor device then this is a collision.  This shouldn't happen
	 * because minors really should not be shared, but if a process
	 * forks while one of these is open, it is possible that both
	 * processes could select on the same descriptor.
	 */
	if (d->bd_selproc && d->bd_selproc->p_wchan == (caddr_t)&selwait)
		d->bd_selcoll = 1;
	else
		d->bd_selproc = p;

	splx(s);
	return (0);
}

/*
 * bpf_tap - incoming linkage from device drivers
 */
void
bpf_tap(arg, pkt, pktlen)
	caddr_t arg;
	register u_char *pkt;
	register u_int pktlen;
{
	struct bpf_if *bp;
	register struct bpf_d *d;
	register u_int slen;
	/*
	 * Note that the ipl does not have to be raised at this point.
	 * The only problem that could arise here is that if two different
	 * interfaces shared any data.  This is not the case.
	 */
	bp = (struct bpf_if *)arg;
	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
		if (slen != 0)
			catchpacket(d, pkt, pktlen, slen, bcopy);
	}
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void
bpf_mcopy(src, dst, len)
	u_char *src;
	u_char *dst;
	register int len;
{
	register struct mbuf *m = (struct mbuf *)src;
	register unsigned count;

	while (len > 0) {
		if (m == 0)
			panic("bpf_mcopy");
		count = MIN(m->m_len, len);
		bcopy(mtod(m, caddr_t), (caddr_t)dst, count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
}

/*
 * bpf_mtap - incoming linkage from device drivers, when packet
 * is in an mbuf chain
 */
void
bpf_mtap(arg, m)
	caddr_t arg;
	struct mbuf *m;
{
	struct bpf_if *bp = (struct bpf_if *)arg;
	struct bpf_d *d;
	u_int pktlen, slen;
	struct mbuf *m0;

	/* Sum the lengths of all mbufs in the chain. */
	pktlen = 0;
	for (m0 = m; m0 != 0; m0 = m0->m_next)
		pktlen += m0->m_len;

	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
		if (slen != 0)
			catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy);
	}
}

/*
 * Move the packet data from interface memory ('pkt') into the store
 * buffer.  Wake up any sleeping reads when the store buffer fills or
 * immediate mode is set.  'cpfn' is the routine called to do the actual
 * data transfer.  'bcopy' is passed in to copy contiguous chunks, while
 * 'bpf_mcopy' is passed in to copy mbuf chains.  In the latter case,
 * 'pkt' is really an mbuf.
 */
static void
catchpacket(d, pkt, pktlen, snaplen, cpfn)
	register struct bpf_d *d;
	register u_char *pkt;
	register u_int pktlen, snaplen;
	register void (*cpfn)();
{
	register struct bpf_hdr *hp;
	register int totlen, curlen;
	register int hdrlen = d->bd_bif->bif_hdrlen;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + MIN(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == 0) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		bpf_wakeup(d);
		curlen = 0;
	}
	else if (d->bd_immediate)
		/*
		 * Immediate mode is set.  A packet arrived so any
		 * reads should be woken up.
		 */
		bpf_wakeup(d);

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
#ifdef sun
	uniqtime(&hp->bh_tstamp);
#else
#ifdef hp300
	microtime(&hp->bh_tstamp);
#else
	hp->bh_tstamp = time;
#endif
#endif
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;
}

/*
 * Initialize all nonzero fields of a descriptor.
 */
static int
bpf_initd(d)
	register struct bpf_d *d;
{
	d->bd_bufsize = bpf_bufsize;
	d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
	if (d->bd_fbuf == 0)
		return (ENOBUFS);

	d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
	if (d->bd_sbuf == 0) {
		free(d->bd_fbuf, M_DEVBUF);
		return (ENOBUFS);
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	return (0);
}

/*
 * Register 'ifp' with bpf.  'driverp' is a pointer to the 'struct bpf_if *'
 * in the driver's softc, which bpf uses to tell the driver when listeners
 * are present.
 */
void
bpfattach(driverp, ifp, dlt, hdrlen)
	caddr_t *driverp;
	struct ifnet *ifp;
	u_int dlt, hdrlen;
{
	struct bpf_if *bp;
	int i;

	bp = (struct bpf_if *)malloc(sizeof(*bp), M_DEVBUF, M_DONTWAIT);
	if (bp == 0)
		panic("bpfattach");

	bp->bif_dlist = 0;
	bp->bif_driverp = (struct bpf_if **)driverp;
	bp->bif_ifp = ifp;
	bp->bif_dlt = dlt;

	bp->bif_next = bpf_iflist;
	bpf_iflist = bp;

	*bp->bif_driverp = 0;

	/*
	 * Compute the length of the bpf header.  This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
	 */
	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;

	/*
	 * Mark all the descriptors free if this hasn't been done.
	 */
	if (!D_ISFREE(&bpf_dtab[0]))
		for (i = 0; i < NBPFILTER; ++i)
			D_MARKFREE(&bpf_dtab[i]);

	printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit);
}
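/*
 * Illustrative driver-side sketch (not part of this file): how an Ethernet
 * driver would hook into bpfattach(), bpf_tap() and bpf_mtap() above.  The
 * softc layout and the field name sc_bpf are assumptions for the example.
 * Because *driverp is non-zero only while listeners are attached (see
 * bpf_attachd() and bpf_detachd()), the receive path can test it cheaply
 * before doing any work.
 *
 *	struct le_softc {
 *		struct	arpcom sc_ac;
 *		caddr_t	sc_bpf;
 *	};
 *
 *	In the driver's attach routine:
 *		bpfattach(&sc->sc_bpf, &sc->sc_ac.ac_if, DLT_EN10MB,
 *		    sizeof(struct ether_header));
 *
 *	In the receive interrupt, with 'len' bytes of frame contiguous
 *	at 'buf':
 *		if (sc->sc_bpf)
 *			bpf_tap(sc->sc_bpf, buf, len);
 *
 *	Or, if the frame has already been placed in an mbuf chain 'm'
 *	with the link header still in place:
 *		if (sc->sc_bpf)
 *			bpf_mtap(sc->sc_bpf, m);
 */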
/* XXX This routine belongs in net/if.c. */
/*
 * Set/clear promiscuous mode on interface ifp based on the truth value
 * of pswitch.  The calls are reference counted so that only the first
 * "on" request actually has an effect, as does the final "off" request.
 * Results are undefined if the "off" and "on" requests are not matched.
 */
int
ifpromisc(ifp, pswitch)
	struct ifnet *ifp;
	int pswitch;
{
	struct ifreq ifr;
	/*
	 * If the device is not configured up, we cannot put it in
	 * promiscuous mode.
	 */
	if ((ifp->if_flags & IFF_UP) == 0)
		return (ENETDOWN);

	if (pswitch) {
		if (ifp->if_pcount++ != 0)
			return (0);
		ifp->if_flags |= IFF_PROMISC;
	} else {
		if (--ifp->if_pcount > 0)
			return (0);
		ifp->if_flags &= ~IFF_PROMISC;
	}
	ifr.ifr_flags = ifp->if_flags;
	return ((*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr));
}

#endif /* NBPFILTER > 0 */