/*	$NetBSD: bpf.c,v 1.19 1995/04/22 13:26:20 cgd Exp $	*/

/*
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf.c	8.2 (Berkeley) 3/28/94
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/buf.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/ioctl.h>
#include <sys/map.h>

#include <sys/file.h>
#if defined(sparc) && BSD < 199103
#include <sys/stream.h>
#endif
#include <sys/tty.h>
#include <sys/uio.h>

#include <sys/protosw.h>
#include <sys/socket.h>
#include <net/if.h>

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <sys/errno.h>

#include <netinet/in.h>
#include <netinet/if_arc.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>

/*
 * Older BSDs don't have kernel malloc.
 */
#if BSD < 199103
extern bcopy();
static caddr_t bpf_alloc();
#include <net/bpf_compat.h>
#define BPF_BUFSIZE (MCLBYTES-8)
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio)
#else
#define BPF_BUFSIZE 4096
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio)
#endif

#define PRINET  26			/* interruptible */

/*
 * The default read buffer size is patchable.
 */
int bpf_bufsize = BPF_BUFSIZE;
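/*
 * Illustrative note (an assumption, not part of the original source): on
 * systems where the BIOCSBLEN ioctl below is available, a process may also
 * resize its own descriptor's buffers before attaching to an interface:
 *
 *	u_int sz = 32768;
 *	ioctl(fd, BIOCSBLEN, &sz);
 *
 * (fd is a hypothetical open bpf descriptor; the call must precede
 * BIOCSETIF.)  bpfioctl() clamps the value to the range
 * [BPF_MINBUFSIZE, BPF_MAXBUFSIZE].
 */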
/*
 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
 * bpf_dtab holds the descriptors, indexed by minor device #
 */
struct bpf_if	*bpf_iflist;
struct bpf_d	bpf_dtab[NBPFILTER];

#if BSD >= 199207 || NetBSD0_9 >= 2
/*
 * bpfilterattach() is called at boot time in new systems.  We do
 * nothing here since old systems will not call this.
 */
/* ARGSUSED */
void
bpfilterattach(n)
	int n;
{
}
#endif

static int	bpf_allocbufs __P((struct bpf_d *));
static void	bpf_freed __P((struct bpf_d *));
static void	bpf_ifname __P((struct ifnet *, struct ifreq *));
static void	bpf_mcopy __P((const void *, void *, size_t));
static int	bpf_movein __P((struct uio *, int,
		    struct mbuf **, struct sockaddr *, int *));
static int	bpf_setif __P((struct bpf_d *, struct ifreq *));
static __inline void bpf_wakeup __P((struct bpf_d *));
static void	catchpacket __P((struct bpf_d *, u_char *, size_t,
		    size_t, void (*)(const void *, void *, size_t)));
static void	reset_d __P((struct bpf_d *));

static int
bpf_movein(uio, linktype, mp, sockp, datlen)
	register struct uio *uio;
	int linktype, *datlen;
	register struct mbuf **mp;
	register struct sockaddr *sockp;
{
	struct mbuf *m;
	int error;
	int len;
	int hlen;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_PPP:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = sizeof(struct ether_header);
		break;

	case DLT_ARCNET:
		sockp->sa_family = AF_UNSPEC;
		hlen = ARC_HDRLEN;
		break;

	case DLT_FDDI:
		sockp->sa_family = AF_UNSPEC;
		/* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
		hlen = 24;
		break;

	case DLT_NULL:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	default:
		return (EIO);
	}

	len = uio->uio_resid;
	*datlen = len - hlen;
	if ((unsigned)len > MCLBYTES)
		return (EIO);

	MGET(m, M_WAIT, MT_DATA);
	if (m == 0)
		return (ENOBUFS);
	if (len > MLEN) {
#if BSD >= 199103
		MCLGET(m, M_WAIT);
		if ((m->m_flags & M_EXT) == 0) {
#else
		MCLGET(m);
		if (m->m_len != MCLBYTES) {
#endif
			error = ENOBUFS;
			goto bad;
		}
	}
	m->m_len = len;
	*mp = m;
	/*
	 * Make room for link header.
	 */
	if (hlen != 0) {
		m->m_len -= hlen;
#if BSD >= 199103
		m->m_data += hlen; /* XXX */
#else
		m->m_off += hlen;
#endif
		error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio);
		if (error)
			goto bad;
	}
	error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio);
	if (!error)
		return (0);
 bad:
	m_freem(m);
	return (error);
}

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 * Must be called at splimp.
 */
static void
bpf_attachd(d, bp)
	struct bpf_d *d;
	struct bpf_if *bp;
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	d->bd_bif = bp;
	d->bd_next = bp->bif_dlist;
	bp->bif_dlist = d;

	*bp->bif_driverp = bp;
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(d)
	struct bpf_d *d;
{
	struct bpf_d **p;
	struct bpf_if *bp;

	bp = d->bd_bif;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		int error;

		d->bd_promisc = 0;
		error = ifpromisc(bp->bif_ifp, 0);
		if (error && error != EINVAL)
			/*
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			panic("bpf: ifpromisc failed");
	}
	/* Remove d from the interface's descriptor list. */
	p = &bp->bif_dlist;
	while (*p != d) {
		p = &(*p)->bd_next;
		if (*p == 0)
			panic("bpf_detachd: descriptor not in list");
	}
	*p = (*p)->bd_next;
	if (bp->bif_dlist == 0)
		/*
		 * Let the driver know that there are no more listeners.
		 */
		*d->bd_bif->bif_driverp = 0;
	d->bd_bif = 0;
}


/*
 * Mark a descriptor free by making it point to itself.
 * This is probably cheaper than marking with a constant since
 * the address should be in a register anyway.
 */
#define D_ISFREE(d) ((d) == (d)->bd_next)
#define D_MARKFREE(d) ((d)->bd_next = (d))
#define D_MARKUSED(d) ((d)->bd_next = 0)

/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
int
bpfopen(dev, flag)
	dev_t dev;
	int flag;
{
	register struct bpf_d *d;

	if (minor(dev) >= NBPFILTER)
		return (ENXIO);
	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
	d = &bpf_dtab[minor(dev)];
	if (!D_ISFREE(d))
		return (EBUSY);

	/* Mark "in use" and do most initialization. */
	bzero((char *)d, sizeof(*d));
	d->bd_bufsize = bpf_bufsize;

	return (0);
}
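/*
 * Illustrative userland usage (an assumption, not part of the original
 * source): since each minor can be held by only one process at a time,
 * programs typically probe the minors in order until an open stops
 * failing with EBUSY:
 *
 *	char dev[16];
 *	int n, fd = -1;
 *
 *	for (n = 0; fd < 0; n++) {
 *		sprintf(dev, "/dev/bpf%d", n);
 *		fd = open(dev, O_RDWR);
 *		if (fd < 0 && errno != EBUSY)
 *			break;
 *	}
 */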
/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
int
bpfclose(dev, flag)
	dev_t dev;
	int flag;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	register int s;

	s = splimp();
	if (d->bd_bif)
		bpf_detachd(d);
	splx(s);
	bpf_freed(d);

	return (0);
}

/*
 * Support for SunOS, which does not have tsleep.
 */
#if BSD < 199103
static
bpf_timeout(arg)
	caddr_t arg;
{
	struct bpf_d *d = (struct bpf_d *)arg;
	d->bd_timedout = 1;
	wakeup(arg);
}

#define BPF_SLEEP(chan, pri, s, t) bpf_sleep((struct bpf_d *)chan)

int
bpf_sleep(d)
	register struct bpf_d *d;
{
	register int rto = d->bd_rtout;
	register int st;

	if (rto != 0) {
		d->bd_timedout = 0;
		timeout(bpf_timeout, (caddr_t)d, rto);
	}
	st = sleep((caddr_t)d, PRINET|PCATCH);
	if (rto != 0) {
		if (d->bd_timedout == 0)
			untimeout(bpf_timeout, (caddr_t)d);
		else if (st == 0)
			return EWOULDBLOCK;
	}
	return (st != 0) ? EINTR : 0;
}
#else
#define BPF_SLEEP tsleep
#endif

/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = 0;
/*
 * bpfread - read next chunk of packets from buffers
 */
int
bpfread(dev, uio)
	dev_t dev;
	register struct uio *uio;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	int error;
	int s;

	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	s = splimp();
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == 0) {
		if (d->bd_immediate && d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}
		error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf",
		    d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			splx(s);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				splx(s);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	splx(s);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio);

	s = splimp();
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = 0;
	d->bd_hlen = 0;
	splx(s);

	return (error);
}


/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static __inline void
bpf_wakeup(d)
	register struct bpf_d *d;
{
	wakeup((caddr_t)d);
#if BSD >= 199103
	selwakeup(&d->bd_sel);
	/* XXX */
	d->bd_sel.si_pid = 0;
#else
	if (d->bd_selproc) {
		selwakeup(d->bd_selproc, (int)d->bd_selcoll);
		d->bd_selcoll = 0;
		d->bd_selproc = 0;
	}
#endif
}

int
bpfwrite(dev, uio)
	dev_t dev;
	struct uio *uio;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	struct ifnet *ifp;
	struct mbuf *m;
	int error, s;
	static struct sockaddr dst;
	int datlen;

	if (d->bd_bif == 0)
		return (ENXIO);

	ifp = d->bd_bif->bif_ifp;

	if (uio->uio_resid == 0)
		return (0);

	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen);
	if (error)
		return (error);

	if (datlen > ifp->if_mtu) {
		m_freem(m);
		return (EMSGSIZE);
	}

	s = splnet();
#if BSD >= 199103
	error = (*ifp->if_output)(ifp, m, &dst, (struct rtentry *)0);
#else
	error = (*ifp->if_output)(ifp, m, &dst);
#endif
	splx(s);
	/*
	 * The driver frees the mbuf.
	 */
	return (error);
}

/*
 * Reset a descriptor by flushing its packet buffer and clearing the
 * receive and drop counts.  Should be called at splimp.
 */
static void
reset_d(d)
	struct bpf_d *d;
{
	if (d->bd_hbuf) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = 0;
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
}

/*
 *  FIONREAD		Check for read packet available.
 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSBLEN		Set buffer len.
 *  BIOCSETF		Set ethernet read filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 */
/* ARGSUSED */
int
bpfioctl(dev, cmd, addr, flag)
	dev_t dev;
	u_long cmd;
	caddr_t addr;
	int flag;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	int s, error = 0;

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			s = splimp();
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			splx(s);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == 0)
				error = EINVAL;
			else {
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.
	 */
	case BIOCSBLEN:
#if BSD < 199103
		error = EINVAL;
#else
		if (d->bd_bif != 0)
			error = EINVAL;
		else {
			register u_int size = *(u_int *)addr;

			if (size > BPF_MAXBUFSIZE)
				*(u_int *)addr = size = BPF_MAXBUFSIZE;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
#endif
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, (struct bpf_program *)addr);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		s = splimp();
		reset_d(d);
		splx(s);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == 0) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		s = splimp();
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		splx(s);
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			bpf_ifname(d->bd_bif->bif_ifp, (struct ifreq *)addr);
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			/*
			 * Compute number of ticks, e.g. with hz = 100 and
			 * tick = 10000, a timeout of 1.5 seconds becomes
			 * 1 * 100 + 500000 / 10000 = 150 ticks.
			 */
			d->bd_rtout = tv->tv_sec * hz + tv->tv_usec / tick;
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}
	}
	return (error);
}

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
int
bpf_setf(d, fp)
	struct bpf_d *d;
	struct bpf_program *fp;
{
	struct bpf_insn *fcode, *old;
	u_int flen, size;
	int s;

	old = d->bd_filter;
	if (fp->bf_insns == 0) {
		if (fp->bf_len != 0)
			return (EINVAL);
		s = splimp();
		d->bd_filter = 0;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_DEVBUF);
		return (0);
	}
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return (EINVAL);

	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *)malloc(size, M_DEVBUF, M_WAITOK);
	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		s = splimp();
		d->bd_filter = fcode;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_DEVBUF);

		return (0);
	}
	free((caddr_t)fcode, M_DEVBUF);
	return (EINVAL);
}
/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(d, ifr)
	struct bpf_d *d;
	struct ifreq *ifr;
{
	struct bpf_if *bp;
	char *cp;
	int unit, s, error;

	/*
	 * Separate string into name part and unit number.  Put a null
	 * byte at the end of the name part, and compute the number.
	 * If the unit number is unspecified, the default is 0,
	 * as initialized above.  XXX This should be common code.
	 */
	unit = 0;
	cp = ifr->ifr_name;
	cp[sizeof(ifr->ifr_name) - 1] = '\0';
	while (*cp++) {
		if (*cp >= '0' && *cp <= '9') {
			unit = *cp - '0';
			*cp++ = '\0';
			while (*cp)
				unit = 10 * unit + *cp++ - '0';
			break;
		}
	}
	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == 0 || unit != ifp->if_unit
		    || strcmp(ifp->if_name, ifr->ifr_name) != 0)
			continue;
		/*
		 * We found the requested interface.
		 * If it's not up, return an error.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (ENETDOWN);

		if (d->bd_sbuf == 0) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return (error);
		}
		s = splimp();
		if (bp != d->bd_bif) {
			if (d->bd_bif)
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);

			bpf_attachd(d, bp);
		}
		reset_d(d);
		splx(s);
		return (0);
	}
	/* Not found. */
	return (ENXIO);
}

/*
 * Convert an interface name plus unit number of an ifp to a single
 * name which is returned in the ifr.
 */
static void
bpf_ifname(ifp, ifr)
	struct ifnet *ifp;
	struct ifreq *ifr;
{
	char *s = ifp->if_name;
	char *d = ifr->ifr_name;

	while (*d++ = *s++)
		continue;
	/* XXX Assume that unit number is less than 10. */
	d[-1] = ifp->if_unit + '0';
	*d = '\0';
}

/*
 * The new select interface passes down the proc pointer; the old select
 * stubs had to grab it out of the user struct.  This glue allows either case.
 */
#if BSD >= 199103
#define bpf_select bpfselect
#else
int
bpfselect(dev, rw)
	register dev_t dev;
	int rw;
{
	return (bpf_select(dev, rw, u.u_procp));
}
#endif

/*
 * Support for select() system call
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
int
bpf_select(dev, rw, p)
	register dev_t dev;
	int rw;
	struct proc *p;
{
	register struct bpf_d *d;
	register int s;

	if (rw != FREAD)
		return (0);
	/*
	 * An imitation of the FIONREAD ioctl code.
	 */
	d = &bpf_dtab[minor(dev)];

	s = splimp();
	if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0)) {
		/*
		 * There is data waiting.
		 */
		splx(s);
		return (1);
	}
#if BSD >= 199103
	selrecord(p, &d->bd_sel);
#else
	/*
	 * No data ready.  If there's already a select() waiting on this
	 * minor device then this is a collision.  This shouldn't happen
	 * because minors really should not be shared, but if a process
	 * forks while one of these is open, it is possible that both
	 * processes could select on the same descriptor.
	 */
	if (d->bd_selproc && d->bd_selproc->p_wchan == (caddr_t)&selwait)
		d->bd_selcoll = 1;
	else
		d->bd_selproc = p;
#endif
	splx(s);
	return (0);
}
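/*
 * Illustrative driver-side usage (an assumption, not part of the original
 * source): a driver that registered with bpfattach() checks the cookie it
 * handed in and taps each frame, typically from its receive interrupt:
 *
 *	if (sc->sc_bpf)
 *		bpf_tap(sc->sc_bpf, (u_char *)buf, len);
 *
 * or, when the frame is held in an mbuf chain,
 *
 *	if (sc->sc_bpf)
 *		bpf_mtap(sc->sc_bpf, m);
 *
 * sc, sc_bpf, buf, len and m are hypothetical driver variables.
 */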
/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_tap(arg, pkt, pktlen)
	caddr_t arg;
	register u_char *pkt;
	register u_int pktlen;
{
	struct bpf_if *bp;
	register struct bpf_d *d;
	register size_t slen;
	/*
	 * Note that the ipl does not have to be raised at this point.
	 * The only problem that could arise here is that if two different
	 * interfaces shared any data.  This is not the case.
	 */
	bp = (struct bpf_if *)arg;
	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
		if (slen != 0)
			catchpacket(d, pkt, pktlen, slen, bcopy);
	}
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void
bpf_mcopy(src_arg, dst_arg, len)
	const void *src_arg;
	void *dst_arg;
	register size_t len;
{
	register const struct mbuf *m;
	register u_int count;
	u_char *dst;

	m = src_arg;
	dst = dst_arg;
	while (len > 0) {
		if (m == 0)
			panic("bpf_mcopy");
		count = min(m->m_len, len);
		bcopy(mtod(m, caddr_t), (caddr_t)dst, count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
}

/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 */
void
bpf_mtap(arg, m)
	caddr_t arg;
	struct mbuf *m;
{
	struct bpf_if *bp = (struct bpf_if *)arg;
	struct bpf_d *d;
	size_t pktlen, slen;
	struct mbuf *m0;

	pktlen = 0;
	for (m0 = m; m0 != 0; m0 = m0->m_next)
		pktlen += m0->m_len;

	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
		if (slen != 0)
			catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy);
	}
}

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  Wake up pending reads if the buffer fills or
 * immediate mode is set.  "copy" is the routine called to do the
 * actual data transfer.  bcopy is passed in to copy contiguous chunks,
 * while bpf_mcopy is passed in to copy mbuf chains.  In the latter case,
 * pkt is really an mbuf.
 */
static void
catchpacket(d, pkt, pktlen, snaplen, cpfn)
	register struct bpf_d *d;
	register u_char *pkt;
	register size_t pktlen, snaplen;
	register void (*cpfn) __P((const void *, void *, size_t));
{
	register struct bpf_hdr *hp;
	register int totlen, curlen;
	register int hdrlen = d->bd_bif->bif_hdrlen;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == 0) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		bpf_wakeup(d);
		curlen = 0;
	}
	else if (d->bd_immediate)
		/*
		 * Immediate mode is set.  A packet arrived so any
		 * reads should be woken up.
		 */
		bpf_wakeup(d);

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
#if BSD >= 199103
	microtime(&hp->bh_tstamp);
#elif defined(sun)
	uniqtime(&hp->bh_tstamp);
#else
	hp->bh_tstamp = time;
#endif
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;
}

/*
 * Initialize all nonzero fields of a descriptor.
 */
static int
bpf_allocbufs(d)
	register struct bpf_d *d;
{
	d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
	if (d->bd_fbuf == 0)
		return (ENOBUFS);

	d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
	if (d->bd_sbuf == 0) {
		free(d->bd_fbuf, M_DEVBUF);
		return (ENOBUFS);
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	return (0);
}

/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(d)
	register struct bpf_d *d;
{
	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it hasn't yet been marked
	 * free.
	 */
	if (d->bd_sbuf != 0) {
		free(d->bd_sbuf, M_DEVBUF);
		if (d->bd_hbuf != 0)
			free(d->bd_hbuf, M_DEVBUF);
		if (d->bd_fbuf != 0)
			free(d->bd_fbuf, M_DEVBUF);
	}
	if (d->bd_filter)
		free((caddr_t)d->bd_filter, M_DEVBUF);

	D_MARKFREE(d);
}

/*
 * Attach an interface to bpf.  driverp is a pointer to a (struct bpf_if *)
 * in the driver's softc; dlt is the link layer type; hdrlen is the fixed
 * size of the link header (variable length headers not yet supported).
 */
void
bpfattach(driverp, ifp, dlt, hdrlen)
	caddr_t *driverp;
	struct ifnet *ifp;
	u_int dlt, hdrlen;
{
	struct bpf_if *bp;
	int i;
#if BSD < 199103
	static struct bpf_if bpf_ifs[NBPFILTER];
	static int bpfifno;

	bp = (bpfifno < NBPFILTER) ? &bpf_ifs[bpfifno++] : 0;
#else
	bp = (struct bpf_if *)malloc(sizeof(*bp), M_DEVBUF, M_DONTWAIT);
#endif
	if (bp == 0)
		panic("bpfattach");

	bp->bif_dlist = 0;
	bp->bif_driverp = (struct bpf_if **)driverp;
	bp->bif_ifp = ifp;
	bp->bif_dlt = dlt;

	bp->bif_next = bpf_iflist;
	bpf_iflist = bp;

	*bp->bif_driverp = 0;

	/*
	 * Compute the length of the bpf header.  This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
	 */
	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;

	/*
	 * Mark all the descriptors free if this hasn't been done.
	 */
	if (!D_ISFREE(&bpf_dtab[0]))
		for (i = 0; i < NBPFILTER; ++i)
			D_MARKFREE(&bpf_dtab[i]);

#if 0
	printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit);
#endif
}

#if BSD >= 199103
/* XXX This routine belongs in net/if.c. */
/*
 * Set/clear promiscuous mode on interface ifp based on the truth value
 * of pswitch.  The calls are reference counted so that only the first
 * "on" request actually has an effect, as does the final "off" request.
 * Results are undefined if the "off" and "on" requests are not matched.
 */
int
ifpromisc(ifp, pswitch)
	struct ifnet *ifp;
	int pswitch;
{
	struct ifreq ifr;

	if (pswitch) {
		/*
		 * If the device is not configured up, we cannot put it in
		 * promiscuous mode.
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (ENETDOWN);
		if (ifp->if_pcount++ != 0)
			return (0);
		ifp->if_flags |= IFF_PROMISC;
	} else {
		if (--ifp->if_pcount > 0)
			return (0);
		ifp->if_flags &= ~IFF_PROMISC;
		/*
		 * If the device is not configured up, we should not need to
		 * turn off promiscuous mode (device should have turned it
		 * off when interface went down; and will look at IFF_PROMISC
		 * again next time interface comes up).
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (0);
	}
	ifr.ifr_flags = ifp->if_flags;
	return ((*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr));
}
#endif

#if BSD < 199103
/*
 * Allocate some memory for bpf.  This is temporary SunOS support, and
 * is admittedly a hack.
 * If resources unavailable, return 0.
 */
static caddr_t
bpf_alloc(size, canwait)
	register int size;
	register int canwait;
{
	register struct mbuf *m;

	if ((unsigned)size > (MCLBYTES-8))
		return 0;

	MGET(m, canwait, MT_DATA);
	if (m == 0)
		return 0;
	if ((unsigned)size > (MLEN-8)) {
		MCLGET(m);
		if (m->m_len != MCLBYTES) {
			m_freem(m);
			return 0;
		}
	}
	*mtod(m, struct mbuf **) = m;
	return mtod(m, caddr_t) + 8;
}
#endif