1 /* $OpenBSD: bpf.c,v 1.69 2008/09/17 20:10:37 chl Exp $ */ 2 /* $NetBSD: bpf.c,v 1.33 1997/02/21 23:59:35 thorpej Exp $ */ 3 4 /* 5 * Copyright (c) 1990, 1991, 1993 6 * The Regents of the University of California. All rights reserved. 7 * 8 * This code is derived from the Stanford/CMU enet packet filter, 9 * (net/enet.c) distributed as part of 4.3BSD, and code contributed 10 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence 11 * Berkeley Laboratory. 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 3. Neither the name of the University nor the names of its contributors 22 * may be used to endorse or promote products derived from this software 23 * without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 
 *
 *	@(#)bpf.c	8.2 (Berkeley) 3/28/94
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/ioctl.h>
#include <sys/conf.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/socket.h>
#include <sys/poll.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

/* Default size (bytes) of a descriptor's capture buffers. */
#define BPF_BUFSIZE 32768

#define PRINET	26			/* interruptible */

/*
 * The default read buffer size is patchable.
 */
int bpf_bufsize = BPF_BUFSIZE;
int bpf_maxbufsize = BPF_MAXBUFSIZE;

/*
 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
 * bpf_d_list is the list of descriptors
 */
struct bpf_if *bpf_iflist;
LIST_HEAD(, bpf_d) bpf_d_list;

int	bpf_allocbufs(struct bpf_d *);
void	bpf_freed(struct bpf_d *);
void	bpf_ifname(struct ifnet *, struct ifreq *);
void	bpf_mcopy(const void *, void *, size_t);
int	bpf_movein(struct uio *, u_int, struct mbuf **,
	    struct sockaddr *, struct bpf_insn *);
void	bpf_attachd(struct bpf_d *, struct bpf_if *);
void	bpf_detachd(struct bpf_d *);
int	bpf_setif(struct bpf_d *, struct ifreq *);
int	bpfpoll(dev_t, int, struct proc *);
int	bpfkqfilter(dev_t, struct knote *);
static __inline void bpf_wakeup(struct bpf_d *);
void	bpf_catchpacket(struct bpf_d *, u_char *, size_t, size_t,
	    void (*)(const void *, void *, size_t));
void	bpf_reset_d(struct bpf_d *);
int	bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
int	bpf_setdlt(struct bpf_d *, u_int);

void	filt_bpfrdetach(struct knote *);
int	filt_bpfread(struct knote *, long);

struct bpf_d *bpfilter_lookup(int);
struct bpf_d *bpfilter_create(int);
void	bpfilter_destroy(struct bpf_d *);

/*
 * Copy one packet of user data from uio into a single mbuf (cluster-backed
 * if larger than MHLEN), build a sockaddr for the link type, run the
 * descriptor's write filter over the data, strip the link-level header
 * into sockp->sa_data, and tag the mbuf with its data link type.
 * On success returns 0 with *mp pointing at the new mbuf; otherwise an
 * errno is returned and the mbuf is freed (but see the m_tag_get note).
 */
int
bpf_movein(struct uio *uio, u_int linktype, struct mbuf **mp,
    struct sockaddr *sockp, struct bpf_insn *filter)
{
	struct mbuf *m;
	struct m_tag *mtag;
	int error;
	u_int hlen;
	u_int len;
	u_int slen;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_PPP:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = ETHER_HDR_LEN;
		break;

	case DLT_FDDI:
		sockp->sa_family = AF_UNSPEC;
		/* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
		hlen = 24;
		break;

	case DLT_IEEE802_11:
	case DLT_IEEE802_11_RADIO:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	case DLT_RAW:
	case DLT_NULL:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	case DLT_ATM_RFC1483:
		/*
		 * en atm driver requires 4-byte atm pseudo header.
		 * though it isn't standard, vpi:vci needs to be
		 * specified anyway.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
		break;

	default:
		return (EIO);
	}

	len = uio->uio_resid;
	if (len > MCLBYTES)
		return (EIO);

	MGETHDR(m, M_WAIT, MT_DATA);
	m->m_pkthdr.rcvif = 0;
	/*
	 * NOTE(review): if len < hlen this unsigned subtraction wraps;
	 * the short packet is only rejected later by the m_len < hlen
	 * check, after pkthdr.len has been set.  The mbuf is freed on
	 * that path, so this looks harmless — confirm.
	 */
	m->m_pkthdr.len = len - hlen;

	if (len > MHLEN) {
		MCLGET(m, M_WAIT);
		if ((m->m_flags & M_EXT) == 0) {
			error = ENOBUFS;
			goto bad;
		}
	}
	m->m_len = len;
	*mp = m;

	error = uiomove(mtod(m, caddr_t), len, uio);
	if (error)
		goto bad;

	/* The write filter must accept the whole packet or it is rejected. */
	slen = bpf_filter(filter, mtod(m, u_char *), len, len);
	if (slen < len) {
		error = EPERM;
		goto bad;
	}

	/* Packet must be long enough to contain the link header. */
	if (m->m_len < hlen) {
		error = EPERM;
		goto bad;
	}
	/*
	 * Make room for link header, and copy it to sockaddr
	 */
	if (hlen != 0) {
		bcopy(m->m_data, sockp->sa_data, hlen);
		m->m_len -= hlen;
		m->m_data += hlen; /* XXX */
	}

	/*
	 * Prepend the data link type as a mbuf tag
	 */
	mtag = m_tag_get(PACKET_TAG_DLT, sizeof(u_int), M_NOWAIT);
	if (mtag == NULL)
		/* NOTE(review): m is not freed on this path — possible mbuf leak. */
		return (ENOMEM);
	*(u_int *)(mtag + 1) = linktype;
	m_tag_prepend(m, mtag);

	return (0);
 bad:
	m_freem(m);
	return (error);
}

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 * Must be called at splnet.
 */
void
bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	d->bd_bif = bp;
	d->bd_next = bp->bif_dlist;
	bp->bif_dlist = d;

	*bp->bif_driverp = bp;
}

/*
 * Detach a file from its interface.
 */
void
bpf_detachd(struct bpf_d *d)
{
	struct bpf_d **p;
	struct bpf_if *bp;

	bp = d->bd_bif;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		int error;

		d->bd_promisc = 0;
		/* EINVAL/ENODEV mean the interface already went away. */
		error = ifpromisc(bp->bif_ifp, 0);
		if (error && !(error == EINVAL || error == ENODEV))
			/*
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			panic("bpf: ifpromisc failed");
	}
	/* Remove d from the interface's descriptor list. */
	p = &bp->bif_dlist;
	while (*p != d) {
		p = &(*p)->bd_next;
		if (*p == 0)
			panic("bpf_detachd: descriptor not in list");
	}
	*p = (*p)->bd_next;
	if (bp->bif_dlist == 0)
		/*
		 * Let the driver know that there are no more listeners.
		 */
		*d->bd_bif->bif_driverp = 0;
	d->bd_bif = 0;
}

/*
 * Reference count access to descriptor buffers
 */
#define D_GET(d) ((d)->bd_ref++)
#define D_PUT(d) bpf_freed(d)

/*
 * bpfilterattach() is called at boot time in new systems.  We do
 * nothing here since old systems will not call this.
 */
/* ARGSUSED */
void
bpfilterattach(int n)
{
	LIST_INIT(&bpf_d_list);
}

/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
int
bpfopen(dev_t dev, int flag, int mode, struct proc *p)
{
	struct bpf_d *d;

	/* create on demand */
	if ((d = bpfilter_create(minor(dev))) == NULL)
		return (EBUSY);

	/* Mark "free" and do most initialization. */
	d->bd_bufsize = bpf_bufsize;
	d->bd_sig = SIGIO;

	/* Take the initial reference; dropped via D_PUT() in bpfclose(). */
	D_GET(d);

	return (0);
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
int
bpfclose(dev_t dev, int flag, int mode, struct proc *p)
{
	struct bpf_d *d;
	int s;

	d = bpfilter_lookup(minor(dev));
	s = splnet();
	if (d->bd_bif)
		bpf_detachd(d);
	bpf_wakeup(d);
	/* Drop the reference taken in bpfopen(); frees d when it hits 0. */
	D_PUT(d);
	splx(s);

	return (0);
}

/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 *
 * NOTE(review): multi-statement macro, not wrapped in do { } while (0);
 * all current call sites invoke it as a full statement inside braces —
 * keep it that way.
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = 0;
/*
 * bpfread - read next chunk of packets from buffers
 */
int
bpfread(dev_t dev, struct uio *uio, int ioflag)
{
	struct bpf_d *d;
	int error;
	int s;

	d = bpfilter_lookup(minor(dev));
	if (d->bd_bif == 0)
		return (ENXIO);

	/*
	 * Restrict application to use a buffer the same size as
	 * as kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	s = splnet();

	D_GET(d);

	/*
	 * bd_rdStart is tagged when we start the read, iff there's a timeout.
	 * we can then figure out when we're done reading.
	 */
	if (d->bd_rtout != -1 && d->bd_rdStart == 0)
		d->bd_rdStart = ticks;
	else
		d->bd_rdStart = 0;

	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == 0) {
		if (d->bd_bif == NULL) {
			/* interface is gone */
			if (d->bd_slen == 0) {
				D_PUT(d);
				splx(s);
				return (EIO);
			}
			ROTATE_BUFFERS(d);
			break;
		}
		if (d->bd_immediate && d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}
		/*
		 * NOTE(review): this predicate looks suspicious — with
		 * bd_rtout == -1 (non-blocking) the first clause is false
		 * but the second clause can still select tsleep() with a
		 * timeout of -1.  Compare against newer revisions of this
		 * file before relying on it.
		 */
		if ((d->bd_rtout != -1) ||
		    (d->bd_rdStart + d->bd_rtout) < ticks) {
			error = tsleep((caddr_t)d, PRINET|PCATCH, "bpf",
			    d->bd_rtout);
		} else {
			if (d->bd_rtout == -1) {
				/* User requested non-blocking I/O */
				error = EWOULDBLOCK;
			} else
				error = 0;
		}
		if (error == EINTR || error == ERESTART) {
			D_PUT(d);
			splx(s);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				D_PUT(d);
				splx(s);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	splx(s);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = uiomove(d->bd_hbuf, d->bd_hlen, uio);

	/* Recycle the drained hold buffer as the new free buffer. */
	s = splnet();
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = 0;
	d->bd_hlen = 0;

	D_PUT(d);
	splx(s);

	return (error);
}


/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static __inline void
bpf_wakeup(struct bpf_d *d)
{
	wakeup((caddr_t)d);
	/* Deliver the configured signal if async (FIOASYNC) mode is on. */
	if (d->bd_async && d->bd_sig)
		csignal(d->bd_pgid, d->bd_sig,
		    d->bd_siguid, d->bd_sigeuid);

	selwakeup(&d->bd_sel);
	/* XXX */
	d->bd_sel.si_selpid = 0;
	KNOTE(&d->bd_sel.si_note, 0);
}

/*
 * bpfwrite - inject a packet built from user data onto the attached
 * interface.  The mbuf is consumed (freed) by the driver's if_output.
 */
int
bpfwrite(dev_t dev, struct uio *uio, int ioflag)
{
	struct bpf_d *d;
	struct ifnet *ifp;
	struct mbuf *m;
	int error, s;
	struct sockaddr_storage dst;

	d = bpfilter_lookup(minor(dev));
	if (d->bd_bif == 0)
		return (ENXIO);

	ifp = d->bd_bif->bif_ifp;

	if ((ifp->if_flags & IFF_UP) == 0)
		return (ENETDOWN);

	if (uio->uio_resid == 0)
		return (0);

	error = bpf_movein(uio, d->bd_bif->bif_dlt, &m,
	    (struct sockaddr *)&dst, d->bd_wfilter);
	if (error)
		return (error);

	if (m->m_pkthdr.len > ifp->if_mtu) {
		m_freem(m);
		return (EMSGSIZE);
	}

	/* Caller supplied the link header; tell if_output not to add one. */
	if (d->bd_hdrcmplt)
		dst.ss_family = pseudo_AF_HDRCMPLT;

	s = splsoftnet();
	error = (*ifp->if_output)(ifp, m, (struct sockaddr *)&dst,
	    (struct rtentry *)0);
	splx(s);
	/*
	 * The driver frees the mbuf.
	 */
	return (error);
}

/*
 * Reset a descriptor by flushing its packet buffer and clearing the
 * receive and drop counts.  Should be called at splnet.
 */
void
bpf_reset_d(struct bpf_d *d)
{
	if (d->bd_hbuf) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = 0;
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
}

/*
 * FIONREAD		Check for read packet available.
 * BIOCGBLEN		Get buffer len [for read()].
 * BIOCSETF		Set ethernet read filter.
 * BIOCFLUSH		Flush read packet buffer.
 * BIOCPROMISC		Put interface into promiscuous mode.
 * BIOCGDLTLIST		Get supported link layer types.
 * BIOCGDLT		Get link layer type.
 * BIOCSDLT		Set link layer type.
 * BIOCGETIF		Get interface name.
 * BIOCSETIF		Set interface.
 * BIOCSRTIMEOUT	Set read timeout.
 * BIOCGRTIMEOUT	Get read timeout.
 * BIOCGSTATS		Get packet stats.
 * BIOCIMMEDIATE	Set immediate mode.
 * BIOCVERSION		Get filter language version.
 * BIOCGHDRCMPLT	Get "header already complete" flag
 * BIOCSHDRCMPLT	Set "header already complete" flag
 */
/* ARGSUSED */
int
bpfioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
{
	struct bpf_d *d;
	int s, error = 0;

	d = bpfilter_lookup(minor(dev));
	if (d->bd_locked && suser(p, 0) != 0) {
		/* list of allowed ioctls when locked and not root */
		switch (cmd) {
		case BIOCGBLEN:
		case BIOCFLUSH:
		case BIOCGDLT:
		case BIOCGDLTLIST:
		case BIOCGETIF:
		case BIOCGRTIMEOUT:
		case BIOCGSTATS:
		case BIOCVERSION:
		case BIOCGRSIG:
		case BIOCGHDRCMPLT:
		case FIONREAD:
		case BIOCLOCK:
		case BIOCSRTIMEOUT:
		case BIOCIMMEDIATE:
		case TIOCGPGRP:
		case BIOCGDIRFILT:
			break;
		default:
			return (EPERM);
		}
	}

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			s = splnet();
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			splx(s);

			*(int *)addr = n;
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.  Only allowed before an interface is
	 * attached; the chosen size is clamped and written back to
	 * the caller.
	 */
	case BIOCSBLEN:
		if (d->bd_bif != 0)
			error = EINVAL;
		else {
			u_int size = *(u_int *)addr;

			if (size > bpf_maxbufsize)
				*(u_int *)addr = size = bpf_maxbufsize;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, (struct bpf_program *)addr, 0);
		break;

	/*
	 * Set link layer write filter.
	 */
	case BIOCSETWF:
		error = bpf_setf(d, (struct bpf_program *)addr, 1);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		s = splnet();
		bpf_reset_d(d);
		splx(s);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == 0) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		s = splnet();
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		splx(s);
		break;

	/*
	 * Get a list of supported device parameters.
	 */
	case BIOCGDLTLIST:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_getdltlist(d, (struct bpf_dltlist *)addr);
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Set device parameters.
	 */
	case BIOCSDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_setdlt(d, *(u_int *)addr);
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			bpf_ifname(d->bd_bif->bif_ifp, (struct ifreq *)addr);
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			/* Compute number of ticks. */
			d->bd_rtout = tv->tv_sec * hz + tv->tv_usec / tick;
			/* Round a nonzero sub-tick timeout up to one tick. */
			if (d->bd_rtout == 0 && tv->tv_usec != 0)
				d->bd_rtout = 1;
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	case BIOCGHDRCMPLT:	/* get "header already complete" flag */
		*(u_int *)addr = d->bd_hdrcmplt;
		break;

	case BIOCSHDRCMPLT:	/* set "header already complete" flag */
		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
		break;

	case BIOCLOCK:		/* set "locked" flag (no reset) */
		d->bd_locked = 1;
		break;

	case BIOCGFILDROP:	/* get "filter-drop" flag */
		*(u_int *)addr = d->bd_fildrop;
		break;

	case BIOCSFILDROP:	/* set "filter-drop" flag */
		d->bd_fildrop = *(u_int *)addr ? 1 : 0;
		break;

	case BIOCGDIRFILT:	/* get direction filter */
		*(u_int *)addr = d->bd_dirfilt;
		break;

	case BIOCSDIRFILT:	/* set direction filter */
		d->bd_dirfilt = (*(u_int *)addr) &
		    (BPF_DIRECTION_IN|BPF_DIRECTION_OUT);
		break;

	case FIONBIO:		/* Non-blocking I/O */
		if (*(int *)addr)
			d->bd_rtout = -1;
		else
			d->bd_rtout = 0;
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;

	/*
	 * N.B.  ioctl (FIOSETOWN) and fcntl (F_SETOWN) both end up doing
	 * the equivalent of a TIOCSPGRP and hence end up here.  *However*
	 * TIOCSPGRP's arg is a process group if it's positive and a process
	 * id if it's negative.  This is exactly the opposite of what the
	 * other two functions want!  Therefore there is code in ioctl and
	 * fcntl to negate the arg before calling here.
	 */
	case TIOCSPGRP:		/* Process or group to send signals to */
		d->bd_pgid = *(int *)addr;
		d->bd_siguid = p->p_cred->p_ruid;
		d->bd_sigeuid = p->p_ucred->cr_uid;
		break;

	case TIOCGPGRP:
		*(int *)addr = d->bd_pgid;
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)addr;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)addr = d->bd_sig;
		break;
	}
	return (error);
}

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
int
bpf_setf(struct bpf_d *d, struct bpf_program *fp, int wf)
{
	struct bpf_insn *fcode, *old;
	u_int flen, size;
	int s;

	old = wf ? d->bd_wfilter : d->bd_rfilter;
	if (fp->bf_insns == 0) {
		/* A null program detaches the current filter. */
		if (fp->bf_len != 0)
			return (EINVAL);
		s = splnet();
		if (wf)
			d->bd_wfilter = 0;
		else
			d->bd_rfilter = 0;
		bpf_reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_DEVBUF);
		return (0);
	}
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return (EINVAL);

	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *)malloc(size, M_DEVBUF, M_WAITOK);
	/* NOTE(review): a copyin() fault is reported as EINVAL, not EFAULT. */
	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		s = splnet();
		if (wf)
			d->bd_wfilter = fcode;
		else
			d->bd_rfilter = fcode;
		bpf_reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_DEVBUF);

		return (0);
	}
	free((caddr_t)fcode, M_DEVBUF);
	return (EINVAL);
}

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
int
bpf_setif(struct bpf_d *d, struct ifreq *ifr)
{
	struct bpf_if *bp, *candidate = NULL;
	int s, error;

	/*
	 * Look through attached interfaces for the named one.
	 * Among multiple bpf_if entries for the same name, prefer
	 * the one with the lowest DLT.
	 */
	for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == 0 ||
		    strcmp(ifp->if_xname, ifr->ifr_name) != 0)
			continue;

		/*
		 * We found the requested interface.
		 */
		if (candidate == NULL || candidate->bif_dlt > bp->bif_dlt)
			candidate = bp;
	}

	if (candidate != NULL) {
		/*
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
		 */
		if (d->bd_sbuf == 0) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return (error);
		}
		s = splnet();
		if (candidate != d->bd_bif) {
			if (d->bd_bif)
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);

			bpf_attachd(d, candidate);
		}
		bpf_reset_d(d);
		splx(s);
		return (0);
	}
	/* Not found. */
	return (ENXIO);
}

/*
 * Copy the interface name to the ifreq.
 */
void
bpf_ifname(struct ifnet *ifp, struct ifreq *ifr)
{
	bcopy(ifp->if_xname, ifr->ifr_name, IFNAMSIZ);
}

/*
 * Support for poll() system call
 */
int
bpfpoll(dev_t dev, int events, struct proc *p)
{
	struct bpf_d *d;
	int s, revents;

	revents = events & (POLLIN | POLLRDNORM);
	if (revents == 0)
		return (0);		/* only support reading */

	/*
	 * An imitation of the FIONREAD ioctl code.
	 */
	d = bpfilter_lookup(minor(dev));
	/*
	 * XXX The USB stack manages it to trigger some race condition
	 * which causes bpfilter_lookup to return NULL when a USB device
	 * gets detached while it is up and has an open bpf handler (e.g.
	 * dhclient).  We still should recheck if we can fix the root
	 * cause of this issue.
	 */
	if (d == NULL)
		return (POLLERR);
	s = splnet();
	if (d->bd_hlen == 0 && (!d->bd_immediate || d->bd_slen == 0)) {
		revents = 0;		/* no data waiting */
		/*
		 * if there's a timeout, mark the time we started waiting.
		 */
		if (d->bd_rtout != -1 && d->bd_rdStart == 0)
			d->bd_rdStart = ticks;
		selrecord(p, &d->bd_sel);
	}
	splx(s);
	return (revents);
}

struct filterops bpfread_filtops =
	{ 1, NULL, filt_bpfrdetach, filt_bpfread };

/*
 * kqueue attach: only EVFILT_READ is supported; the knote is hung off
 * the descriptor's selinfo klist with the device number as its hook.
 */
int
bpfkqfilter(dev_t dev, struct knote *kn)
{
	struct bpf_d *d;
	struct klist *klist;
	int s;

	d = bpfilter_lookup(minor(dev));
	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &d->bd_sel.si_note;
		kn->kn_fop = &bpfread_filtops;
		break;
	default:
		return (1);
	}

	kn->kn_hook = (caddr_t)((u_long)dev);

	s = splnet();
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	splx(s);

	return (0);
}

/* kqueue detach: remove the knote from the descriptor's klist. */
void
filt_bpfrdetach(struct knote *kn)
{
	dev_t dev = (dev_t)((u_long)kn->kn_hook);
	struct bpf_d *d;
	int s;

	d = bpfilter_lookup(minor(dev));
	s = splnet();
	SLIST_REMOVE(&d->bd_sel.si_note, kn, knote, kn_selnext);
	splx(s);
}

/*
 * kqueue event: readable when the hold buffer has data, or (in
 * immediate mode) when the store buffer does.
 */
int
filt_bpfread(struct knote *kn, long hint)
{
	dev_t dev = (dev_t)((u_long)kn->kn_hook);
	struct bpf_d *d;

	d = bpfilter_lookup(minor(dev));
	kn->kn_data = d->bd_hlen;
	if (d->bd_immediate)
		kn->kn_data += d->bd_slen;
	return (kn->kn_data > 0);
}

/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.  Returns nonzero if any matching descriptor has filter-drop set.
 */
int
bpf_tap(caddr_t arg, u_char *pkt, u_int pktlen, u_int direction)
{
	struct bpf_if *bp;
	struct bpf_d *d;
	size_t slen;
	int drop = 0;

	/*
	 * Note that the ipl does not have to be raised at this point.
	 * The only problem that could arise here is that if two different
	 * interfaces shared any data.  This is not the case.
	 */
	bp = (struct bpf_if *)arg;
	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		if ((direction & d->bd_dirfilt) != 0)
			slen = 0;	/* reject due to direction filter */
		else
			slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
		if (slen != 0) {
			bpf_catchpacket(d, pkt, pktlen, slen, bcopy);
			if (d->bd_fildrop)
				drop++;
		}
	}

	return (drop);
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
void
bpf_mcopy(const void *src_arg, void *dst_arg, size_t len)
{
	const struct mbuf *m;
	u_int count;
	u_char *dst;

	m = src_arg;
	dst = dst_arg;
	while (len > 0) {
		if (m == 0)
			panic("bpf_mcopy");
		count = min(m->m_len, len);
		bcopy(mtod(m, caddr_t), (caddr_t)dst, count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
}

/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 */
void
bpf_mtap(caddr_t arg, struct mbuf *m, u_int direction)
{
	struct bpf_if *bp = (struct bpf_if *)arg;
	struct bpf_d *d;
	size_t pktlen, slen;
	struct mbuf *m0;

	if (m == NULL)
		return;

	/* Total length of the chain, for the filter and the header. */
	pktlen = 0;
	for (m0 = m; m0 != 0; m0 = m0->m_next)
		pktlen += m0->m_len;

	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		if ((direction & d->bd_dirfilt) != 0)
			slen = 0;
		else
			/* buflen 0 tells bpf_filter pkt is an mbuf chain. */
			slen = bpf_filter(d->bd_rfilter, (u_char *)m,
			    pktlen, 0);

		if (slen == 0)
			continue;

		bpf_catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy);
		if (d->bd_fildrop)
			m->m_flags |= M_FILDROP;
	}
}

/*
 * Incoming linkage from device drivers, where we have a mbuf chain
 * but need to prepend some arbitrary header from a linear buffer.
 *
 * Con up a minimal dummy header to pacify bpf.
Allocate (only) a
 * struct m_hdr on the stack.  This is safe as bpf only reads from the
 * fields in this header that we initialize, and will not try to free
 * it or keep a pointer to it.
 */
void
bpf_mtap_hdr(caddr_t arg, caddr_t data, u_int dlen, struct mbuf *m,
    u_int direction)
{
	struct m_hdr mh;

	mh.mh_flags = 0;
	mh.mh_next = m;
	mh.mh_len = dlen;
	mh.mh_data = data;

	bpf_mtap(arg, (struct mbuf *) &mh, direction);
	/* Propagate any filter-drop decision back onto the real mbuf. */
	m->m_flags |= mh.mh_flags & M_FILDROP;
}

/*
 * Incoming linkage from device drivers, where we have a mbuf chain
 * but need to prepend the address family.
 *
 * Con up a minimal dummy header to pacify bpf.  We allocate (only) a
 * struct m_hdr on the stack.  This is safe as bpf only reads from the
 * fields in this header that we initialize, and will not try to free
 * it or keep a pointer to it.
 */
void
bpf_mtap_af(caddr_t arg, u_int32_t af, struct mbuf *m, u_int direction)
{
	struct m_hdr mh;

	mh.mh_flags = 0;
	mh.mh_next = m;
	mh.mh_len = 4;
	/* NOTE(review): token restored from mis-encoded source ("&af;"). */
	mh.mh_data = (caddr_t)&af;

	bpf_mtap(arg, (struct mbuf *) &mh, direction);
	m->m_flags |= mh.mh_flags & M_FILDROP;
}

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  Return 1 if it's time to wakeup a listener (buffer full),
 * otherwise 0.  "copy" is the routine called to do the actual data
 * transfer.  bcopy is passed in to copy contiguous chunks, while
 * bpf_mcopy is passed in to copy mbuf chains.  In the latter case,
 * pkt is really an mbuf.
 */
void
bpf_catchpacket(struct bpf_d *d, u_char *pkt, size_t pktlen, size_t snaplen,
    void (*cpfn)(const void *, void *, size_t))
{
	struct bpf_hdr *hp;
	int totlen, curlen;
	int hdrlen = d->bd_bif->bif_hdrlen;
	struct timeval tv;

	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == 0) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		bpf_wakeup(d);
		curlen = 0;
	}

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
	microtime(&tv);
	hp->bh_tstamp.tv_sec = tv.tv_sec;
	hp->bh_tstamp.tv_usec = tv.tv_usec;
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;

	if (d->bd_immediate) {
		/*
		 * Immediate mode is set.  A packet arrived so any
		 * reads should be woken up.
		 */
		bpf_wakeup(d);
	}

	/* A timed read has expired: hand over what we have now. */
	if (d->bd_rdStart && (d->bd_rtout + d->bd_rdStart < ticks)) {
		/*
		 * we could be selecting on the bpf, and we
		 * may have timeouts set.  We got here by getting
		 * a packet, so wake up the reader.
		 */
		if (d->bd_fbuf) {
			d->bd_rdStart = 0;
			ROTATE_BUFFERS(d);
			bpf_wakeup(d);
		}
	}
}

/*
 * Initialize all nonzero fields of a descriptor.
1328 */ 1329 int 1330 bpf_allocbufs(struct bpf_d *d) 1331 { 1332 d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_NOWAIT); 1333 if (d->bd_fbuf == NULL) 1334 return (ENOBUFS); 1335 d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_NOWAIT); 1336 if (d->bd_sbuf == NULL) { 1337 free(d->bd_fbuf, M_DEVBUF); 1338 return (ENOBUFS); 1339 } 1340 d->bd_slen = 0; 1341 d->bd_hlen = 0; 1342 return (0); 1343 } 1344 1345 /* 1346 * Free buffers currently in use by a descriptor 1347 * when the reference count drops to zero. 1348 */ 1349 void 1350 bpf_freed(struct bpf_d *d) 1351 { 1352 if (--d->bd_ref > 0) 1353 return; 1354 1355 if (d->bd_sbuf != 0) { 1356 free(d->bd_sbuf, M_DEVBUF); 1357 if (d->bd_hbuf != 0) 1358 free(d->bd_hbuf, M_DEVBUF); 1359 if (d->bd_fbuf != 0) 1360 free(d->bd_fbuf, M_DEVBUF); 1361 } 1362 if (d->bd_rfilter) 1363 free((caddr_t)d->bd_rfilter, M_DEVBUF); 1364 if (d->bd_wfilter) 1365 free((caddr_t)d->bd_wfilter, M_DEVBUF); 1366 1367 bpfilter_destroy(d); 1368 } 1369 1370 /* 1371 * Attach an interface to bpf. driverp is a pointer to a (struct bpf_if *) 1372 * in the driver's softc; dlt is the link layer type; hdrlen is the fixed 1373 * size of the link header (variable length headers not yet supported). 1374 */ 1375 void 1376 bpfattach(caddr_t *driverp, struct ifnet *ifp, u_int dlt, u_int hdrlen) 1377 { 1378 struct bpf_if *bp; 1379 bp = (struct bpf_if *)malloc(sizeof(*bp), M_DEVBUF, M_DONTWAIT); 1380 1381 if (bp == 0) 1382 panic("bpfattach"); 1383 1384 bp->bif_dlist = 0; 1385 bp->bif_driverp = (struct bpf_if **)driverp; 1386 bp->bif_ifp = ifp; 1387 bp->bif_dlt = dlt; 1388 1389 bp->bif_next = bpf_iflist; 1390 bpf_iflist = bp; 1391 1392 *bp->bif_driverp = NULL; 1393 1394 /* 1395 * Compute the length of the bpf header. This is not necessarily 1396 * equal to SIZEOF_BPF_HDR because we want to insert spacing such 1397 * that the network layer header begins on a longword boundary (for 1398 * performance reasons and to alleviate alignment restrictions). 
 */
	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
}

/* Detach an interface from its attached bpf device. */
void
bpfdetach(struct ifnet *ifp)
{
	struct bpf_if *bp, *nbp, **pbp = &bpf_iflist;
	struct bpf_d *bd;
	int maj;

	/* Walk the interface list, unlinking every bpf_if bound to ifp. */
	for (bp = bpf_iflist; bp; bp = nbp) {
		nbp= bp->bif_next;
		if (bp->bif_ifp == ifp) {
			*pbp = nbp;

			/* Locate the major number. */
			for (maj = 0; maj < nchrdev; maj++)
				if (cdevsw[maj].d_open == bpfopen)
					break;

			/*
			 * Revoke every open descriptor on this bif.  The
			 * loop re-reads bif_dlist each pass: vdevgone()
			 * closes the device, which is expected to remove
			 * the descriptor from the list — TODO confirm
			 * against bpfclose()/bpf_detachd().
			 */
			for (bd = bp->bif_dlist; bd; bd = bp->bif_dlist) {
				struct bpf_d *d;

				/*
				 * Locate the minor number and nuke the vnode
				 * for any open instance.
				 */
				LIST_FOREACH(d, &bpf_d_list, bd_list)
					if (d == bd) {
						vdevgone(maj, d->bd_unit,
						    d->bd_unit, VCHR);
						break;
					}
			}

			free(bp, M_DEVBUF);
		} else
			pbp = &bp->bif_next;
	}
	ifp->if_bpf = NULL;
}

/*
 * net.bpf sysctl handler: reads or updates the default
 * (NET_BPF_BUFSIZE) and maximum (NET_BPF_MAXBUFSIZE) capture
 * buffer sizes.
 */
int
bpf_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{
	int newval;
	int error;

	if (namelen != 1)
		return (ENOTDIR);

	switch (name[0]) {
	case NET_BPF_BUFSIZE:
		newval = bpf_bufsize;
		error = sysctl_int(oldp, oldlenp, newp, newlen, &newval);
		if (error)
			return (error);
		/* New default must lie in [BPF_MINBUFSIZE, bpf_maxbufsize]. */
		if (newval < BPF_MINBUFSIZE || newval > bpf_maxbufsize)
			return (EINVAL);
		bpf_bufsize = newval;
		break;
	case NET_BPF_MAXBUFSIZE:
		newval = bpf_maxbufsize;
		error = sysctl_int(oldp, oldlenp, newp, newlen, &newval);
		if (error)
			return (error);
		if (newval < BPF_MINBUFSIZE)
			return (EINVAL);
		bpf_maxbufsize = newval;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (0);
}

/* Find the descriptor with the given minor number, or NULL. */
struct bpf_d *
bpfilter_lookup(int unit)
{
	struct bpf_d *bd;

	LIST_FOREACH(bd, &bpf_d_list, bd_list)
		if (bd->bd_unit == unit)
			return (bd);
	return (NULL);
}

/*
 * Allocate a zeroed descriptor for the given minor number and link it
 * into bpf_d_list.  Returns NULL if the unit already exists or the
 * allocation fails.
 */
struct bpf_d *
bpfilter_create(int unit)
{
	struct bpf_d *bd;

	if ((bd = bpfilter_lookup(unit)) != NULL)
		return (NULL);
	if ((bd = malloc(sizeof(*bd), M_DEVBUF, M_NOWAIT|M_ZERO)) != NULL) {
		bd->bd_unit = unit;
		LIST_INSERT_HEAD(&bpf_d_list, bd, bd_list);
	}
	return (bd);
}

/* Unlink a descriptor from bpf_d_list and release its memory. */
void
bpfilter_destroy(struct bpf_d *bd)
{
	LIST_REMOVE(bd, bd_list);
	free(bd, M_DEVBUF);
}

/*
 * Get a list of available data link type of the interface.
 */
int
bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
{
	int n, error;
	struct ifnet *ifp;
	struct bpf_if *bp;

	ifp = d->bd_bif->bif_ifp;
	n = 0;
	error = 0;
	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		if (bp->bif_ifp != ifp)
			continue;
		/* A NULL list means the caller only wants the count. */
		if (bfl->bfl_list != NULL) {
			if (n >= bfl->bfl_len)
				return (ENOMEM);
			error = copyout(&bp->bif_dlt,
			    bfl->bfl_list + n, sizeof(u_int));
			if (error)
				break;
		}
		n++;
	}

	/* Report the number of matching DLTs back to the caller. */
	bfl->bfl_len = n;
	return (error);
}

/*
 * Set the data link type of a BPF instance.
 */
int
bpf_setdlt(struct bpf_d *d, u_int dlt)
{
	int s;
	struct ifnet *ifp;
	struct bpf_if *bp;

	/* Already at the requested DLT: nothing to do. */
	if (d->bd_bif->bif_dlt == dlt)
		return (0);
	ifp = d->bd_bif->bif_ifp;
	/* Find another bpf_if on the same interface offering this DLT. */
	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
			break;
	}
	if (bp == NULL)
		return (EINVAL);
	/* Detach/attach/reset as one unit at splnet so no tap runs mid-move. */
	s = splnet();
	bpf_detachd(d);
	bpf_attachd(d, bp);
	bpf_reset_d(d);
	splx(s);
	return (0);
}