/*
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf.c	8.2 (Berkeley) 3/28/94
 *
 * $FreeBSD: src/sys/net/bpf.c,v 1.59.2.12 2002/04/14 21:41:48 luigi Exp $
 */

#include "use_bpf.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#include <sys/ttycom.h>
#include <sys/filedesc.h>

#include <sys/event.h>

#include <sys/socket.h>
#include <sys/vnode.h>

#include <sys/thread2.h>

#include <net/if.h>
#include <net/bpf.h>
#include <net/bpfdesc.h>
#include <net/netmsg2.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <sys/devfs.h>

struct netmsg_bpf_output {
	struct netmsg_base base;
	struct mbuf *nm_mbuf;
	struct ifnet *nm_ifp;
	struct sockaddr *nm_dst;
};

MALLOC_DEFINE(M_BPF, "BPF", "BPF data");
DEVFS_DECLARE_CLONE_BITMAP(bpf);

#if NBPF <= 1
#define BPF_PREALLOCATED_UNITS 4
#else
#define BPF_PREALLOCATED_UNITS NBPF
#endif

#if NBPF > 0

/*
 * The default read buffer size is patchable.
 */
static int bpf_bufsize = BPF_DEFAULTBUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
    &bpf_bufsize, 0, "Current size of bpf buffer");
int bpf_maxbufsize = BPF_MAXBUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_maxbufsize, CTLFLAG_RW,
    &bpf_maxbufsize, 0, "Maximum size of bpf buffer");

/*
 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
 */
static struct bpf_if *bpf_iflist;

static struct lwkt_token bpf_token = LWKT_TOKEN_INITIALIZER(bpf_token);

static int	bpf_allocbufs(struct bpf_d *);
static void	bpf_attachd(struct bpf_d *d, struct bpf_if *bp);
static void	bpf_detachd(struct bpf_d *d);
static void	bpf_resetd(struct bpf_d *);
static void	bpf_freed(struct bpf_d *);
static void	bpf_mcopy(const void *, void *, size_t);
static int	bpf_movein(struct uio *, int, struct mbuf **,
			   struct sockaddr *, int *, struct bpf_insn *);
static int	bpf_setif(struct bpf_d *, struct ifreq *);
static void	bpf_timed_out(void *);
static void	bpf_wakeup(struct bpf_d *);
static void	catchpacket(struct bpf_d *, u_char *, u_int, u_int,
			    void (*)(const void *, void *, size_t),
			    const struct timeval *);
static int	bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd);
static int	bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
static int	bpf_setdlt(struct bpf_d *, u_int);
static void	bpf_drvinit(void *unused);
static void	bpf_filter_detach(struct knote *kn);
static int	bpf_filter_read(struct knote *kn, long hint);

static d_open_t		bpfopen;
static d_clone_t	bpfclone;
static d_close_t	bpfclose;
static d_read_t		bpfread;
static d_write_t	bpfwrite;
static d_ioctl_t	bpfioctl;
static d_kqfilter_t	bpfkqfilter;

#define CDEV_MAJOR 23
static struct dev_ops bpf_ops = {
	{ "bpf", 0, D_MPSAFE },
	.d_open = bpfopen,
	.d_close = bpfclose,
	.d_read = bpfread,
	.d_write = bpfwrite,
	.d_ioctl = bpfioctl,
	.d_kqfilter = bpfkqfilter
};

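/*
 * Copy a packet written from userland into a newly allocated mbuf.
 * A link-layer sockaddr is constructed for if_output() based on the
 * data link type; if the DLT carries a link header it is copied into
 * sockp->sa_data and stripped from the mbuf.  The packet must also
 * pass the descriptor's write filter, otherwise EPERM is returned.
 */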
static int
bpf_movein(struct uio *uio, int linktype, struct mbuf **mp,
	   struct sockaddr *sockp, int *datlen, struct bpf_insn *wfilter)
{
	struct mbuf *m;
	int error;
	int len;
	int hlen;
	int slen;

	*datlen = 0;
	*mp = NULL;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {
	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = sizeof(struct ether_header);
		break;

	case DLT_RAW:
	case DLT_NULL:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	case DLT_ATM_RFC1483:
		/*
		 * en atm driver requires 4-byte atm pseudo header.
		 * though it isn't standard, vpi:vci needs to be
		 * specified anyway.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
		break;

	case DLT_PPP:
		sockp->sa_family = AF_UNSPEC;
		hlen = 4;	/* This should match PPP_HDRLEN */
		break;

	default:
		return(EIO);
	}

	len = uio->uio_resid;
	*datlen = len - hlen;
	if ((unsigned)len > MCLBYTES)
		return(EIO);

	m = m_getl(len, MB_WAIT, MT_DATA, M_PKTHDR, NULL);
	if (m == NULL)
		return(ENOBUFS);
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	*mp = m;

	if (m->m_len < hlen) {
		error = EPERM;
		goto bad;
	}

	error = uiomove(mtod(m, u_char *), len, uio);
	if (error)
		goto bad;

	slen = bpf_filter(wfilter, mtod(m, u_char *), len, len);
	if (slen == 0) {
		error = EPERM;
		goto bad;
	}

	/*
	 * Make room for link header, and copy it to sockaddr.
	 */
	if (hlen != 0) {
		bcopy(m->m_data, sockp->sa_data, hlen);
		m->m_pkthdr.len -= hlen;
		m->m_len -= hlen;
		m->m_data += hlen;	/* XXX */
	}
	return (0);
bad:
	m_freem(m);
	return(error);
}

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 * Must be called at splimp.
 */
static void
bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	lwkt_gettoken(&bpf_token);
	d->bd_bif = bp;
	SLIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);
	*bp->bif_driverp = bp;

	EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1);
	lwkt_reltoken(&bpf_token);
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(struct bpf_d *d)
{
	int error;
	struct bpf_if *bp;
	struct ifnet *ifp;

	lwkt_gettoken(&bpf_token);
	bp = d->bd_bif;
	ifp = bp->bif_ifp;

	/* Remove d from the interface's descriptor list. */
	SLIST_REMOVE(&bp->bif_dlist, d, bpf_d, bd_next);

	if (SLIST_EMPTY(&bp->bif_dlist)) {
		/*
		 * Let the driver know that there are no more listeners.
		 */
		*bp->bif_driverp = NULL;
	}
	d->bd_bif = NULL;

	EVENTHANDLER_INVOKE(bpf_track, ifp, bp->bif_dlt, 0);

	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		error = ifpromisc(ifp, 0);
		if (error != 0 && error != ENXIO) {
			/*
			 * ENXIO can happen if a pccard is unplugged.
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			if_printf(ifp, "bpf_detach: ifpromisc failed(%d)\n",
				  error);
		}
	}
	lwkt_reltoken(&bpf_token);
}

/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
static int
bpfopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bpf_d *d;

	lwkt_gettoken(&bpf_token);
	if (ap->a_cred->cr_prison) {
		lwkt_reltoken(&bpf_token);
		return(EPERM);
	}

	d = dev->si_drv1;
	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
	if (d != NULL) {
		lwkt_reltoken(&bpf_token);
		return(EBUSY);
	}

	d = kmalloc(sizeof *d, M_BPF, M_WAITOK | M_ZERO);
	dev->si_drv1 = d;
	d->bd_bufsize = bpf_bufsize;
	d->bd_sig = SIGIO;
	d->bd_seesent = 1;
	callout_init(&d->bd_callout);
	lwkt_reltoken(&bpf_token);

	return(0);
}

static int
bpfclone(struct dev_clone_args *ap)
{
	int unit;

	unit = devfs_clone_bitmap_get(&DEVFS_CLONE_BITMAP(bpf), 0);
	ap->a_dev = make_only_dev(&bpf_ops, unit, 0, 0, 0600, "bpf%d", unit);

	return 0;
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
static int
bpfclose(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bpf_d *d = dev->si_drv1;

	lwkt_gettoken(&bpf_token);
	funsetown(&d->bd_sigio);
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	if (d->bd_bif != NULL)
		bpf_detachd(d);
	bpf_freed(d);
	dev->si_drv1 = NULL;
	if (dev->si_uminor >= BPF_PREALLOCATED_UNITS) {
		devfs_clone_bitmap_put(&DEVFS_CLONE_BITMAP(bpf), dev->si_uminor);
		destroy_dev(dev);
	}
	kfree(d, M_BPF);
	lwkt_reltoken(&bpf_token);

	return(0);
}

/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = NULL;
/*
 * bpfread - read next chunk of packets from buffers
 */
static int
bpfread(struct dev_read_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bpf_d *d = dev->si_drv1;
	int timed_out;
	int error;

	lwkt_gettoken(&bpf_token);
	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (ap->a_uio->uio_resid != d->bd_bufsize) {
		lwkt_reltoken(&bpf_token);
		return(EINVAL);
	}

	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	timed_out = (d->bd_state == BPF_TIMED_OUT);
	d->bd_state = BPF_IDLE;
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == NULL) {
		if ((d->bd_immediate || (ap->a_ioflag & IO_NDELAY) || timed_out)
		    && d->bd_slen != 0) {
			/*
			 * We're in immediate mode, or are reading in
			 * non-blocking mode, or the read timed out, and
			 * a packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}

		/*
		 * No data is available, check to see if the bpf device
		 * is still pointed at a real interface.  If not, return
		 * ENXIO so that the userland process knows to rebind
		 * it before using it again.
		 */
		if (d->bd_bif == NULL) {
			lwkt_reltoken(&bpf_token);
			return(ENXIO);
		}

		if (ap->a_ioflag & IO_NDELAY) {
			lwkt_reltoken(&bpf_token);
			return(EWOULDBLOCK);
		}
		error = tsleep(d, PCATCH, "bpf", d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			lwkt_reltoken(&bpf_token);
			return(error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				lwkt_reltoken(&bpf_token);
				return(0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = uiomove(d->bd_hbuf, d->bd_hlen, ap->a_uio);

	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = NULL;
	d->bd_hlen = 0;
	lwkt_reltoken(&bpf_token);

	return(error);
}


/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static void
bpf_wakeup(struct bpf_d *d)
{
	if (d->bd_state == BPF_WAITING) {
		callout_stop(&d->bd_callout);
		d->bd_state = BPF_IDLE;
	}
	wakeup(d);
	if (d->bd_async && d->bd_sig && d->bd_sigio)
		pgsigio(d->bd_sigio, d->bd_sig, 0);

	KNOTE(&d->bd_kq.ki_note, 0);
}

/*
 * The read timeout fired while a descriptor was in BPF_WAITING state;
 * flag it as timed out and wake the reader if data has accumulated.
 */
static void
bpf_timed_out(void *arg)
{
	struct bpf_d *d = (struct bpf_d *)arg;

	if (d->bd_state == BPF_WAITING) {
		d->bd_state = BPF_TIMED_OUT;
		if (d->bd_slen != 0)
			bpf_wakeup(d);
	}
}

/*
 * Hand a packet written via bpfwrite() to the interface's if_output
 * routine; dispatched to the netisr thread by bpfwrite().
 */
static void
bpf_output_dispatch(netmsg_t msg)
{
	struct netmsg_bpf_output *bmsg = (struct netmsg_bpf_output *)msg;
	struct ifnet *ifp = bmsg->nm_ifp;
	int error;

	/*
	 * The driver frees the mbuf.
	 */
	error = ifp->if_output(ifp, bmsg->nm_mbuf, bmsg->nm_dst, NULL);
	lwkt_replymsg(&msg->lmsg, error);
}

static int
bpfwrite(struct dev_write_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bpf_d *d = dev->si_drv1;
	struct ifnet *ifp;
	struct mbuf *m;
	int error, ret;
	struct sockaddr dst;
	int datlen;
	struct netmsg_bpf_output bmsg;

	lwkt_gettoken(&bpf_token);
	if (d->bd_bif == NULL) {
		lwkt_reltoken(&bpf_token);
		return(ENXIO);
	}

	ifp = d->bd_bif->bif_ifp;

	if (ap->a_uio->uio_resid == 0) {
		lwkt_reltoken(&bpf_token);
		return(0);
	}

	error = bpf_movein(ap->a_uio, (int)d->bd_bif->bif_dlt, &m,
			   &dst, &datlen, d->bd_wfilter);
	if (error) {
		lwkt_reltoken(&bpf_token);
		return(error);
	}

	if (datlen > ifp->if_mtu) {
		m_freem(m);
		lwkt_reltoken(&bpf_token);
		return(EMSGSIZE);
	}

	if (d->bd_hdrcmplt)
		dst.sa_family = pseudo_AF_HDRCMPLT;

	netmsg_init(&bmsg.base, NULL, &curthread->td_msgport,
		    0, bpf_output_dispatch);
	bmsg.nm_mbuf = m;
	bmsg.nm_ifp = ifp;
	bmsg.nm_dst = &dst;

	ret = lwkt_domsg(netisr_portfn(0), &bmsg.base.lmsg, 0);
	lwkt_reltoken(&bpf_token);

	return ret;
}

/*
 * Reset a descriptor by flushing its packet buffer and clearing the
 * receive and drop counts.  Should be called at splimp.
 */
static void
bpf_resetd(struct bpf_d *d)
{
	if (d->bd_hbuf) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = NULL;
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
}

/*
 *  FIONREAD		Check for read packet available.
 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSETF		Set ethernet read filter.
 *  BIOCSETWF		Set ethernet write filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 *  BIOCGHDRCMPLT	Get "header already complete" flag
 *  BIOCSHDRCMPLT	Set "header already complete" flag
 *  BIOCGSEESENT	Get "see packets sent" flag
 *  BIOCSSEESENT	Set "see packets sent" flag
 *  BIOCLOCK		Set "locked" flag
 */
/* ARGSUSED */
static int
bpfioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bpf_d *d = dev->si_drv1;
	int error = 0;

	lwkt_gettoken(&bpf_token);
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;

	if (d->bd_locked == 1) {
		switch (ap->a_cmd) {
		case BIOCGBLEN:
		case BIOCFLUSH:
		case BIOCGDLT:
		case BIOCGDLTLIST:
		case BIOCGETIF:
		case BIOCGRTIMEOUT:
		case BIOCGSTATS:
		case BIOCVERSION:
		case BIOCGRSIG:
		case BIOCGHDRCMPLT:
		case FIONREAD:
		case BIOCLOCK:
		case BIOCSRTIMEOUT:
		case BIOCIMMEDIATE:
		case TIOCGPGRP:
			break;
		default:
			lwkt_reltoken(&bpf_token);
			return (EPERM);
		}
	}
	switch (ap->a_cmd) {
	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;

			*(int *)ap->a_data = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == NULL) {
				error = EINVAL;
			} else {
				ifp = d->bd_bif->bif_ifp;
				ifnet_serialize_all(ifp);
				error = ifp->if_ioctl(ifp, ap->a_cmd,
						      ap->a_data, ap->a_cred);
				ifnet_deserialize_all(ifp);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)ap->a_data = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.
	 */
	case BIOCSBLEN:
		if (d->bd_bif != NULL) {
			error = EINVAL;
		} else {
			u_int size = *(u_int *)ap->a_data;

			if (size > bpf_maxbufsize)
				*(u_int *)ap->a_data = size = bpf_maxbufsize;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)ap->a_data = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
	case BIOCSETWF:
		error = bpf_setf(d, (struct bpf_program *)ap->a_data,
				 ap->a_cmd);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		bpf_resetd(d);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == NULL) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			*(u_int *)ap->a_data = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get a list of supported data link types.
	 */
	case BIOCGDLTLIST:
		if (d->bd_bif == NULL) {
			error = EINVAL;
		} else {
			error = bpf_getdltlist(d,
				(struct bpf_dltlist *)ap->a_data);
		}
		break;

	/*
	 * Set data link type.
	 */
	case BIOCSDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_setdlt(d, *(u_int *)ap->a_data);
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == NULL) {
			error = EINVAL;
		} else {
			struct ifnet *const ifp = d->bd_bif->bif_ifp;
			struct ifreq *const ifr = (struct ifreq *)ap->a_data;

			strlcpy(ifr->ifr_name, ifp->if_xname,
				sizeof ifr->ifr_name);
		}
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)ap->a_data);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)ap->a_data;

			/*
			 * Subtract 1 tick from tvtohz() since this isn't
			 * a one-shot timer.
			 */
			if ((error = itimerfix(tv)) == 0)
				d->bd_rtout = tvtohz_low(tv);
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)ap->a_data;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * ustick;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)ap->a_data;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)ap->a_data;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)ap->a_data;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	/*
	 * Get "header already complete" flag
	 */
	case BIOCGHDRCMPLT:
		*(u_int *)ap->a_data = d->bd_hdrcmplt;
		break;

	/*
	 * Set "header already complete" flag
	 */
	case BIOCSHDRCMPLT:
		d->bd_hdrcmplt = *(u_int *)ap->a_data ? 1 : 0;
		break;

	/*
	 * Get "see sent packets" flag
	 */
	case BIOCGSEESENT:
		*(u_int *)ap->a_data = d->bd_seesent;
		break;

	/*
	 * Set "see sent packets" flag
	 */
	case BIOCSSEESENT:
		d->bd_seesent = *(u_int *)ap->a_data;
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)ap->a_data;
		break;

	case FIOSETOWN:
		error = fsetown(*(int *)ap->a_data, &d->bd_sigio);
		break;

	case FIOGETOWN:
		*(int *)ap->a_data = fgetown(&d->bd_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)ap->a_data), &d->bd_sigio);
		break;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)ap->a_data = -fgetown(&d->bd_sigio);
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)ap->a_data;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)ap->a_data = d->bd_sig;
		break;
	case BIOCLOCK:
		d->bd_locked = 1;
		break;
	}
	lwkt_reltoken(&bpf_token);

	return(error);
}

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
static int
bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd)
{
	struct bpf_insn *fcode, *old;
	u_int wfilter, flen, size;

	if (cmd == BIOCSETWF) {
		old = d->bd_wfilter;
		wfilter = 1;
	} else {
		wfilter = 0;
		old = d->bd_rfilter;
	}
	if (fp->bf_insns == NULL) {
		if (fp->bf_len != 0)
			return(EINVAL);
		if (wfilter)
			d->bd_wfilter = NULL;
		else
			d->bd_rfilter = NULL;
		bpf_resetd(d);
		if (old != NULL)
			kfree(old, M_BPF);
		return(0);
	}
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return(EINVAL);

	size = flen * sizeof *fp->bf_insns;
	fcode = (struct bpf_insn *)kmalloc(size, M_BPF, M_WAITOK);
	if (copyin(fp->bf_insns, fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		if (wfilter)
			d->bd_wfilter = fcode;
		else
			d->bd_rfilter = fcode;
		bpf_resetd(d);
		if (old != NULL)
			kfree(old, M_BPF);

		return(0);
	}
	kfree(fcode, M_BPF);
	return(EINVAL);
}

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(struct bpf_d *d, struct ifreq *ifr)
{
	struct bpf_if *bp;
	int error;
	struct ifnet *theywant;

	theywant = ifunit(ifr->ifr_name);
	if (theywant == NULL)
		return(ENXIO);

	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == NULL || ifp != theywant)
			continue;
		/* skip additional entry */
		if (bp->bif_driverp != &ifp->if_bpf)
			continue;
		/*
		 * We found the requested interface.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
		 */
		if (d->bd_sbuf == NULL) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return(error);
		}
		if (bp != d->bd_bif) {
			if (d->bd_bif != NULL) {
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);
			}

			bpf_attachd(d, bp);
		}
		bpf_resetd(d);
		return(0);
	}

	/* Not found. */
	return(ENXIO);
}

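/*
 * kqueue(2) support.  EVFILT_READ fires once the hold buffer contains a
 * packet, or, when the descriptor is in immediate mode or its read timeout
 * has expired, as soon as the store buffer is non-empty; otherwise the
 * read filter arms the read timeout callout.
 */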
static struct filterops bpf_read_filtops =
	{ FILTEROP_ISFD, NULL, bpf_filter_detach, bpf_filter_read };

static int
bpfkqfilter(struct dev_kqfilter_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct knote *kn = ap->a_kn;
	struct klist *klist;
	struct bpf_d *d;

	lwkt_gettoken(&bpf_token);
	d = dev->si_drv1;
	if (d->bd_bif == NULL) {
		ap->a_result = 1;
		lwkt_reltoken(&bpf_token);
		return (0);
	}

	ap->a_result = 0;
	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &bpf_read_filtops;
		kn->kn_hook = (caddr_t)d;
		break;
	default:
		ap->a_result = EOPNOTSUPP;
		lwkt_reltoken(&bpf_token);
		return (0);
	}

	klist = &d->bd_kq.ki_note;
	knote_insert(klist, kn);
	lwkt_reltoken(&bpf_token);

	return (0);
}

static void
bpf_filter_detach(struct knote *kn)
{
	struct klist *klist;
	struct bpf_d *d;

	d = (struct bpf_d *)kn->kn_hook;
	klist = &d->bd_kq.ki_note;
	knote_remove(klist, kn);
}

static int
bpf_filter_read(struct knote *kn, long hint)
{
	struct bpf_d *d;
	int ready = 0;

	d = (struct bpf_d *)kn->kn_hook;
	if (d->bd_hlen != 0 ||
	    ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
	     d->bd_slen != 0)) {
		ready = 1;
	} else {
		/* Start the read timeout if necessary. */
		if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
			callout_reset(&d->bd_callout, d->bd_rtout,
				      bpf_timed_out, d);
			d->bd_state = BPF_WAITING;
		}
	}

	return (ready);
}


/*
 * Process the packet pkt of length pktlen.  The packet is parsed
 * by each listener's filter, and if accepted, stashed into the
 * corresponding buffer.
 */
void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
	struct bpf_d *d;
	struct timeval tv;
	int gottime = 0;
	u_int slen;

	lwkt_gettoken(&bpf_token);
	/* Re-check */
	if (bp == NULL) {
		lwkt_reltoken(&bpf_token);
		return;
	}

	/*
	 * Note that the ipl does not have to be raised at this point.
	 * The only problem that could arise here is if two different
	 * interfaces shared any data, which is not the case.
	 */
	SLIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		++d->bd_rcount;
		slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
		if (slen != 0) {
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
			catchpacket(d, pkt, pktlen, slen, ovbcopy, &tv);
		}
	}
	lwkt_reltoken(&bpf_token);
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void
bpf_mcopy(const void *src_arg, void *dst_arg, size_t len)
{
	const struct mbuf *m;
	u_int count;
	u_char *dst;

	m = src_arg;
	dst = dst_arg;
	while (len > 0) {
		if (m == NULL)
			panic("bpf_mcopy");
		count = min(m->m_len, len);
		bcopy(mtod(m, void *), dst, count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
}

/*
 * Process the packet in the mbuf chain m.  The packet is parsed by each
 * listener's filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
	struct bpf_d *d;
	u_int pktlen, slen;
	struct timeval tv;
	int gottime = 0;

	lwkt_gettoken(&bpf_token);
	/* Re-check */
	if (bp == NULL) {
		lwkt_reltoken(&bpf_token);
		return;
	}

	/* Don't compute pktlen, if no descriptor is attached. */
	if (SLIST_EMPTY(&bp->bif_dlist)) {
		lwkt_reltoken(&bpf_token);
		return;
	}

	pktlen = m_lengthm(m, NULL);

	SLIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
			continue;
		++d->bd_rcount;
		slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
		if (slen != 0) {
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
			catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy,
				    &tv);
		}
	}
	lwkt_reltoken(&bpf_token);
}

/*
 * Incoming linkage from device drivers, where we have a mbuf chain
 * but need to prepend some arbitrary header from a linear buffer.
 *
 * Con up a minimal dummy header to pacify bpf.  Allocate (only) a
 * struct m_hdr on the stack.  This is safe as bpf only reads from the
 * fields in this header that we initialize, and will not try to free
 * it or keep a pointer to it.
 */
void
bpf_mtap_hdr(struct bpf_if *arg, caddr_t data, u_int dlen, struct mbuf *m, u_int direction)
{
	struct m_hdr mh;

	mh.mh_flags = 0;
	mh.mh_next = m;
	mh.mh_len = dlen;
	mh.mh_data = data;

	bpf_mtap(arg, (struct mbuf *)&mh);
}

void
bpf_mtap_family(struct bpf_if *bp, struct mbuf *m, sa_family_t family)
{
	u_int family4;

	KKASSERT(family != AF_UNSPEC);

	family4 = (u_int)family;
	bpf_ptap(bp, m, &family4, sizeof(family4));
}

/*
 * Process the packet in the mbuf chain m with the header in m prepended.
 * The packet is parsed by each listener's filter, and if accepted,
 * stashed into the corresponding buffer.
 */
void
bpf_ptap(struct bpf_if *bp, struct mbuf *m, const void *data, u_int dlen)
{
	struct mbuf mb;

	/*
	 * Craft on-stack mbuf suitable for passing to bpf_mtap.
	 * Note that we cut corners here; we only setup what's
	 * absolutely needed--this mbuf should never go anywhere else.
	 */
	mb.m_next = m;
	mb.m_data = __DECONST(void *, data);	/* LINTED */
	mb.m_len = dlen;
	mb.m_pkthdr.rcvif = m->m_pkthdr.rcvif;

	bpf_mtap(bp, &mb);
}

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  Wake up a pending reader if the buffer fills up.
 * "cpfn" is the routine called to do the actual data
 * transfer.  bcopy is passed in to copy contiguous chunks, while
 * bpf_mcopy is passed in to copy mbuf chains.  In the latter case,
 * pkt is really an mbuf.
 */
static void
catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
	    void (*cpfn)(const void *, void *, size_t),
	    const struct timeval *tv)
{
	struct bpf_hdr *hp;
	int totlen, curlen;
	int hdrlen = d->bd_bif->bif_hdrlen;
	int wakeup = 0;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == NULL) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		wakeup = 1;
		curlen = 0;
	} else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) {
		/*
		 * Immediate mode is set, or the read timeout has
		 * already expired during a select call.  A packet
		 * arrived, so the reader should be woken up.
		 */
		wakeup = 1;
	}

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
	hp->bh_tstamp = *tv;
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;

	if (wakeup)
		bpf_wakeup(d);
}

/*
 * Initialize all nonzero fields of a descriptor.
 */
static int
bpf_allocbufs(struct bpf_d *d)
{
	d->bd_fbuf = kmalloc(d->bd_bufsize, M_BPF, M_WAITOK);
	d->bd_sbuf = kmalloc(d->bd_bufsize, M_BPF, M_WAITOK);
	d->bd_slen = 0;
	d->bd_hlen = 0;
	return(0);
}

/*
 * Free buffers and packet filter program currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(struct bpf_d *d)
{
	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it has not yet been marked
	 * free.
	 */
	if (d->bd_sbuf != NULL) {
		kfree(d->bd_sbuf, M_BPF);
		if (d->bd_hbuf != NULL)
			kfree(d->bd_hbuf, M_BPF);
		if (d->bd_fbuf != NULL)
			kfree(d->bd_fbuf, M_BPF);
	}
	if (d->bd_rfilter)
		kfree(d->bd_rfilter, M_BPF);
	if (d->bd_wfilter)
		kfree(d->bd_wfilter, M_BPF);
}

/*
 * Attach an interface to bpf.  ifp is a pointer to the structure
 * defining the interface to be attached, dlt is the link layer type,
 * and hdrlen is the fixed size of the link header (variable length
 * headers are not yet supported).
 */
void
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{
	bpfattach_dlt(ifp, dlt, hdrlen, &ifp->if_bpf);
}

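/*
 * Like bpfattach(), but the caller supplies the location of the driver's
 * bpf_if pointer, allowing additional data link types to be registered
 * for the same interface.
 *
 * Illustrative Ethernet driver usage (not taken from this file):
 *
 *	bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
 *	...
 *	bpfdetach(ifp);
 */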
void
bpfattach_dlt(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
{
	struct bpf_if *bp;

	bp = kmalloc(sizeof *bp, M_BPF, M_WAITOK | M_ZERO);

	lwkt_gettoken(&bpf_token);

	SLIST_INIT(&bp->bif_dlist);
	bp->bif_ifp = ifp;
	bp->bif_dlt = dlt;
	bp->bif_driverp = driverp;
	*bp->bif_driverp = NULL;

	bp->bif_next = bpf_iflist;
	bpf_iflist = bp;

	/*
	 * Compute the length of the bpf header.  This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
	 */
	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;

	lwkt_reltoken(&bpf_token);

	if (bootverbose)
		if_printf(ifp, "bpf attached\n");
}

/*
 * Detach bpf from an interface.  This involves detaching each descriptor
 * associated with the interface, and leaving bd_bif NULL.  Notify each
 * descriptor as it's detached so that any sleepers wake up and get
 * ENXIO.
 */
void
bpfdetach(struct ifnet *ifp)
{
	struct bpf_if *bp, *bp_prev;
	struct bpf_d *d;

	lwkt_gettoken(&bpf_token);

	/* Locate BPF interface information */
	bp_prev = NULL;
	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		if (ifp == bp->bif_ifp)
			break;
		bp_prev = bp;
	}

	/* Interface wasn't attached */
	if (bp == NULL) {
		lwkt_reltoken(&bpf_token);
		kprintf("bpfdetach: %s was not attached\n", ifp->if_xname);
		return;
	}

	while ((d = SLIST_FIRST(&bp->bif_dlist)) != NULL) {
		bpf_detachd(d);
		bpf_wakeup(d);
	}

	if (bp_prev != NULL)
		bp_prev->bif_next = bp->bif_next;
	else
		bpf_iflist = bp->bif_next;

	kfree(bp, M_BPF);

	lwkt_reltoken(&bpf_token);
}

/*
 * Get the list of available data link types for the interface.
 */
static int
bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
{
	int n, error;
	struct ifnet *ifp;
	struct bpf_if *bp;

	ifp = d->bd_bif->bif_ifp;
	n = 0;
	error = 0;
	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		if (bp->bif_ifp != ifp)
			continue;
		if (bfl->bfl_list != NULL) {
			if (n >= bfl->bfl_len) {
				return (ENOMEM);
			}
			error = copyout(&bp->bif_dlt,
					bfl->bfl_list + n, sizeof(u_int));
		}
		n++;
	}
	bfl->bfl_len = n;
	return(error);
}

/*
 * Set the data link type of a BPF instance.
 */
static int
bpf_setdlt(struct bpf_d *d, u_int dlt)
{
	int error, opromisc;
	struct ifnet *ifp;
	struct bpf_if *bp;

	if (d->bd_bif->bif_dlt == dlt)
		return (0);
	ifp = d->bd_bif->bif_ifp;
	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
			break;
	}
	if (bp != NULL) {
		opromisc = d->bd_promisc;
		bpf_detachd(d);
		bpf_attachd(d, bp);
		bpf_resetd(d);
		if (opromisc) {
			error = ifpromisc(bp->bif_ifp, 1);
			if (error) {
				if_printf(bp->bif_ifp,
					"bpf_setdlt: ifpromisc failed (%d)\n",
					error);
			} else {
				d->bd_promisc = 1;
			}
		}
	}
	return(bp == NULL ? EINVAL : 0);
}

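/*
 * Exported helpers that let other subsystems acquire and release the
 * global bpf_token around code that must be serialized with BPF state.
 */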
void
bpf_gettoken(void)
{
	lwkt_gettoken(&bpf_token);
}

void
bpf_reltoken(void)
{
	lwkt_reltoken(&bpf_token);
}

static void
bpf_drvinit(void *unused)
{
	int i;

	make_autoclone_dev(&bpf_ops, &DEVFS_CLONE_BITMAP(bpf),
			   bpfclone, 0, 0, 0600, "bpf");
	for (i = 0; i < BPF_PREALLOCATED_UNITS; i++) {
		make_dev(&bpf_ops, i, 0, 0, 0600, "bpf%d", i);
		devfs_clone_bitmap_set(&DEVFS_CLONE_BITMAP(bpf), i);
	}
}

static void
bpf_drvuninit(void *unused)
{
	devfs_clone_handler_del("bpf");
	dev_ops_remove_all(&bpf_ops);
	devfs_clone_bitmap_uninit(&DEVFS_CLONE_BITMAP(bpf));
}

SYSINIT(bpfdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR,
	bpf_drvinit, NULL);
SYSUNINIT(bpfdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR,
	bpf_drvuninit, NULL);

#else /* !BPF */
/*
 * NOP stubs to allow bpf-using drivers to load and function.
 *
 * A 'better' implementation would allow the core bpf functionality
 * to be loaded at runtime.
 */

void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
}

void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
}

void
bpf_ptap(struct bpf_if *bp, struct mbuf *m, const void *data, u_int dlen)
{
}

void
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{
}

void
bpfattach_dlt(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
{
}

void
bpfdetach(struct ifnet *ifp)
{
}

u_int
bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen)
{
	return -1;	/* "no filter" behaviour */
}

void
bpf_gettoken(void)
{
}

void
bpf_reltoken(void)
{
}

#endif /* !BPF */