1 /* $NetBSD: bpf.c,v 1.107 2005/02/26 22:45:09 perry Exp $ */ 2 3 /* 4 * Copyright (c) 1990, 1991, 1993 5 * The Regents of the University of California. All rights reserved. 6 * 7 * This code is derived from the Stanford/CMU enet packet filter, 8 * (net/enet.c) distributed as part of 4.3BSD, and code contributed 9 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence 10 * Berkeley Laboratory. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 
35 * 36 * @(#)bpf.c 8.4 (Berkeley) 1/9/95 37 * static char rcsid[] = 38 * "Header: bpf.c,v 1.67 96/09/26 22:00:52 leres Exp "; 39 */ 40 41 #include <sys/cdefs.h> 42 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.107 2005/02/26 22:45:09 perry Exp $"); 43 44 #include <sys/param.h> 45 #include <sys/systm.h> 46 #include <sys/mbuf.h> 47 #include <sys/buf.h> 48 #include <sys/time.h> 49 #include <sys/proc.h> 50 #include <sys/user.h> 51 #include <sys/ioctl.h> 52 #include <sys/conf.h> 53 #include <sys/vnode.h> 54 #include <sys/queue.h> 55 56 #include <sys/file.h> 57 #include <sys/filedesc.h> 58 #include <sys/tty.h> 59 #include <sys/uio.h> 60 61 #include <sys/protosw.h> 62 #include <sys/socket.h> 63 #include <sys/errno.h> 64 #include <sys/kernel.h> 65 #include <sys/poll.h> 66 #include <sys/sysctl.h> 67 68 #include <net/if.h> 69 #include <net/slip.h> 70 71 #include <net/bpf.h> 72 #include <net/bpfdesc.h> 73 74 #include <net/if_arc.h> 75 #include <net/if_ether.h> 76 77 #include <netinet/in.h> 78 #include <netinet/if_inarp.h> 79 80 #if defined(_KERNEL_OPT) 81 #include "opt_bpf.h" 82 #include "sl.h" 83 #include "strip.h" 84 #endif 85 86 #ifndef BPF_BUFSIZE 87 /* 88 * 4096 is too small for FDDI frames. 8192 is too small for gigabit Ethernet 89 * jumbos (circa 9k), ATM, or Intel gig/10gig ethernet jumbos (16k). 90 */ 91 # define BPF_BUFSIZE 32768 92 #endif 93 94 #define PRINET 26 /* interruptible */ 95 96 /* 97 * The default read buffer size, and limit for BIOCSBLEN, is sysctl'able. 98 * XXX the default values should be computed dynamically based 99 * on available memory size and available mbuf clusters. 
 */
int	bpf_bufsize = BPF_BUFSIZE;
int	bpf_maxbufsize = BPF_DFLTBUFSIZE;	/* XXX set dynamically, see above */

/*
 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
 * bpf_dtab holds the descriptors, indexed by minor device #
 */
struct bpf_if	*bpf_iflist;
LIST_HEAD(, bpf_d) bpf_list;

static int	bpf_allocbufs(struct bpf_d *);
static void	bpf_deliver(struct bpf_if *,
		            void *(*cpfn)(void *, const void *, size_t),
			    void *, u_int, u_int, struct ifnet *);
static void	bpf_freed(struct bpf_d *);
static void	bpf_ifname(struct ifnet *, struct ifreq *);
static void	*bpf_mcpy(void *, const void *, size_t);
static int	bpf_movein(struct uio *, int, int,
			        struct mbuf **, struct sockaddr *);
static void	bpf_attachd(struct bpf_d *, struct bpf_if *);
static void	bpf_detachd(struct bpf_d *);
static int	bpf_setif(struct bpf_d *, struct ifreq *);
static void	bpf_timed_out(void *);
static __inline void
		bpf_wakeup(struct bpf_d *);
static void	catchpacket(struct bpf_d *, u_char *, u_int, u_int,
    void *(*)(void *, const void *, size_t));
static void	reset_d(struct bpf_d *);
static int	bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
static int	bpf_setdlt(struct bpf_d *, u_int);

static int	bpf_read(struct file *, off_t *, struct uio *, struct ucred *,
    int);
static int	bpf_write(struct file *, off_t *, struct uio *, struct ucred *,
    int);
static int	bpf_ioctl(struct file *, u_long, void *, struct proc *);
static int	bpf_poll(struct file *, int, struct proc *);
static int	bpf_close(struct file *, struct proc *);
static int	bpf_kqfilter(struct file *, struct knote *);

/*
 * File operations for a cloned /dev/bpf descriptor; installed by
 * bpfopen() via fdclone().
 */
static const struct fileops bpf_fileops = {
	bpf_read,
	bpf_write,
	bpf_ioctl,
	fnullop_fcntl,
	bpf_poll,
	fbadop_stat,
	bpf_close,
	bpf_kqfilter,
};

dev_type_open(bpfopen);

/*
 * Character device switch: only open() is real; everything else goes
 * through the per-descriptor fileops above once the device is cloned.
 */
const struct cdevsw bpf_cdevsw = {
	bpfopen, noclose, noread, nowrite, noioctl,
	nostop, notty, nopoll, nommap, nokqfilter,
};

/*
 * Copy a packet supplied by a write(2) on a bpf descriptor from user
 * space into a freshly allocated mbuf, and construct the sockaddr that
 * if_output() expects for the given link type.
 *
 * On success, *mp holds the new mbuf (ownership passes to the caller)
 * and 0 is returned; otherwise an errno is returned and no mbuf is
 * left allocated.
 */
static int
bpf_movein(uio, linktype, mtu, mp, sockp)
	struct uio *uio;
	int linktype;
	int mtu;
	struct mbuf **mp;
	struct sockaddr *sockp;
{
	struct mbuf *m;
	int error;
	int len;
	int hlen;	/* link-level header bytes moved into sockp->sa_data */
	int align;	/* pad so the payload after the header is aligned */

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		align = 0;
		break;

	case DLT_PPP:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		align = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		/* 6(dst)+6(src)+2(type) */
		hlen = sizeof(struct ether_header);
		align = 2;
		break;

	case DLT_ARCNET:
		sockp->sa_family = AF_UNSPEC;
		hlen = ARC_HDRLEN;
		align = 5;
		break;

	case DLT_FDDI:
		sockp->sa_family = AF_LINK;
		/* XXX 4(FORMAC)+6(dst)+6(src) */
		hlen = 16;
		align = 0;
		break;

	case DLT_ECONET:
		sockp->sa_family = AF_UNSPEC;
		hlen = 6;
		align = 2;
		break;

	case DLT_NULL:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		align = 0;
		break;

	default:
		return (EIO);
	}

	len = uio->uio_resid;
	/*
	 * If there aren't enough bytes for a link level header or the
	 * packet length exceeds the interface mtu, return an error.
	 */
	if (len < hlen || len - hlen > mtu)
		return (EMSGSIZE);

	/*
	 * XXX Avoid complicated buffer chaining ---
	 * bail if it won't fit in a single mbuf.
	 * (Take into account possible alignment bytes)
	 */
	if ((unsigned)len > MCLBYTES - align)
		return (EIO);

	m = m_gethdr(M_WAIT, MT_DATA);
	m->m_pkthdr.rcvif = 0;
	m->m_pkthdr.len = len - hlen;
	if (len > MHLEN - align) {
		/* Doesn't fit in the header mbuf; attach a cluster. */
		m_clget(m, M_WAIT);
		if ((m->m_flags & M_EXT) == 0) {
			error = ENOBUFS;
			goto bad;
		}
	}

	/* Insure the data is properly aligned */
	if (align > 0) {
		m->m_data += align;
		m->m_len -= align;
	}

	error = uiomove(mtod(m, void *), len, uio);
	if (error)
		goto bad;
	if (hlen != 0) {
		/*
		 * Peel the link-level header off the front of the mbuf
		 * and stash it in the sockaddr for if_output().
		 */
		memcpy(sockp->sa_data, mtod(m, void *), hlen);
		m->m_data += hlen;	/* XXX */
		len -= hlen;
	}
	m->m_len = len;
	*mp = m;
	return (0);

 bad:
	m_freem(m);
	return (error);
}

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 * Must be called at splnet.
 */
static void
bpf_attachd(d, bp)
	struct bpf_d *d;
	struct bpf_if *bp;
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	d->bd_bif = bp;
	d->bd_next = bp->bif_dlist;
	bp->bif_dlist = d;

	*bp->bif_driverp = bp;
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(d)
	struct bpf_d *d;
{
	struct bpf_d **p;
	struct bpf_if *bp;

	bp = d->bd_bif;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		int error;

		d->bd_promisc = 0;
		/*
		 * Take device out of promiscuous mode.  Since we were
		 * able to enter promiscuous mode, we should be able
		 * to turn it off.  But we can get an error if
		 * the interface was configured down, so only panic
		 * if we don't get an unexpected error.
		 */
		error = ifpromisc(bp->bif_ifp, 0);
		if (error && error != EINVAL)
			panic("bpf: ifpromisc failed");
	}
	/* Remove d from the interface's descriptor list. */
	p = &bp->bif_dlist;
	while (*p != d) {
		p = &(*p)->bd_next;
		if (*p == 0)
			panic("bpf_detachd: descriptor not in list");
	}
	*p = (*p)->bd_next;
	if (bp->bif_dlist == 0)
		/*
		 * Let the driver know that there are no more listeners.
		 */
		*d->bd_bif->bif_driverp = 0;
	d->bd_bif = 0;
}


/*
 * Mark a descriptor free by making it point to itself.
 * This is probably cheaper than marking with a constant since
 * the address should be in a register anyway.
 */

/*
 * bpfilterattach() is called at boot time.
 */
/* ARGSUSED */
void
bpfilterattach(n)
	int n;
{
	/* Initialize the global list of open descriptors. */
	LIST_INIT(&bpf_list);
}

/*
 * Open ethernet device.  Clones.
 *
 * Each open() produces a fresh struct bpf_d bound to a cloned file
 * descriptor; the descriptor has no interface attached until BIOCSETIF.
 */
/* ARGSUSED */
int
bpfopen(dev, flag, mode, p)
	dev_t dev;
	int flag;
	int mode;
	struct proc *p;
{
	struct bpf_d *d;
	struct file *fp;
	int error, fd;

	/* falloc() will use the descriptor for us. */
	if ((error = falloc(p, &fp, &fd)) != 0)
		return error;

	d = malloc(sizeof(*d), M_DEVBUF, M_WAITOK);
	(void)memset(d, 0, sizeof(*d));
	d->bd_bufsize = bpf_bufsize;
	d->bd_seesent = 1;	/* by default, see transmitted packets too */
	callout_init(&d->bd_callout);

	LIST_INSERT_HEAD(&bpf_list, d, bd_list);

	return fdclone(p, fp, fd, flag, &bpf_fileops, d);
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
static int
bpf_close(struct file *fp, struct proc *p)
{
	struct bpf_d *d = fp->f_data;
	int s;

	s = splnet();
	/* Cancel any pending read timeout before tearing down. */
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	if (d->bd_bif)
		bpf_detachd(d);
	splx(s);
	bpf_freed(d);
	LIST_REMOVE(d, bd_list);
	free(d, M_DEVBUF);
	fp->f_data = NULL;

	return (0);
}

/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = 0;
/*
 * bpfread - read next chunk of packets from buffers
 */
static int
bpf_read(struct file *fp, off_t *offp, struct uio *uio,
    struct ucred *cred, int flags)
{
	struct bpf_d *d = fp->f_data;
	int timed_out;
	int error;
	int s;

	/*
	 * Restrict application to use a buffer the same size as
	 * as kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	s = splnet();
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	timed_out = (d->bd_state == BPF_TIMED_OUT);
	d->bd_state = BPF_IDLE;
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == 0) {
		if (fp->f_flag & FNONBLOCK) {
			/* Non-blocking: return whatever is stored, if any. */
			if (d->bd_slen == 0) {
				splx(s);
				return (EWOULDBLOCK);
			}
			ROTATE_BUFFERS(d);
			break;
		}

		if ((d->bd_immediate || timed_out) && d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}
		error = tsleep(d, PRINET|PCATCH, "bpf",
				d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			splx(s);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				splx(s);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
		if (error != 0)
			goto done;
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	splx(s);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = uiomove(d->bd_hbuf, d->bd_hlen, uio);

	s = splnet();
	/* Recycle the drained hold buffer as the new free buffer. */
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = 0;
	d->bd_hlen = 0;
 done:
	splx(s);
	return (error);
}


/*
 * If there are processes sleeping on this descriptor, wake them up.
537 */ 538 static __inline void 539 bpf_wakeup(d) 540 struct bpf_d *d; 541 { 542 wakeup(d); 543 if (d->bd_async) 544 fownsignal(d->bd_pgid, SIGIO, 0, 0, NULL); 545 546 selnotify(&d->bd_sel, 0); 547 /* XXX */ 548 d->bd_sel.sel_pid = 0; 549 } 550 551 552 static void 553 bpf_timed_out(arg) 554 void *arg; 555 { 556 struct bpf_d *d = arg; 557 int s; 558 559 s = splnet(); 560 if (d->bd_state == BPF_WAITING) { 561 d->bd_state = BPF_TIMED_OUT; 562 if (d->bd_slen != 0) 563 bpf_wakeup(d); 564 } 565 splx(s); 566 } 567 568 569 static int 570 bpf_write(struct file *fp, off_t *offp, struct uio *uio, 571 struct ucred *cred, int flags) 572 { 573 struct bpf_d *d = fp->f_data; 574 struct ifnet *ifp; 575 struct mbuf *m; 576 int error, s; 577 static struct sockaddr_storage dst; 578 579 if (d->bd_bif == 0) 580 return (ENXIO); 581 582 ifp = d->bd_bif->bif_ifp; 583 584 if (uio->uio_resid == 0) 585 return (0); 586 587 error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp->if_mtu, &m, 588 (struct sockaddr *) &dst); 589 if (error) 590 return (error); 591 592 if (m->m_pkthdr.len > ifp->if_mtu) 593 return (EMSGSIZE); 594 595 if (d->bd_hdrcmplt) 596 dst.ss_family = pseudo_AF_HDRCMPLT; 597 598 s = splsoftnet(); 599 error = (*ifp->if_output)(ifp, m, (struct sockaddr *) &dst, NULL); 600 splx(s); 601 /* 602 * The driver frees the mbuf. 603 */ 604 return (error); 605 } 606 607 /* 608 * Reset a descriptor by flushing its packet buffer and clearing the 609 * receive and drop counts. Should be called at splnet. 610 */ 611 static void 612 reset_d(d) 613 struct bpf_d *d; 614 { 615 if (d->bd_hbuf) { 616 /* Free the hold buffer. */ 617 d->bd_fbuf = d->bd_hbuf; 618 d->bd_hbuf = 0; 619 } 620 d->bd_slen = 0; 621 d->bd_hlen = 0; 622 d->bd_rcount = 0; 623 d->bd_dcount = 0; 624 d->bd_ccount = 0; 625 } 626 627 #ifdef BPF_KERN_FILTER 628 extern struct bpf_insn *bpf_tcp_filter; 629 extern struct bpf_insn *bpf_udp_filter; 630 #endif 631 632 /* 633 * FIONREAD Check for read packet available. 
 * BIOCGBLEN		Get buffer len [for read()].
 * BIOCSETF		Set ethernet read filter.
 * BIOCFLUSH		Flush read packet buffer.
 * BIOCPROMISC		Put interface into promiscuous mode.
 * BIOCGDLT		Get link layer type.
 * BIOCGETIF		Get interface name.
 * BIOCSETIF		Set interface.
 * BIOCSRTIMEOUT	Set read timeout.
 * BIOCGRTIMEOUT	Get read timeout.
 * BIOCGSTATS		Get packet stats.
 * BIOCIMMEDIATE	Set immediate mode.
 * BIOCVERSION		Get filter language version.
 * BIOGHDRCMPLT		Get "header already complete" flag.
 * BIOSHDRCMPLT		Set "header already complete" flag.
 */
/* ARGSUSED */
static int
bpf_ioctl(struct file *fp, u_long cmd, void *addr, struct proc *p)
{
	struct bpf_d *d = fp->f_data;
	int s, error = 0;
#ifdef BPF_KERN_FILTER
	struct bpf_insn **p;
#endif

	/* Any ioctl cancels a pending read timeout. */
	s = splnet();
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	splx(s);

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			s = splnet();
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			splx(s);

			*(int *)addr = n;
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.  Only allowed before an interface is
	 * attached (the buffers are allocated at BIOCSETIF time);
	 * the value is clamped to [BPF_MINBUFSIZE, bpf_maxbufsize]
	 * and the clamped value is copied back to the caller.
	 */
	case BIOCSBLEN:
		if (d->bd_bif != 0)
			error = EINVAL;
		else {
			u_int size = *(u_int *)addr;

			if (size > bpf_maxbufsize)
				*(u_int *)addr = size = bpf_maxbufsize;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, addr);
		break;

#ifdef BPF_KERN_FILTER
	/*
	 * Set TCP or UDP reject filter.
	 */
	case BIOCSTCPF:
	case BIOCSUDPF:
		if (!suser()) {
			error = EPERM;
			break;
		}

		/* Validate and store filter */
		error = bpf_setf(d, addr);

		/* Free possible old filter */
		if (cmd == BIOCSTCPF)
			p = &bpf_tcp_filter;
		else
			p = &bpf_udp_filter;
		if (*p != NULL)
			free(*p, M_DEVBUF);

		/* Steal new filter (noop if error) */
		s = splnet();
		*p = d->bd_filter;
		d->bd_filter = NULL;
		splx(s);
		break;
#endif

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		s = splnet();
		reset_d(d);
		splx(s);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == 0) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		s = splnet();
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		splx(s);
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get a list of supported device parameters.
	 */
	case BIOCGDLTLIST:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			error = bpf_getdltlist(d, addr);
		break;

	/*
	 * Set device parameters.
	 */
	case BIOCSDLT:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			error = bpf_setdlt(d, *(u_int *)addr);
		break;

	/*
	 * Set interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			bpf_ifname(d->bd_bif->bif_ifp, addr);
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = addr;

			/*
			 * Compute number of ticks.
			 * NOTE(review): tv_sec * hz can overflow an int for
			 * very large timeouts — no range check here.
			 */
			d->bd_rtout = tv->tv_sec * hz + tv->tv_usec / tick;
			if ((d->bd_rtout == 0) && (tv->tv_usec != 0))
				d->bd_rtout = 1;
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = addr;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			bs->bs_capt = d->bd_ccount;
			break;
		}

	case BIOCGSTATSOLD:
		{
			struct bpf_stat_old *bs = addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	case BIOCGHDRCMPLT:	/* get "header already complete" flag */
		*(u_int *)addr = d->bd_hdrcmplt;
		break;

	case BIOCSHDRCMPLT:	/* set "header already complete" flag */
		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
		break;

	/*
	 * Get "see sent packets" flag
	 */
	case BIOCGSEESENT:
		*(u_int *)addr = d->bd_seesent;
		break;

	/*
	 * Set "see sent" packets flag
	 */
	case BIOCSSEESENT:
		d->bd_seesent = *(u_int *)addr;
		break;

	case FIONBIO:		/* Non-blocking I/O */
		/*
		 * No need to do anything special here: bpf_read() keys
		 * off the file's FNONBLOCK flag to decide whether to
		 * block.
		 */
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;

	case TIOCSPGRP:		/* Process or group to send signals to */
	case FIOSETOWN:
		error = fsetown(p, &d->bd_pgid, cmd, addr);
		break;

	case TIOCGPGRP:
	case FIOGETOWN:
		error = fgetown(p, d->bd_pgid, cmd, addr);
		break;
	}
	return (error);
}

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
int
bpf_setf(struct bpf_d *d, struct bpf_program *fp)
{
	struct bpf_insn *fcode, *old;
	u_int flen, size;
	int s;

	old = d->bd_filter;
	if (fp->bf_insns == 0) {
		/* A null program means "remove the current filter". */
		if (fp->bf_len != 0)
			return (EINVAL);
		s = splnet();
		d->bd_filter = 0;
		reset_d(d);
		splx(s);
		if (old != 0)
			free(old, M_DEVBUF);
		return (0);
	}
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return (EINVAL);

	/* Copy the program in and validate it before installing. */
	size = flen * sizeof(*fp->bf_insns);
	fcode = malloc(size, M_DEVBUF, M_WAITOK);
	if (copyin(fp->bf_insns, fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		s = splnet();
		d->bd_filter = fcode;
		reset_d(d);
		splx(s);
		if (old != 0)
			free(old, M_DEVBUF);

		return (0);
	}
	free(fcode, M_DEVBUF);
	return (EINVAL);
}

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(struct bpf_d *d, struct ifreq *ifr)
{
	struct bpf_if *bp;
	char *cp;
	int unit_seen, i, s, error;

	/*
	 * Make sure the provided name has a unit number, and default
	 * it to '0' if not specified.
	 * XXX This is ugly ... do this differently?
	 */
	unit_seen = 0;
	cp = ifr->ifr_name;
	cp[sizeof(ifr->ifr_name) - 1] = '\0';	/* sanity */
	while (*cp++)
		if (*cp >= '0' && *cp <= '9')
			unit_seen = 1;
	if (!unit_seen) {
		/* Make sure to leave room for the '\0'. */
		for (i = 0; i < (IFNAMSIZ - 1); ++i) {
			if ((ifr->ifr_name[i] >= 'a' &&
			     ifr->ifr_name[i] <= 'z') ||
			    (ifr->ifr_name[i] >= 'A' &&
			     ifr->ifr_name[i] <= 'Z'))
				continue;
			ifr->ifr_name[i] = '0';
		}
	}

	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == 0 ||
		    strcmp(ifp->if_xname, ifr->ifr_name) != 0)
			continue;
		/* skip additional entry */
		if (bp->bif_driverp != (struct bpf_if **)&ifp->if_bpf)
			continue;
		/*
		 * We found the requested interface.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
		 */
		if (d->bd_sbuf == 0) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return (error);
		}
		s = splnet();
		if (bp != d->bd_bif) {
			if (d->bd_bif)
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);

			bpf_attachd(d, bp);
		}
		reset_d(d);
		splx(s);
		return (0);
	}
	/* Not found. */
	return (ENXIO);
}

/*
 * Copy the interface name to the ifreq.
 */
static void
bpf_ifname(struct ifnet *ifp, struct ifreq *ifr)
{
	memcpy(ifr->ifr_name, ifp->if_xname, IFNAMSIZ);
}

/*
 * Support for poll() system call
 *
 * Return true iff the specific operation will not block indefinitely - with
 * the assumption that it is safe to positively acknowledge a request for the
 * ability to write to the BPF device.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
static int
bpf_poll(struct file *fp, int events, struct proc *p)
{
	struct bpf_d *d = fp->f_data;
	int s = splnet();
	int revents;

	/* Writes never block indefinitely; always report writable. */
	revents = events & (POLLOUT | POLLWRNORM);
	if (events & (POLLIN | POLLRDNORM)) {
		/*
		 * An imitation of the FIONREAD ioctl code.
		 */
		if ((d->bd_hlen != 0) ||
		    (d->bd_immediate && d->bd_slen != 0)) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else if (d->bd_state == BPF_TIMED_OUT) {
			if (d->bd_slen != 0)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				revents |= events & POLLIN;
		} else {
			selrecord(p, &d->bd_sel);
			/* Start the read timeout if necessary */
			if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
				callout_reset(&d->bd_callout, d->bd_rtout,
					      bpf_timed_out, d);
				d->bd_state = BPF_WAITING;
			}
		}
	}

	splx(s);
	return (revents);
}

/*
 * kqueue detach: unhook this knote from the descriptor's select klist.
 */
static void
filt_bpfrdetach(struct knote *kn)
{
	struct bpf_d *d = kn->kn_hook;
	int s;

	s = splnet();
	SLIST_REMOVE(&d->bd_sel.sel_klist, kn, knote, kn_selnext);
	splx(s);
}

/*
 * kqueue read filter: report how much captured data is available;
 * in immediate mode the store buffer counts as readable too.
 */
static int
filt_bpfread(struct knote *kn, long hint)
{
	struct bpf_d *d = kn->kn_hook;

	kn->kn_data = d->bd_hlen;
	if (d->bd_immediate)
		kn->kn_data += d->bd_slen;
	return (kn->kn_data > 0);
}

static const struct filterops bpfread_filtops =
	{ 1, NULL, filt_bpfrdetach, filt_bpfread };

/*
 * kqueue attach: only EVFILT_READ is supported.
 */
static int
bpf_kqfilter(struct file *fp, struct knote *kn)
{
	struct bpf_d *d = fp->f_data;
	struct klist *klist;
	int s;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &d->bd_sel.sel_klist;
		kn->kn_fop = &bpfread_filtops;
		break;

	default:
		return (1);
	}

	kn->kn_hook = d;

	s = splnet();
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	splx(s);

	return (0);
}

/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_tap(void *arg, u_char *pkt, u_int pktlen)
{
	struct bpf_if *bp;
	struct bpf_d *d;
	u_int slen;
	/*
	 * Note that the ipl does not have to be raised at this point.
	 * The only problem that could arise here is that if two different
	 * interfaces shared any data.  This is not the case.
	 */
	bp = arg;
	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		/* slen is the number of bytes the filter wants captured. */
		slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
		if (slen != 0)
			catchpacket(d, pkt, pktlen, slen, memcpy);
	}
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void *
bpf_mcpy(void *dst_arg, const void *src_arg, size_t len)
{
	const struct mbuf *m;
	u_int count;
	u_char *dst;

	m = src_arg;
	dst = dst_arg;
	while (len > 0) {
		/* Running out of chain before len bytes is a caller bug. */
		if (m == 0)
			panic("bpf_mcpy");
		count = min(m->m_len, len);
		memcpy(dst, mtod(m, void *), count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
	return (dst_arg);
}

/*
 * Dispatch a packet to all the listeners on interface bp.
 *
 * marg    pointer to the packet, either a data buffer or an mbuf chain
 * buflen  buffer length, if marg is a data buffer
 * cpfn    a function that can copy marg into the listener's buffer
 * pktlen  length of the packet
 * rcvif   either NULL or the interface the packet came in on.
 */
static __inline void
bpf_deliver(struct bpf_if *bp, void *(*cpfn)(void *, const void *, size_t),
	    void *marg, u_int pktlen, u_int buflen, struct ifnet *rcvif)
{
	u_int slen;
	struct bpf_d *d;

	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		/* rcvif == NULL means a locally transmitted packet. */
		if (!d->bd_seesent && (rcvif == NULL))
			continue;
		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, marg, pktlen, buflen);
		if (slen != 0)
			catchpacket(d, marg, pktlen, slen, cpfn);
	}
}

/*
 * Incoming linkage from device drivers, when the head of the packet is in
 * a buffer, and the tail is in an mbuf chain.
 */
void
bpf_mtap2(void *arg, void *data, u_int dlen, struct mbuf *m)
{
	struct bpf_if *bp = arg;
	u_int pktlen;
	struct mbuf mb;

	pktlen = m_length(m) + dlen;

	/*
	 * Craft on-stack mbuf suitable for passing to bpf_filter.
	 * Note that we cut corners here; we only setup what's
	 * absolutely needed--this mbuf should never go anywhere else.
	 */
	(void)memset(&mb, 0, sizeof(mb));
	mb.m_next = m;
	mb.m_data = data;
	mb.m_len = dlen;

	bpf_deliver(bp, bpf_mcpy, &mb, pktlen, 0, m->m_pkthdr.rcvif);
}

/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 */
void
bpf_mtap(void *arg, struct mbuf *m)
{
	void *(*cpfn)(void *, const void *, size_t);
	struct bpf_if *bp = arg;
	u_int pktlen, buflen;
	void *marg;

	pktlen = m_length(m);

	/* Single-mbuf chains can be filtered/copied as a flat buffer. */
	if (pktlen == m->m_len) {
		cpfn = memcpy;
		marg = mtod(m, void *);
		buflen = pktlen;
	} else {
		cpfn = bpf_mcpy;
		marg = m;
		buflen = 0;
	}

	bpf_deliver(bp, cpfn, marg, pktlen, buflen, m->m_pkthdr.rcvif);
}

/*
 * We need to prepend the address family as
 * a four byte field.  Cons up a dummy header
 * to pacify bpf.
 * This is safe because bpf
 * will only read from the mbuf (i.e., it won't
 * try to free it or keep a pointer to it).
 */
void
bpf_mtap_af(void *arg, u_int32_t af, struct mbuf *m)
{
	struct mbuf m0;

	/* On-stack header mbuf whose payload is the address family word. */
	m0.m_flags = 0;
	m0.m_next = m;
	m0.m_len = 4;
	m0.m_data = (char *)&af;

	bpf_mtap(arg, &m0);
}

/*
 * As above, but prepend a fake 14-byte Ethernet header carrying only
 * the ethertype (zeroed addresses) in an on-stack mbuf.
 */
void
bpf_mtap_et(void *arg, u_int16_t et, struct mbuf *m)
{
	struct mbuf m0;

	m0.m_flags = 0;
	m0.m_next = m;
	m0.m_len = 14;
	m0.m_data = m0.m_dat;

	((u_int32_t *)m0.m_data)[0] = 0;
	((u_int32_t *)m0.m_data)[1] = 0;
	((u_int32_t *)m0.m_data)[2] = 0;
	((u_int16_t *)m0.m_data)[6] = et;

	bpf_mtap(arg, &m0);
}

#if NSL > 0 || NSTRIP > 0
/*
 * Put the SLIP pseudo-"link header" in place.
 * Note this M_PREPEND() should never fail,
 * since we know we always have enough space
 * in the input buffer.
 */
void
bpf_mtap_sl_in(void *arg, u_char *chdr, struct mbuf **m)
{
	int s;
	u_char *hp;

	M_PREPEND(*m, SLIP_HDRLEN, M_DONTWAIT);
	if (*m == NULL)
		return;

	hp = mtod(*m, u_char *);
	hp[SLX_DIR] = SLIPDIR_IN;
	(void)memcpy(&hp[SLX_CHDR], chdr, CHDR_LEN);

	s = splnet();
	bpf_mtap(arg, *m);
	splx(s);

	/* Strip the pseudo-header again before normal input processing. */
	m_adj(*m, SLIP_HDRLEN);
}

/*
 * Put the SLIP pseudo-"link header" in
 * place.  The compressed header is now
 * at the beginning of the mbuf.
 */
void
bpf_mtap_sl_out(void *arg, u_char *chdr, struct mbuf *m)
{
	struct mbuf m0;
	u_char *hp;
	int s;

	/* On-stack pseudo-header mbuf chained in front of the packet. */
	m0.m_flags = 0;
	m0.m_next = m;
	m0.m_data = m0.m_dat;
	m0.m_len = SLIP_HDRLEN;

	hp = mtod(&m0, u_char *);

	hp[SLX_DIR] = SLIPDIR_OUT;
	(void)memcpy(&hp[SLX_CHDR], chdr, CHDR_LEN);

	s = splnet();
	bpf_mtap(arg, &m0);
	splx(s);
	/* This routine consumes the packet: the caller's mbuf is freed. */
	m_freem(m);
}
#endif

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer, waking up any pending reader when the buffer
 * fills or when immediate mode requires it.  "cpfn" is the
 * routine called to do the actual data transfer.  memcpy is
 * passed in to copy contiguous chunks, while bpf_mcpy is passed
 * in to copy mbuf chains.  In the latter case, pkt is really an
 * mbuf.
 */
static void
catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
    void *(*cpfn)(void *, const void *, size_t))
{
	struct bpf_hdr *hp;
	int totlen, curlen;
	int hdrlen = d->bd_bif->bif_hdrlen;

	++d->bd_ccount;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == 0) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		bpf_wakeup(d);
		curlen = 0;
	}

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
	microtime(&hp->bh_tstamp);
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 * Note bh_caplen is set as a side effect of the call.
	 */
	(*cpfn)((u_char *)hp + hdrlen, pkt, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;

	/*
	 * Call bpf_wakeup after bd_slen has been updated so that kevent(2)
	 * will cause filt_bpfread() to be called with it adjusted.
	 */
	if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT)
		/*
		 * Immediate mode is set, or the read timeout has
		 * already expired during a select call.  A packet
		 * arrived, so the reader should be woken up.
		 */
		bpf_wakeup(d);
}

/*
 * Initialize all nonzero fields of a descriptor.
 * Returns ENOBUFS (without leaking) if either buffer
 * cannot be allocated, 0 on success.
 */
static int
bpf_allocbufs(struct bpf_d *d)
{

	d->bd_fbuf = malloc(d->bd_bufsize, M_DEVBUF, M_NOWAIT);
	if (!d->bd_fbuf)
		return (ENOBUFS);
	d->bd_sbuf = malloc(d->bd_bufsize, M_DEVBUF, M_NOWAIT);
	if (!d->bd_sbuf) {
		/* Don't leak the free buffer on partial failure. */
		free(d->bd_fbuf, M_DEVBUF);
		return (ENOBUFS);
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	return (0);
}

/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(struct bpf_d *d)
{
	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it yet hasn't been marked
	 * free.
	 */
	/* bd_sbuf != 0 implies the other buffers were allocated too. */
	if (d->bd_sbuf != 0) {
		free(d->bd_sbuf, M_DEVBUF);
		if (d->bd_hbuf != 0)
			free(d->bd_hbuf, M_DEVBUF);
		if (d->bd_fbuf != 0)
			free(d->bd_fbuf, M_DEVBUF);
	}
	if (d->bd_filter)
		free(d->bd_filter, M_DEVBUF);
}

/*
 * Attach an interface to bpf.  dlt is the link layer type; hdrlen is the
 * fixed size of the link header (variable length headers not yet supported).
 */
void
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{

	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
}

/*
 * Attach additional dlt for a interface to bpf.  dlt is the link layer type;
 * hdrlen is the fixed size of the link header for the specified dlt
 * (variable length headers not yet supported).
 */
void
bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, void *driverp)
{
	struct bpf_if *bp;
	bp = malloc(sizeof(*bp), M_DEVBUF, M_DONTWAIT);
	if (bp == 0)
		panic("bpfattach");

	bp->bif_dlist = 0;
	bp->bif_driverp = driverp;
	bp->bif_ifp = ifp;
	bp->bif_dlt = dlt;

	/* Push onto the head of the global interface list. */
	bp->bif_next = bpf_iflist;
	bpf_iflist = bp;

	/* No listeners yet: clear the driver's tap pointer. */
	*bp->bif_driverp = 0;

	/*
	 * Compute the length of the bpf header.  This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
	 */
	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;

#if 0
	printf("bpf: %s attached\n", ifp->if_xname);
#endif
}

/*
 * Remove an interface from bpf.
 */
void
bpfdetach(struct ifnet *ifp)
{
	struct bpf_if *bp, **pbp;
	struct bpf_d *d;
	int s;

	/* Nuke the vnodes for any open instances */
	for (d = LIST_FIRST(&bpf_list); d != NULL; d = LIST_NEXT(d, bd_list)) {
		if (d->bd_bif != NULL && d->bd_bif->bif_ifp == ifp) {
			/*
			 * Detach the descriptor from an interface now.
			 * It will be free'ed later by close routine.
			 */
			s = splnet();
			d->bd_promisc = 0;	/* we can't touch device. */
			bpf_detachd(d);
			splx(s);
		}
	}

	/*
	 * Unlink and free every bpf_if for this interface; restart the
	 * scan after each removal since the list has been modified.
	 */
  again:
	for (bp = bpf_iflist, pbp = &bpf_iflist;
	     bp != NULL; pbp = &bp->bif_next, bp = bp->bif_next) {
		if (bp->bif_ifp == ifp) {
			*pbp = bp->bif_next;
			free(bp, M_DEVBUF);

			goto again;
		}
	}
}

/*
 * Change the data link type of a interface.
 */
void
bpf_change_type(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{
	struct bpf_if *bp;

	/* Find the bpf_if that feeds this interface's if_bpf tap. */
	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		if (bp->bif_driverp == (struct bpf_if **)&ifp->if_bpf)
			break;
	}
	if (bp == NULL)
		panic("bpf_change_type");

	bp->bif_dlt = dlt;

	/*
	 * Compute the length of the bpf header.  This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
	 */
	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
}

/*
 * Get a list of available data link type of the interface.
1612 */ 1613 static int 1614 bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl) 1615 { 1616 int n, error; 1617 struct ifnet *ifp; 1618 struct bpf_if *bp; 1619 1620 ifp = d->bd_bif->bif_ifp; 1621 n = 0; 1622 error = 0; 1623 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) { 1624 if (bp->bif_ifp != ifp) 1625 continue; 1626 if (bfl->bfl_list != NULL) { 1627 if (n >= bfl->bfl_len) 1628 return ENOMEM; 1629 error = copyout(&bp->bif_dlt, 1630 bfl->bfl_list + n, sizeof(u_int)); 1631 } 1632 n++; 1633 } 1634 bfl->bfl_len = n; 1635 return error; 1636 } 1637 1638 /* 1639 * Set the data link type of a BPF instance. 1640 */ 1641 static int 1642 bpf_setdlt(struct bpf_d *d, u_int dlt) 1643 { 1644 int s, error, opromisc; 1645 struct ifnet *ifp; 1646 struct bpf_if *bp; 1647 1648 if (d->bd_bif->bif_dlt == dlt) 1649 return 0; 1650 ifp = d->bd_bif->bif_ifp; 1651 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) { 1652 if (bp->bif_ifp == ifp && bp->bif_dlt == dlt) 1653 break; 1654 } 1655 if (bp == NULL) 1656 return EINVAL; 1657 s = splnet(); 1658 opromisc = d->bd_promisc; 1659 bpf_detachd(d); 1660 bpf_attachd(d, bp); 1661 reset_d(d); 1662 if (opromisc) { 1663 error = ifpromisc(bp->bif_ifp, 1); 1664 if (error) 1665 printf("%s: bpf_setdlt: ifpromisc failed (%d)\n", 1666 bp->bif_ifp->if_xname, error); 1667 else 1668 d->bd_promisc = 1; 1669 } 1670 splx(s); 1671 return 0; 1672 } 1673 1674 static int 1675 sysctl_net_bpf_maxbufsize(SYSCTLFN_ARGS) 1676 { 1677 int newsize, error; 1678 struct sysctlnode node; 1679 1680 node = *rnode; 1681 node.sysctl_data = &newsize; 1682 newsize = bpf_maxbufsize; 1683 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 1684 if (error || newp == NULL) 1685 return (error); 1686 1687 if (newsize < BPF_MINBUFSIZE || newsize > BPF_MAXBUFSIZE) 1688 return (EINVAL); 1689 1690 bpf_maxbufsize = newsize; 1691 1692 return (0); 1693 } 1694 1695 SYSCTL_SETUP(sysctl_net_bfp_setup, "sysctl net.bpf subtree setup") 1696 { 1697 struct sysctlnode *node; 1698 1699 
sysctl_createv(clog, 0, NULL, NULL, 1700 CTLFLAG_PERMANENT, 1701 CTLTYPE_NODE, "net", NULL, 1702 NULL, 0, NULL, 0, 1703 CTL_NET, CTL_EOL); 1704 1705 node = NULL; 1706 sysctl_createv(clog, 0, NULL, &node, 1707 CTLFLAG_PERMANENT, 1708 CTLTYPE_NODE, "bpf", 1709 SYSCTL_DESCR("BPF options"), 1710 NULL, 0, NULL, 0, 1711 CTL_NET, CTL_CREATE, CTL_EOL); 1712 if (node != NULL) 1713 sysctl_createv(clog, 0, NULL, NULL, 1714 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 1715 CTLTYPE_INT, "maxbufsize", 1716 SYSCTL_DESCR("Maximum size for data capture buffer"), 1717 sysctl_net_bpf_maxbufsize, 0, &bpf_maxbufsize, 0, 1718 CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL); 1719 } 1720