1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
111 struct bpfd_list bif_wlist; /* writer-only list */
134 (offsetof(type, bh_hdrlen) + sizeof(((type *)0)->bh_hdrlen))
144 * 32-bit version of structure prepended to each packet. We use this header
145 * instead of the standard one for 32-bit streams. We mark a stream as
146 * 32-bit the first time we see a 32-bit compat ioctl request.
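
/*
 * Editorial sketch (not part of the file): a 32-bit header of the kind
 * the comment above describes would mirror struct bpf_hdr but carry
 * 32-bit time stamp fields, roughly:
 *
 *	struct bpf_hdr32 {
 *		struct timeval32 bh_tstamp;	// 32-bit time stamp
 *		uint32_t	 bh_caplen;	// captured length
 *		uint32_t	 bh_datalen;	// original packet length
 *		uint16_t	 bh_hdrlen;	// header + padding length
 *	};
 *
 * The exact layout is an assumption here; the SIZEOF_BPF_HDR() macro
 * above works for any such type that ends at bh_hdrlen.
 */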
224 &bpf_zerocopy_enable, 0, "Enable new zero-copy BPF buffer sessions");
307 if_rele(bp->bif_ifp);
315 refcount_acquire(&bp->bif_refcnt);
322 if (!refcount_release(&bp->bif_refcnt))
324 NET_EPOCH_CALL(bpfif_free, &bp->epoch_ctx);
328 bpfd_ref(struct bpf_d *d)
331 refcount_acquire(&d->bd_refcnt);
335 bpfd_rele(struct bpf_d *d)
338 if (!refcount_release(&d->bd_refcnt))
340 NET_EPOCH_CALL(bpfd_free, &d->epoch_ctx);
358 if (ptr->func != NULL)
359 bpf_destroy_jit_filter(ptr->func);
370 bpf_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
374 BPFD_LOCK_ASSERT(d);
376 switch (d->bd_bufmode) {
378 return (bpf_buffer_append_bytes(d, buf, offset, src, len));
381 counter_u64_add(d->bd_zcopy, 1);
382 return (bpf_zerocopy_append_bytes(d, buf, offset, src, len));
390 bpf_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
394 BPFD_LOCK_ASSERT(d);
396 switch (d->bd_bufmode) {
398 return (bpf_buffer_append_mbuf(d, buf, offset, src, len));
401 counter_u64_add(d->bd_zcopy, 1);
402 return (bpf_zerocopy_append_mbuf(d, buf, offset, src, len));
410 * This function gets called when the free buffer is re-assigned.
413 bpf_buf_reclaimed(struct bpf_d *d)
416 BPFD_LOCK_ASSERT(d);
418 switch (d->bd_bufmode) {
423 bpf_zerocopy_buf_reclaimed(d);
437 bpf_canfreebuf(struct bpf_d *d)
440 BPFD_LOCK_ASSERT(d);
442 switch (d->bd_bufmode) {
444 return (bpf_zerocopy_canfreebuf(d));
455 bpf_canwritebuf(struct bpf_d *d)
457 BPFD_LOCK_ASSERT(d);
459 switch (d->bd_bufmode) {
461 return (bpf_zerocopy_canwritebuf(d));
472 bpf_buffull(struct bpf_d *d)
475 BPFD_LOCK_ASSERT(d);
477 switch (d->bd_bufmode) {
479 bpf_zerocopy_buffull(d);
488 bpf_bufheld(struct bpf_d *d)
491 BPFD_LOCK_ASSERT(d);
493 switch (d->bd_bufmode) {
495 bpf_zerocopy_bufheld(d);
501 bpf_free(struct bpf_d *d)
504 switch (d->bd_bufmode) {
506 return (bpf_buffer_free(d));
509 return (bpf_zerocopy_free(d));
517 bpf_uiomove(struct bpf_d *d, caddr_t buf, u_int len, struct uio *uio)
520 if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
522 return (bpf_buffer_uiomove(d, buf, len, uio));
526 bpf_ioctl_sblen(struct bpf_d *d, u_int *i)
529 if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
531 return (bpf_buffer_ioctl_sblen(d, i));
535 bpf_ioctl_getzmax(struct thread *td, struct bpf_d *d, size_t *i)
538 if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
540 return (bpf_zerocopy_ioctl_getzmax(td, d, i));
544 bpf_ioctl_rotzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
547 if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
549 return (bpf_zerocopy_ioctl_rotzbuf(td, d, bz));
553 bpf_ioctl_setzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
556 if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
558 return (bpf_zerocopy_ioctl_setzbuf(td, d, bz));
566 struct sockaddr *sockp, int *hdrlen, struct bpf_d *d)
587 sockp->sa_family = AF_INET;
592 sockp->sa_family = AF_UNSPEC;
598 sockp->sa_family = AF_IMPLINK;
603 sockp->sa_family = AF_UNSPEC;
612 sockp->sa_family = AF_UNSPEC;
618 * The "en" ATM driver requires a 4-byte ATM pseudo header.
622 sockp->sa_family = AF_UNSPEC;
627 sockp->sa_family = AF_UNSPEC;
632 sockp->sa_family = AF_IEEE80211;
637 sockp->sa_family = AF_IEEE80211;
638 sockp->sa_len = 12; /* XXX != 0 */
646 len = uio->uio_resid;
647 if (len < hlen || len - hlen > ifp->if_mtu)
654 m->m_pkthdr.len = m->m_len = len;
661 slen = bpf_filter(d->bd_wfilter, mtod(m, u_char *), len, len);
671 if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
672 if (bcmp(ifp->if_broadcastaddr, eh->ether_dhost,
674 m->m_flags |= M_BCAST;
676 m->m_flags |= M_MCAST;
678 if (d->bd_hdrcmplt == 0) {
679 memcpy(eh->ether_shost, IF_LLADDR(ifp),
680 sizeof(eh->ether_shost));
689 if (sockp->sa_family == AF_IEEE80211) {
692 * NB: sockp is known to be zero'd so if we do a
700 hlen = p->ibp_len;
701 if (hlen > sizeof(sockp->sa_data)) {
706 bcopy(mtod(m, const void *), sockp->sa_data, hlen);
717 * Attach descriptor to the bpf interface, i.e. make d listen on bp,
721 bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
731 op_w = V_bpf_optimize_writers || d->bd_writer;
733 if (d->bd_bif != NULL)
734 bpf_detachd_locked(d, false);
736 * Point d at bp, and add d to the interface's list.
739 * we can delay adding d to the list of active listeners until
743 BPFD_LOCK(d);
748 d->bd_bif = bp;
750 /* Add to writers-only list */
751 CK_LIST_INSERT_HEAD(&bp->bif_wlist, d, bd_next);
758 d->bd_writer = 2;
760 CK_LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);
762 reset_d(d);
765 bpf_wakeup(d);
767 BPFD_UNLOCK(d);
770 CTR3(KTR_NET, "%s: bpf_attach called by pid %d, adding to %s list",
771 __func__, d->bd_pid, d->bd_writer ? "writer" : "active");
774 EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1);
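
/*
 * Userland counterpart (editorial sketch, not part of the file):
 * bpf_attachd() is normally reached when a consumer binds a descriptor
 * to an interface with BIOCSETIF.  The helper and interface name below
 * are illustrative only.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/bpf.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int
open_bpf_on(const char *ifname)		/* hypothetical helper */
{
	struct ifreq ifr;
	int fd;

	fd = open("/dev/bpf", O_RDWR);	/* clone device */
	if (fd < 0)
		return (-1);
	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
	/* This lands in bpf_setif() and, from there, bpf_attachd(). */
	if (ioctl(fd, BIOCSETIF, &ifr) < 0) {
		close(fd);
		return (-1);
	}
	return (fd);
}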
778 * Check if we need to upgrade our descriptor @d from write-only mode.
781 bpf_check_upgrade(u_long cmd, struct bpf_d *d, struct bpf_insn *fcode,
789 if (d->bd_writer == 0 || fcode == NULL)
798 * while pcap_open_live() definitely sets it to a non-zero value,
799 * we'd prefer to treat k=0 (deny ALL) case the same way: e.g.
821 if (--d->bd_writer == 0) {
824 * been set. This is probably a catch-all
832 "%s: filter function set by pid %d, "
833 "bd_writer counter %d, snap %d upgrade %d",
834 __func__, d->bd_pid, d->bd_writer,
844 bpf_detachd(struct bpf_d *d)
847 bpf_detachd_locked(d, false);
852 bpf_detachd_locked(struct bpf_d *d, bool detached_ifp)
859 CTR2(KTR_NET, "%s: detach required by pid %d", __func__, d->bd_pid);
862 if ((bp = d->bd_bif) == NULL)
865 BPFD_LOCK(d);
866 /* Remove d from the interface's descriptor list. */
867 CK_LIST_REMOVE(d, bd_next);
869 error = d->bd_writer;
870 ifp = bp->bif_ifp;
871 d->bd_bif = NULL;
877 bpf_wakeup(d);
879 BPFD_UNLOCK(d);
880 bpf_bpfd_cnt--;
882 /* Call event handler iff d is attached */
884 EVENTHANDLER_INVOKE(bpf_track, ifp, bp->bif_dlt, 0);
890 if (d->bd_promisc && !detached_ifp) {
891 d->bd_promisc = 0;
892 CURVNET_SET(ifp->if_vnet);
902 if_printf(bp->bif_ifp,
903 "bpf_detach: ifpromisc failed (%d)\n", error);
916 struct bpf_d *d = data;
918 BPFD_LOCK(d);
919 if (d->bd_state == BPF_WAITING)
920 callout_stop(&d->bd_callout);
921 d->bd_state = BPF_IDLE;
922 BPFD_UNLOCK(d);
923 funsetown(&d->bd_sigio);
924 bpf_detachd(d);
926 mac_bpfdesc_destroy(d);
928 seldrain(&d->bd_sel);
929 knlist_destroy(&d->bd_sel.si_note);
930 callout_drain(&d->bd_callout);
931 bpfd_rele(d);
942 struct bpf_d *d;
945 d = malloc(sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
946 error = devfs_set_cdevpriv(d, bpf_dtor);
948 free(d, M_BPF);
953 d->bd_rcount = counter_u64_alloc(M_WAITOK);
954 d->bd_dcount = counter_u64_alloc(M_WAITOK);
955 d->bd_fcount = counter_u64_alloc(M_WAITOK);
956 d->bd_wcount = counter_u64_alloc(M_WAITOK);
957 d->bd_wfcount = counter_u64_alloc(M_WAITOK);
958 d->bd_wdcount = counter_u64_alloc(M_WAITOK);
959 d->bd_zcopy = counter_u64_alloc(M_WAITOK);
962 * For historical reasons, perform a one-time initialization call to
966 bpf_buffer_init(d);
968 d->bd_writer = 2;
969 d->bd_hbuf_in_use = 0;
970 d->bd_bufmode = BPF_BUFMODE_BUFFER;
971 d->bd_sig = SIGIO;
972 d->bd_direction = BPF_D_INOUT;
973 refcount_init(&d->bd_refcnt, 1);
974 BPF_PID_REFRESH(d, td);
976 mac_bpfdesc_init(d);
977 mac_bpfdesc_create(td->td_ucred, d);
979 mtx_init(&d->bd_lock, devtoname(dev), "bpf cdev lock", MTX_DEF);
980 callout_init_mtx(&d->bd_callout, &d->bd_lock, 0);
981 knlist_init_mtx(&d->bd_sel.si_note, &d->bd_lock);
984 d->bd_pcp = 0;
990 * bpfread - read next chunk of packets from buffers
995 struct bpf_d *d;
1000 error = devfs_get_cdevpriv((void **)&d);
1008 if (uio->uio_resid != d->bd_bufsize)
1013 BPFD_LOCK(d);
1014 BPF_PID_REFRESH_CUR(d);
1015 if (d->bd_bufmode != BPF_BUFMODE_BUFFER) {
1016 BPFD_UNLOCK(d);
1019 if (d->bd_state == BPF_WAITING)
1020 callout_stop(&d->bd_callout);
1021 timed_out = (d->bd_state == BPF_TIMED_OUT);
1022 d->bd_state = BPF_IDLE;
1023 while (d->bd_hbuf_in_use) {
1024 error = mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
1027 BPFD_UNLOCK(d);
1036 while (d->bd_hbuf == NULL) {
1037 if (d->bd_slen != 0) {
1042 if (d->bd_immediate || non_block || timed_out) {
1045 * if we are in immediate mode, non-blocking
1048 ROTATE_BUFFERS(d);
1059 if (d->bd_bif == NULL) {
1060 BPFD_UNLOCK(d);
1065 BPFD_UNLOCK(d);
1068 error = msleep(d, &d->bd_lock, PRINET | PCATCH,
1069 "bpf", d->bd_rtout);
1071 BPFD_UNLOCK(d);
1080 if (d->bd_hbuf)
1088 if (d->bd_slen == 0) {
1089 BPFD_UNLOCK(d);
1092 ROTATE_BUFFERS(d);
1099 d->bd_hbuf_in_use = 1;
1100 BPFD_UNLOCK(d);
1110 error = bpf_uiomove(d, d->bd_hbuf, d->bd_hlen, uio);
1112 BPFD_LOCK(d);
1113 KASSERT(d->bd_hbuf != NULL, ("bpfread: lost bd_hbuf"));
1114 d->bd_fbuf = d->bd_hbuf;
1115 d->bd_hbuf = NULL;
1116 d->bd_hlen = 0;
1117 bpf_buf_reclaimed(d);
1118 d->bd_hbuf_in_use = 0;
1119 wakeup(&d->bd_hbuf_in_use);
1120 BPFD_UNLOCK(d);
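
/*
 * Userland counterpart (editorial sketch, not part of the file):
 * bpfread() requires the read(2) size to match the buffer size exactly
 * (the uio_resid check above), and one hold buffer may carry several
 * packets, each prefixed by a struct bpf_hdr and padded out to
 * BPF_WORDALIGN().  The helper below is illustrative only.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <net/bpf.h>
#include <stdlib.h>
#include <unistd.h>

static void
drain_bpf(int fd)			/* hypothetical helper */
{
	struct bpf_hdr *bh;
	ssize_t n;
	u_int bufsize;
	char *buf, *p;

	if (ioctl(fd, BIOCGBLEN, &bufsize) < 0)
		return;
	if ((buf = malloc(bufsize)) == NULL)
		return;
	while ((n = read(fd, buf, bufsize)) > 0) {
		p = buf;
		while (p < buf + n) {
			bh = (struct bpf_hdr *)p;
			/* Packet bytes start at p + bh->bh_hdrlen and
			 * run for bh->bh_caplen bytes; process here. */
			p += BPF_WORDALIGN(bh->bh_hdrlen + bh->bh_caplen);
		}
	}
	free(buf);
}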
1129 bpf_wakeup(struct bpf_d *d)
1132 BPFD_LOCK_ASSERT(d);
1133 if (d->bd_state == BPF_WAITING) {
1134 callout_stop(&d->bd_callout);
1135 d->bd_state = BPF_IDLE;
1137 wakeup(d);
1138 if (d->bd_async && d->bd_sig && d->bd_sigio)
1139 pgsigio(&d->bd_sigio, d->bd_sig, 0);
1141 selwakeuppri(&d->bd_sel, PRINET);
1142 KNOTE_LOCKED(&d->bd_sel.si_note, 0);
1148 struct bpf_d *d = (struct bpf_d *)arg;
1150 BPFD_LOCK_ASSERT(d);
1152 if (callout_pending(&d->bd_callout) ||
1153 !callout_active(&d->bd_callout))
1155 if (d->bd_state == BPF_WAITING) {
1156 d->bd_state = BPF_TIMED_OUT;
1157 if (d->bd_slen != 0)
1158 bpf_wakeup(d);
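
/*
 * Userland counterpart (editorial sketch, not part of the file):
 * bd_rtout, which arms the callout handled above, is set from a plain
 * timeval via BIOCSRTIMEOUT; pairing it with BIOCIMMEDIATE is common
 * in interactive capture tools.  The one-second value is an example.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <net/bpf.h>

static int
set_snappy_reads(int fd)		/* hypothetical helper */
{
	struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };
	u_int on = 1;

	if (ioctl(fd, BIOCSRTIMEOUT, &tv) < 0)
		return (-1);
	return (ioctl(fd, BIOCIMMEDIATE, &on));
}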
1163 bpf_ready(struct bpf_d *d)
1166 BPFD_LOCK_ASSERT(d);
1168 if (!bpf_canfreebuf(d) && d->bd_hlen != 0)
1170 if ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
1171 d->bd_slen != 0)
1183 struct bpf_d *d;
1188 error = devfs_get_cdevpriv((void **)&d);
1193 BPFD_LOCK(d);
1194 BPF_PID_REFRESH_CUR(d);
1195 counter_u64_add(d->bd_wcount, 1);
1196 if ((bp = d->bd_bif) == NULL) {
1201 ifp = bp->bif_ifp;
1202 if ((ifp->if_flags & IFF_UP) == 0) {
1207 if (uio->uio_resid == 0)
1215 * Take extra reference, unlock d and exit from epoch section,
1218 bpfd_ref(d);
1220 BPFD_UNLOCK(d);
1222 error = bpf_movein(uio, (int)bp->bif_dlt, ifp,
1223 &m, &dst, &hlen, d);
1226 counter_u64_add(d->bd_wdcount, 1);
1227 bpfd_rele(d);
1231 BPFD_LOCK(d);
1237 if (d->bd_bif == NULL) {
1238 counter_u64_add(d->bd_wdcount, 1);
1239 BPFD_UNLOCK(d);
1240 bpfd_rele(d);
1244 counter_u64_add(d->bd_wfcount, 1);
1245 if (d->bd_hdrcmplt)
1248 if (d->bd_feedback) {
1251 mc->m_pkthdr.rcvif = ifp;
1253 if (d->bd_direction == BPF_D_INOUT)
1254 m->m_flags |= M_PROMISC;
1258 m->m_pkthdr.len -= hlen;
1259 m->m_len -= hlen;
1260 m->m_data += hlen; /* XXX */
1262 CURVNET_SET(ifp->if_vnet);
1264 mac_bpfdesc_create_mbuf(d, m);
1266 mac_bpfdesc_create_mbuf(d, mc);
1276 if (d->bd_pcp != 0)
1277 vlan_set_pcp(m, d->bd_pcp);
1281 BPFD_UNLOCK(d);
1282 error = (*ifp->if_output)(ifp, m, &dst, &ro);
1284 counter_u64_add(d->bd_wdcount, 1);
1288 (*ifp->if_input)(ifp, mc);
1294 bpfd_rele(d);
1298 counter_u64_add(d->bd_wdcount, 1);
1300 BPFD_UNLOCK(d);
1306 * and drop counts. This is doable for kernel-only buffers, but with
1307 * zero-copy buffers, we can't write to (or rotate) buffers that are
1312 reset_d(struct bpf_d *d)
1315 BPFD_LOCK_ASSERT(d);
1317 while (d->bd_hbuf_in_use)
1318 mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock, PRINET,
1320 if ((d->bd_hbuf != NULL) &&
1321 (d->bd_bufmode != BPF_BUFMODE_ZBUF || bpf_canfreebuf(d))) {
1323 d->bd_fbuf = d->bd_hbuf;
1324 d->bd_hbuf = NULL;
1325 d->bd_hlen = 0;
1326 bpf_buf_reclaimed(d);
1328 if (bpf_canwritebuf(d))
1329 d->bd_slen = 0;
1330 counter_u64_zero(d->bd_rcount);
1331 counter_u64_zero(d->bd_dcount);
1332 counter_u64_zero(d->bd_fcount);
1333 counter_u64_zero(d->bd_wcount);
1334 counter_u64_zero(d->bd_wfcount);
1335 counter_u64_zero(d->bd_wdcount);
1336 counter_u64_zero(d->bd_zcopy);
1363 * BIOCSETZBUF Set current zero-copy buffer locations.
1364 * BIOCGETZMAX Get maximum zero-copy buffer size.
1365 * BIOCROTZBUF Force rotation of zero-copy buffer
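
/*
 * Userland counterpart (editorial sketch, not part of the file) for the
 * zero-copy ioctls listed above.  Sizing both buffers at the BIOCGETZMAX
 * maximum and the mmap(2) flags are assumptions; see bpf(4) for the
 * authoritative procedure.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <net/bpf.h>

static int
setup_zbuf(int fd)			/* hypothetical helper */
{
	struct bpf_zbuf bz;
	size_t zmax;
	u_int mode = BPF_BUFMODE_ZBUF;

	if (ioctl(fd, BIOCSETBUFMODE, &mode) < 0 ||
	    ioctl(fd, BIOCGETZMAX, &zmax) < 0)
		return (-1);
	bz.bz_buflen = zmax;
	bz.bz_bufa = mmap(NULL, zmax, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_SHARED, -1, 0);
	bz.bz_bufb = mmap(NULL, zmax, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_SHARED, -1, 0);
	if (bz.bz_bufa == MAP_FAILED || bz.bz_bufb == MAP_FAILED)
		return (-1);
	return (ioctl(fd, BIOCSETZBUF, &bz));
}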
1375 struct bpf_d *d;
1378 error = devfs_get_cdevpriv((void **)&d);
1385 BPFD_LOCK(d);
1386 BPF_PID_REFRESH(d, td);
1387 if (d->bd_state == BPF_WAITING)
1388 callout_stop(&d->bd_callout);
1389 d->bd_state = BPF_IDLE;
1390 BPFD_UNLOCK(d);
1392 if (d->bd_locked == 1) {
1428 * If we see a 32-bit compat ioctl, mark the stream as 32-bit so
1429 * that it will get 32-bit packet headers.
1438 if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
1439 BPFD_LOCK(d);
1440 d->bd_compat32 = 1;
1441 BPFD_UNLOCK(d);
1459 BPFD_LOCK(d);
1460 n = d->bd_slen;
1461 while (d->bd_hbuf_in_use)
1462 mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
1464 if (d->bd_hbuf)
1465 n += d->bd_hlen;
1466 BPFD_UNLOCK(d);
1476 BPFD_LOCK(d);
1477 *(u_int *)addr = d->bd_bufsize;
1478 BPFD_UNLOCK(d);
1485 error = bpf_ioctl_sblen(d, (u_int *)addr);
1499 error = bpf_setf(d, (struct bpf_program *)addr, cmd);
1506 BPFD_LOCK(d);
1507 reset_d(d);
1508 BPFD_UNLOCK(d);
1516 if (d->bd_bif == NULL) {
1521 } else if (d->bd_promisc == 0) {
1522 error = ifpromisc(d->bd_bif->bif_ifp, 1);
1524 d->bd_promisc = 1;
1534 if (d->bd_bif == NULL)
1537 *(u_int *)addr = d->bd_bif->bif_dlt;
1551 dltlist.bfl_len = list32->bfl_len;
1552 dltlist.bfl_list = PTRIN(list32->bfl_list);
1554 if (d->bd_bif == NULL)
1557 error = bpf_getdltlist(d, &dltlist);
1559 list32->bfl_len = dltlist.bfl_len;
1568 if (d->bd_bif == NULL)
1571 error = bpf_getdltlist(d, (struct bpf_dltlist *)addr);
1580 if (d->bd_bif == NULL)
1583 error = bpf_setdlt(d, *(u_int *)addr);
1592 if (d->bd_bif == NULL)
1595 struct ifnet *const ifp = d->bd_bif->bif_ifp;
1598 strlcpy(ifr->ifr_name, ifp->if_xname,
1599 sizeof(ifr->ifr_name));
1614 * allocate them here. If we're using zero-copy,
1619 BPFD_LOCK(d);
1620 if (d->bd_bufmode == BPF_BUFMODE_BUFFER &&
1621 d->bd_sbuf == NULL)
1623 BPFD_UNLOCK(d);
1625 size = d->bd_bufsize;
1626 error = bpf_buffer_ioctl_sblen(d, &size);
1631 error = bpf_setif(d, (struct ifreq *)addr);
1652 tv->tv_sec = tv32->tv_sec;
1653 tv->tv_usec = tv32->tv_usec;
1660 * a one-shot timer.
1663 d->bd_rtout = tvtohz(tv) - 1;
1686 tv->tv_sec = d->bd_rtout / hz;
1687 tv->tv_usec = (d->bd_rtout % hz) * tick;
1691 tv32->tv_sec = tv->tv_sec;
1692 tv32->tv_usec = tv->tv_usec;
1707 bs->bs_recv = (u_int)counter_u64_fetch(d->bd_rcount);
1708 bs->bs_drop = (u_int)counter_u64_fetch(d->bd_dcount);
1716 BPFD_LOCK(d);
1717 d->bd_immediate = *(u_int *)addr;
1718 BPFD_UNLOCK(d);
1725 bv->bv_major = BPF_MAJOR_VERSION;
1726 bv->bv_minor = BPF_MINOR_VERSION;
1734 BPFD_LOCK(d);
1735 *(u_int *)addr = d->bd_hdrcmplt;
1736 BPFD_UNLOCK(d);
1743 BPFD_LOCK(d);
1744 d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
1745 BPFD_UNLOCK(d);
1752 BPFD_LOCK(d);
1753 *(u_int *)addr = d->bd_direction;
1754 BPFD_UNLOCK(d);
1769 BPFD_LOCK(d);
1770 d->bd_direction = direction;
1771 BPFD_UNLOCK(d);
1783 BPFD_LOCK(d);
1784 *(u_int *)addr = d->bd_tstamp;
1785 BPFD_UNLOCK(d);
1797 d->bd_tstamp = func;
1804 BPFD_LOCK(d);
1805 d->bd_feedback = *(u_int *)addr;
1806 BPFD_UNLOCK(d);
1810 BPFD_LOCK(d);
1811 d->bd_locked = 1;
1812 BPFD_UNLOCK(d);
1815 case FIONBIO: /* Non-blocking I/O */
1819 BPFD_LOCK(d);
1820 d->bd_async = *(int *)addr;
1821 BPFD_UNLOCK(d);
1829 error = fsetown(*(int *)addr, &d->bd_sigio);
1833 BPFD_LOCK(d);
1834 *(int *)addr = fgetown(&d->bd_sigio);
1835 BPFD_UNLOCK(d);
1840 error = fsetown(-(*(int *)addr), &d->bd_sigio);
1845 *(int *)addr = -fgetown(&d->bd_sigio);
1857 BPFD_LOCK(d);
1858 d->bd_sig = sig;
1859 BPFD_UNLOCK(d);
1864 BPFD_LOCK(d);
1865 *(u_int *)addr = d->bd_sig;
1866 BPFD_UNLOCK(d);
1870 BPFD_LOCK(d);
1871 *(u_int *)addr = d->bd_bufmode;
1872 BPFD_UNLOCK(d);
1897 BPFD_LOCK(d);
1898 if (d->bd_sbuf != NULL || d->bd_hbuf != NULL ||
1899 d->bd_fbuf != NULL || d->bd_bif != NULL) {
1900 BPFD_UNLOCK(d);
1904 d->bd_bufmode = *(u_int *)addr;
1905 BPFD_UNLOCK(d);
1909 error = bpf_ioctl_getzmax(td, d, (size_t *)addr);
1913 error = bpf_ioctl_setzbuf(td, d, (struct bpf_zbuf *)addr);
1917 error = bpf_ioctl_rotzbuf(td, d, (struct bpf_zbuf *)addr);
1929 d->bd_pcp = pcp;
1938 * Set d's packet filter program to fp. If this file already has a filter,
1945 bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd)
1966 fp_swab.bf_len = fp32->bf_len;
1968 (struct bpf_insn *)(uintptr_t)fp32->bf_insns;
1990 flen = fp->bf_len;
1991 if (flen > bpf_maxinsns || (fp->bf_insns == NULL && flen != 0))
1993 size = flen * sizeof(*fp->bf_insns);
1997 filter = (struct bpf_insn *)fcode->buffer;
1998 if (copyin(fp->bf_insns, filter, size) != 0 ||
2018 BPFD_LOCK(d);
2021 if (d->bd_wfilter != NULL) {
2022 fcode = __containerof((void *)d->bd_wfilter,
2025 fcode->func = NULL;
2028 d->bd_wfilter = filter;
2030 if (d->bd_rfilter != NULL) {
2031 fcode = __containerof((void *)d->bd_rfilter,
2034 fcode->func = d->bd_bfilter;
2037 d->bd_rfilter = filter;
2039 d->bd_bfilter = jfunc;
2042 reset_d(d);
2044 if (bpf_check_upgrade(cmd, d, filter, flen) != 0) {
2047 * specifying interface. In this case just mark d
2050 d->bd_writer = 0;
2051 if (d->bd_bif != NULL) {
2053 * Remove descriptor from writers-only list
2056 CK_LIST_REMOVE(d, bd_next);
2057 CK_LIST_INSERT_HEAD(&d->bd_bif->bif_dlist,
2058 d, bd_next);
2060 "%s: upgrade required by pid %d",
2061 __func__, d->bd_pid);
2066 BPFD_UNLOCK(d);
2069 NET_EPOCH_CALL(bpf_program_buffer_free, &fcode->epoch_ctx);
2073 d->bd_bif->bif_ifp, d->bd_bif->bif_dlt, 1);
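
/*
 * Userland counterpart (editorial sketch, not part of the file):
 * bpf_setf() is reached via BIOCSETF (read filter) or BIOCSETWF
 * (write filter).  The one-instruction program below accepts every
 * packet whole and is illustrative only; note bpf_check_upgrade()
 * above, which lets a filter set this way move a writer-only
 * descriptor onto the active list.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <net/bpf.h>

static int
set_accept_all(int fd)			/* hypothetical helper */
{
	static struct bpf_insn insns[] = {
		BPF_STMT(BPF_RET | BPF_K, (u_int)-1),
	};
	struct bpf_program prog = {
		.bf_len = sizeof(insns) / sizeof(insns[0]),
		.bf_insns = insns,
	};

	return (ioctl(fd, BIOCSETF, &prog));
}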
2085 bpf_setif(struct bpf_d *d, struct ifreq *ifr)
2092 theywant = ifunit(ifr->ifr_name);
2093 if (theywant == NULL || theywant->if_bpf == NULL)
2096 bp = theywant->if_bpf;
2101 switch (d->bd_bufmode) {
2104 if (d->bd_sbuf == NULL)
2109 panic("bpf_setif: bufmode %d", d->bd_bufmode);
2111 if (bp != d->bd_bif)
2112 bpf_attachd(d, bp);
2114 BPFD_LOCK(d);
2115 reset_d(d);
2116 BPFD_UNLOCK(d);
2130 struct bpf_d *d;
2133 if (devfs_get_cdevpriv((void **)&d) != 0 || d->bd_bif == NULL)
2141 BPFD_LOCK(d);
2142 BPF_PID_REFRESH(d, td);
2144 if (bpf_ready(d))
2147 selrecord(td, &d->bd_sel);
2149 if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
2150 callout_reset(&d->bd_callout, d->bd_rtout,
2151 bpf_timed_out, d);
2152 d->bd_state = BPF_WAITING;
2156 BPFD_UNLOCK(d);
2167 struct bpf_d *d;
2169 if (devfs_get_cdevpriv((void **)&d) != 0)
2172 switch (kn->kn_filter) {
2174 kn->kn_fop = &bpfread_filtops;
2178 kn->kn_fop = &bpfwrite_filtops;
2188 BPFD_LOCK(d);
2189 BPF_PID_REFRESH_CUR(d);
2190 kn->kn_hook = d;
2191 knlist_add(&d->bd_sel.si_note, kn, 1);
2192 BPFD_UNLOCK(d);
2200 struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
2202 knlist_remove(&d->bd_sel.si_note, kn, 0);
2208 struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
2211 BPFD_LOCK_ASSERT(d);
2212 ready = bpf_ready(d);
2214 kn->kn_data = d->bd_slen;
2218 if (!d->bd_hbuf_in_use && d->bd_hbuf)
2219 kn->kn_data += d->bd_hlen;
2220 } else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
2221 callout_reset(&d->bd_callout, d->bd_rtout,
2222 bpf_timed_out, d);
2223 d->bd_state = BPF_WAITING;
2232 struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
2234 BPFD_LOCK_ASSERT(d);
2236 if (d->bd_bif == NULL) {
2237 kn->kn_data = 0;
2240 kn->kn_data = d->bd_bif->bif_ifp->if_mtu;
2274 if ((m->m_flags & (M_PKTHDR | M_TSTMP)) == (M_PKTHDR | M_TSTMP)) {
2304 struct bpf_d *d;
2313 CK_LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
2314 counter_u64_add(d->bd_rcount, 1);
2322 bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
2324 slen = (*(bf->func))(pkt, pktlen, pktlen);
2327 slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
2332 BPFD_LOCK(d);
2333 counter_u64_add(d->bd_fcount, 1);
2334 if (gottime < bpf_ts_quality(d->bd_tstamp))
2335 gottime = bpf_gettime(&bt, d->bd_tstamp,
2338 if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
2340 catchpacket(d, pkt, pktlen, slen,
2342 BPFD_UNLOCK(d);
2351 if (bpf_peers_present(ifp->if_bpf))
2352 bpf_tap(ifp->if_bpf, pkt, pktlen);
2355 #define BPF_CHECK_DIRECTION(d, r, i) \
2356 (((d)->bd_direction == BPF_D_IN && (r) != (i)) || \
2357 ((d)->bd_direction == BPF_D_OUT && (r) == (i)))
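
/*
 * Userland counterpart (editorial sketch, not part of the file): the
 * direction tested by BPF_CHECK_DIRECTION() is chosen per descriptor
 * with BIOCSDIRECTION, e.g.
 *
 *	u_int dir = BPF_D_IN;		// received packets only
 *	(void)ioctl(fd, BIOCSDIRECTION, &dir);
 *
 * The default, BPF_D_INOUT (set in bpfopen() above), taps both ways.
 */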
2368 struct bpf_d *d;
2376 if ((m->m_flags & M_PROMISC) != 0 && m_rcvif(m) == NULL) {
2377 m->m_flags &= ~M_PROMISC;
2385 CK_LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
2386 if (BPF_CHECK_DIRECTION(d, m_rcvif(m), bp->bif_ifp))
2388 counter_u64_add(d->bd_rcount, 1);
2390 bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
2392 if (bf != NULL && m->m_next == NULL)
2393 slen = (*(bf->func))(mtod(m, u_char *), pktlen,
2397 slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
2399 BPFD_LOCK(d);
2401 counter_u64_add(d->bd_fcount, 1);
2402 if (gottime < bpf_ts_quality(d->bd_tstamp))
2403 gottime = bpf_gettime(&bt, d->bd_tstamp, m);
2405 if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
2407 catchpacket(d, (u_char *)m, pktlen, slen,
2409 BPFD_UNLOCK(d);
2418 if (bpf_peers_present(ifp->if_bpf)) {
2420 bpf_mtap(ifp->if_bpf, m);
2434 struct bpf_d *d;
2439 if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
2440 m->m_flags &= ~M_PROMISC;
2446 * Craft on-stack mbuf suitable for passing to bpf_filter.
2448 * absolutely needed--this mbuf should never go anywhere else.
2459 CK_LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
2460 if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
2462 counter_u64_add(d->bd_rcount, 1);
2463 slen = bpf_filter(d->bd_rfilter, (u_char *)&mb, pktlen, 0);
2465 BPFD_LOCK(d);
2467 counter_u64_add(d->bd_fcount, 1);
2468 if (gottime < bpf_ts_quality(d->bd_tstamp))
2469 gottime = bpf_gettime(&bt, d->bd_tstamp, m);
2471 if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
2473 catchpacket(d, (u_char *)&mb, pktlen, slen,
2475 BPFD_UNLOCK(d);
2484 if (bpf_peers_present(ifp->if_bpf)) {
2486 bpf_mtap2(ifp->if_bpf, data, dlen, m);
2497 bpf_hdrlen(struct bpf_d *d)
2501 hdrlen = d->bd_bif->bif_hdrlen;
2503 if (d->bd_tstamp == BPF_T_NONE ||
2504 BPF_T_FORMAT(d->bd_tstamp) == BPF_T_MICROTIME)
2506 if (d->bd_compat32)
2515 if (d->bd_compat32)
2521 return (hdrlen - d->bd_bif->bif_hdrlen);
2540 ts->bt_sec = tsm.tv_sec;
2541 ts->bt_frac = tsm.tv_usec;
2545 ts->bt_sec = tsn.tv_sec;
2546 ts->bt_frac = tsn.tv_nsec;
2549 ts->bt_sec = bt->sec;
2550 ts->bt_frac = bt->frac;
2563 catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
2580 BPFD_LOCK_ASSERT(d);
2581 if (d->bd_bif == NULL) {
2583 counter_u64_add(d->bd_dcount, 1);
2594 if (d->bd_fbuf == NULL && bpf_canfreebuf(d)) {
2595 d->bd_fbuf = d->bd_hbuf;
2596 d->bd_hbuf = NULL;
2597 d->bd_hlen = 0;
2598 bpf_buf_reclaimed(d);
2607 hdrlen = bpf_hdrlen(d);
2609 if (totlen > d->bd_bufsize)
2610 totlen = d->bd_bufsize;
2621 if (d->bd_compat32)
2622 curlen = BPF_WORDALIGN32(d->bd_slen);
2625 curlen = BPF_WORDALIGN(d->bd_slen);
2626 if (curlen + totlen > d->bd_bufsize || !bpf_canwritebuf(d)) {
2627 if (d->bd_fbuf == NULL) {
2633 bpf_buffull(d);
2634 counter_u64_add(d->bd_dcount, 1);
2637 KASSERT(!d->bd_hbuf_in_use, ("hold buffer is in use"));
2638 ROTATE_BUFFERS(d);
2642 if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) {
2650 pad = curlen - d->bd_slen;
2652 ("%s: invalid pad byte count %d", __func__, pad));
2655 bpf_append_bytes(d, d->bd_sbuf, d->bd_slen, zeroes,
2660 caplen = totlen - hdrlen;
2661 tstype = d->bd_tstamp;
2669 if (d->bd_compat32) {
2678 bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr32_old,
2691 bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr_old,
2707 bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr, sizeof(hdr));
2715 (*cpfn)(d, d->bd_sbuf, curlen + hdrlen, pkt, caplen);
2716 d->bd_slen = curlen + totlen;
2719 bpf_wakeup(d);
2729 struct bpf_d *d;
2737 d = __containerof(ctx, struct bpf_d, epoch_ctx);
2738 bpf_free(d);
2739 if (d->bd_rfilter != NULL) {
2740 p = __containerof((void *)d->bd_rfilter,
2743 p->func = d->bd_bfilter;
2745 bpf_program_buffer_free(&p->epoch_ctx);
2747 if (d->bd_wfilter != NULL) {
2748 p = __containerof((void *)d->bd_wfilter,
2751 p->func = NULL;
2753 bpf_program_buffer_free(&p->epoch_ctx);
2756 mtx_destroy(&d->bd_lock);
2757 counter_u64_free(d->bd_rcount);
2758 counter_u64_free(d->bd_dcount);
2759 counter_u64_free(d->bd_fcount);
2760 counter_u64_free(d->bd_wcount);
2761 counter_u64_free(d->bd_wfcount);
2762 counter_u64_free(d->bd_wdcount);
2763 counter_u64_free(d->bd_zcopy);
2764 free(d, M_BPF);
2775 bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
2795 CK_LIST_INIT(&bp->bif_dlist);
2796 CK_LIST_INIT(&bp->bif_wlist);
2797 bp->bif_ifp = ifp;
2798 bp->bif_dlt = dlt;
2799 bp->bif_hdrlen = hdrlen;
2800 bp->bif_bpf = driverp;
2801 refcount_init(&bp->bif_refcnt, 1);
2819 * query the dlt and hdrlen before detach so we can re-attach the if_bpf
2834 *bif_dlt = bp->bif_dlt;
2836 *bif_hdrlen = bp->bif_hdrlen;
2851 struct bpf_d *d;
2856 if (ifp != bp->bif_ifp)
2860 *bp->bif_bpf = __DECONST(struct bpf_if *, &dead_bpf_if);
2863 "%s: scheduling free for encap %d (%p) for if %p",
2864 __func__, bp->bif_dlt, bp, ifp);
2867 while ((d = CK_LIST_FIRST(&bp->bif_dlist)) != NULL) {
2868 bpf_detachd_locked(d, true);
2871 /* Detach writer-only descriptors */
2872 while ((d = CK_LIST_FIRST(&bp->bif_wlist)) != NULL) {
2873 bpf_detachd_locked(d, true);
2883 return (bpf_peers_present(ifp->if_bpf));
2890 bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
2899 ifp = d->bd_bif->bif_ifp;
2902 if (bp->bif_ifp == ifp)
2905 if (bfl->bfl_list == NULL) {
2906 bfl->bfl_len = n1;
2909 if (n1 > bfl->bfl_len)
2915 if (bp->bif_ifp != ifp)
2917 lst[n++] = bp->bif_dlt;
2919 error = copyout(lst, bfl->bfl_list, sizeof(u_int) * n);
2921 bfl->bfl_len = n;
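
/*
 * Userland counterpart (editorial sketch, not part of the file): the
 * NULL-list convention handled above supports the usual two-call
 * pattern, first sizing and then filling the DLT list.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <net/bpf.h>
#include <stdlib.h>

static u_int *
fetch_dlts(int fd, u_int *lenp)		/* hypothetical helper */
{
	struct bpf_dltlist bfl = { 0, NULL };

	if (ioctl(fd, BIOCGDLTLIST, &bfl) < 0)	/* count only */
		return (NULL);
	if ((bfl.bfl_list = calloc(bfl.bfl_len, sizeof(u_int))) == NULL)
		return (NULL);
	if (ioctl(fd, BIOCGDLTLIST, &bfl) < 0) {	/* fill list */
		free(bfl.bfl_list);
		return (NULL);
	}
	*lenp = bfl.bfl_len;
	return (bfl.bfl_list);
}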
2929 bpf_setdlt(struct bpf_d *d, u_int dlt)
2936 MPASS(d->bd_bif != NULL);
2942 if (d->bd_bif->bif_dlt == dlt)
2945 ifp = d->bd_bif->bif_ifp;
2947 if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
2953 opromisc = d->bd_promisc;
2954 bpf_attachd(d, bp);
2956 error = ifpromisc(bp->bif_ifp, 1);
2958 if_printf(bp->bif_ifp, "%s: ifpromisc failed (%d)\n",
2961 d->bd_promisc = 1;
2996 CK_LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
2997 counter_u64_zero(bd->bd_rcount);
2998 counter_u64_zero(bd->bd_dcount);
2999 counter_u64_zero(bd->bd_fcount);
3000 counter_u64_zero(bd->bd_wcount);
3001 counter_u64_zero(bd->bd_wfcount);
3002 counter_u64_zero(bd->bd_zcopy);
3012 bpfstats_fill_xbpf(struct xbpf_d *d, struct bpf_d *bd)
3016 bzero(d, sizeof(*d));
3017 d->bd_structsize = sizeof(*d);
3018 d->bd_immediate = bd->bd_immediate;
3019 d->bd_promisc = bd->bd_promisc;
3020 d->bd_hdrcmplt = bd->bd_hdrcmplt;
3021 d->bd_direction = bd->bd_direction;
3022 d->bd_feedback = bd->bd_feedback;
3023 d->bd_async = bd->bd_async;
3024 d->bd_rcount = counter_u64_fetch(bd->bd_rcount);
3025 d->bd_dcount = counter_u64_fetch(bd->bd_dcount);
3026 d->bd_fcount = counter_u64_fetch(bd->bd_fcount);
3027 d->bd_sig = bd->bd_sig;
3028 d->bd_slen = bd->bd_slen;
3029 d->bd_hlen = bd->bd_hlen;
3030 d->bd_bufsize = bd->bd_bufsize;
3031 d->bd_pid = bd->bd_pid;
3032 strlcpy(d->bd_ifname,
3033 bd->bd_bif->bif_ifp->if_xname, IFNAMSIZ);
3034 d->bd_locked = bd->bd_locked;
3035 d->bd_wcount = counter_u64_fetch(bd->bd_wcount);
3036 d->bd_wdcount = counter_u64_fetch(bd->bd_wdcount);
3037 d->bd_wfcount = counter_u64_fetch(bd->bd_wfcount);
3038 d->bd_zcopy = counter_u64_fetch(bd->bd_zcopy);
3039 d->bd_bufmode = bd->bd_bufmode;
3043 * Handle `netstat -B' stats request
3060 error = priv_check(req->td, PRIV_NET_BPF);
3068 if (req->newptr != NULL) {
3069 if (req->newlen != sizeof(tempstats))
3080 if (req->oldptr == NULL)
3084 xbdbuf = malloc(req->oldlen, M_BPF, M_WAITOK);
3086 if (req->oldlen < (bpf_bpfd_cnt * sizeof(*xbd))) {
3093 /* Send writers-only first */
3094 CK_LIST_FOREACH(bd, &bp->bif_wlist, bd_next) {
3098 CK_LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
3114 * NOP stubs to allow bpf-using drivers to load and function.
3141 bpf_mtap2(struct bpf_if *bp, void *d, u_int l, struct mbuf *m)
3154 bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
3178 return (-1); /* "no filter" behaviour */
3197 #define BPF_DB_PRINTF(f, e) db_printf(" %s = " f "\n", #e, bpf_if->e);
3206 BPF_DB_PRINTF_RAW("%u", refcount_load(&bpf_if->bif_refcnt));