Lines matching the symbol d, the per-descriptor state (struct bpf_d) used throughout FreeBSD's BPF device code (sys/net/bpf.c). Each entry below carries its line number in that file.

328 bpfd_ref(struct bpf_d *d)
331 refcount_acquire(&d->bd_refcnt);
335 bpfd_rele(struct bpf_d *d)
338 if (!refcount_release(&d->bd_refcnt))
340 NET_EPOCH_CALL(bpfd_free, &d->epoch_ctx);
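bpfd_ref() and bpfd_rele() show the descriptor lifetime rule: every lookup takes a reference, and the final release does not free the descriptor directly but defers bpfd_free() through NET_EPOCH_CALL(), so lockless readers walking the interface's descriptor lists inside a net epoch section never touch freed memory. Below is a minimal userspace sketch of the same refcount-plus-deferred-free shape using C11 atomics; defer_free() is a hypothetical stand-in that frees immediately instead of waiting for epoch readers to drain.

#include <stdatomic.h>
#include <stdlib.h>

struct desc {
	atomic_uint refcnt;
	/* ... per-descriptor state would live here ... */
};

/*
 * Stand-in for NET_EPOCH_CALL(): the kernel defers the free until all
 * concurrent epoch readers have drained; this sketch frees right away.
 */
static void
defer_free(void (*fn)(void *), void *arg)
{
	fn(arg);
}

static void
desc_free(void *arg)
{
	free(arg);
}

static void
desc_ref(struct desc *d)
{
	atomic_fetch_add_explicit(&d->refcnt, 1, memory_order_relaxed);
}

static void
desc_rele(struct desc *d)
{
	/* Only the thread dropping the last reference schedules the free. */
	if (atomic_fetch_sub_explicit(&d->refcnt, 1, memory_order_acq_rel) != 1)
		return;
	defer_free(desc_free, d);
}

int
main(void)
{
	struct desc *d = calloc(1, sizeof(*d));

	atomic_init(&d->refcnt, 1);	/* creation reference */
	desc_ref(d);			/* e.g. taken before dropping a lock */
	desc_rele(d);
	desc_rele(d);			/* last release: descriptor is freed */
	return (0);
}

The acquire/release ordering on the decrement mirrors what refcount_release() guarantees: the releasing thread must observe all prior writes to the object before tearing it down.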
370 bpf_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
374 BPFD_LOCK_ASSERT(d);
376 switch (d->bd_bufmode) {
378 return (bpf_buffer_append_bytes(d, buf, offset, src, len));
381 counter_u64_add(d->bd_zcopy, 1);
382 return (bpf_zerocopy_append_bytes(d, buf, offset, src, len));
390 bpf_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
394 BPFD_LOCK_ASSERT(d);
396 switch (d->bd_bufmode) {
398 return (bpf_buffer_append_mbuf(d, buf, offset, src, len));
401 counter_u64_add(d->bd_zcopy, 1);
402 return (bpf_zerocopy_append_mbuf(d, buf, offset, src, len));
413 bpf_buf_reclaimed(struct bpf_d *d)
416 BPFD_LOCK_ASSERT(d);
418 switch (d->bd_bufmode) {
423 bpf_zerocopy_buf_reclaimed(d);
437 bpf_canfreebuf(struct bpf_d *d)
440 BPFD_LOCK_ASSERT(d);
442 switch (d->bd_bufmode) {
444 return (bpf_zerocopy_canfreebuf(d));
455 bpf_canwritebuf(struct bpf_d *d)
457 BPFD_LOCK_ASSERT(d);
459 switch (d->bd_bufmode) {
461 return (bpf_zerocopy_canwritebuf(d));
472 bpf_buffull(struct bpf_d *d)
475 BPFD_LOCK_ASSERT(d);
477 switch (d->bd_bufmode) {
479 bpf_zerocopy_buffull(d);
488 bpf_bufheld(struct bpf_d *d)
491 BPFD_LOCK_ASSERT(d);
493 switch (d->bd_bufmode) {
495 bpf_zerocopy_bufheld(d);
501 bpf_free(struct bpf_d *d)
504 switch (d->bd_bufmode) {
506 return (bpf_buffer_free(d));
509 return (bpf_zerocopy_free(d));
517 bpf_uiomove(struct bpf_d *d, caddr_t buf, u_int len, struct uio *uio)
520 if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
522 return (bpf_buffer_uiomove(d, buf, len, uio));
526 bpf_ioctl_sblen(struct bpf_d *d, u_int *i)
529 if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
531 return (bpf_buffer_ioctl_sblen(d, i));
535 bpf_ioctl_getzmax(struct thread *td, struct bpf_d *d, size_t *i)
538 if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
540 return (bpf_zerocopy_ioctl_getzmax(td, d, i));
544 bpf_ioctl_rotzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
547 if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
549 return (bpf_zerocopy_ioctl_rotzbuf(td, d, bz));
553 bpf_ioctl_setzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
556 if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
558 return (bpf_zerocopy_ioctl_setzbuf(td, d, bz));
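bpf_ioctl_getzmax(), bpf_ioctl_setzbuf() and bpf_ioctl_rotzbuf() only accept descriptors already switched to BPF_BUFMODE_ZBUF; they back the BIOCGETZMAX, BIOCSETZBUF and BIOCROTZBUF ioctls of bpf(4), where userland supplies the two shared buffers itself. The following is a hedged setup sketch: the ioctl and structure names come from <net/bpf.h>, while the interface name "em0" and the thin error handling are illustrative only.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <net/if.h>
#include <net/bpf.h>
#include <fcntl.h>
#include <string.h>
#include <err.h>

int
main(void)
{
	struct bpf_zbuf zb;
	struct ifreq ifr;
	u_int mode = BPF_BUFMODE_ZBUF;
	size_t zmax;
	int fd;

	if ((fd = open("/dev/bpf", O_RDWR)) == -1)
		err(1, "open");
	/* The buffer mode may only change while no buffers or ifnet are attached. */
	if (ioctl(fd, BIOCSETBUFMODE, &mode) == -1)
		err(1, "BIOCSETBUFMODE");
	if (ioctl(fd, BIOCGETZMAX, &zmax) == -1)
		err(1, "BIOCGETZMAX");

	/* Two shared buffers supplied by userland (mmap error checks omitted). */
	memset(&zb, 0, sizeof(zb));
	zb.bz_buflen = zmax;
	zb.bz_bufa = mmap(NULL, zmax, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_SHARED, -1, 0);
	zb.bz_bufb = mmap(NULL, zmax, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_SHARED, -1, 0);
	if (ioctl(fd, BIOCSETZBUF, &zb) == -1)
		err(1, "BIOCSETZBUF");

	/* Only after the buffers exist may the descriptor be bound to an ifnet. */
	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
	if (ioctl(fd, BIOCSETIF, &ifr) == -1)
		err(1, "BIOCSETIF");
	return (0);
}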
566 struct sockaddr *sockp, int *hdrlen, struct bpf_d *d)
661 slen = bpf_filter(d->bd_wfilter, mtod(m, u_char *), len, len);
678 if (d->bd_hdrcmplt == 0) {
692 * NB: sockp is known to be zero'd so if we do a
717 * Attach descriptor to the bpf interface, i.e. make d listen on bp,
721 bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
731 op_w = V_bpf_optimize_writers || d->bd_writer;
733 if (d->bd_bif != NULL)
734 bpf_detachd_locked(d, false);
736 * Point d at bp, and add d to the interface's list.
739 * we can delay adding d to the list of active listeners until
743 BPFD_LOCK(d);
748 d->bd_bif = bp;
751 CK_LIST_INSERT_HEAD(&bp->bif_wlist, d, bd_next);
758 d->bd_writer = 2;
760 CK_LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);
762 reset_d(d);
765 bpf_wakeup(d);
767 BPFD_UNLOCK(d);
770 CTR3(KTR_NET, "%s: bpf_attach called by pid %d, adding to %s list",
771 __func__, d->bd_pid, d->bd_writer ? "writer" : "active");
778 * Check if we need to upgrade our descriptor @d from write-only mode.
781 bpf_check_upgrade(u_long cmd, struct bpf_d *d, struct bpf_insn *fcode,
789 if (d->bd_writer == 0 || fcode == NULL)
799 * we'd prefer to treat k=0 (deny ALL) case the same way: e.g.
821 if (--d->bd_writer == 0) {
832 "%s: filter function set by pid %d, "
833 "bd_writer counter %d, snap %d upgrade %d",
834 __func__, d->bd_pid, d->bd_writer,
844 bpf_detachd(struct bpf_d *d)
847 bpf_detachd_locked(d, false);
852 bpf_detachd_locked(struct bpf_d *d, bool detached_ifp)
859 CTR2(KTR_NET, "%s: detach required by pid %d", __func__, d->bd_pid);
862 if ((bp = d->bd_bif) == NULL)
865 BPFD_LOCK(d);
866 /* Remove d from the interface's descriptor list. */
867 CK_LIST_REMOVE(d, bd_next);
869 error = d->bd_writer;
871 d->bd_bif = NULL;
877 bpf_wakeup(d);
879 BPFD_UNLOCK(d);
882 /* Call event handler iff d is attached */
890 if (d->bd_promisc && !detached_ifp) {
891 d->bd_promisc = 0;
903 "bpf_detach: ifpromisc failed (%d)\n", error);
916 struct bpf_d *d = data;
918 BPFD_LOCK(d);
919 if (d->bd_state == BPF_WAITING)
920 callout_stop(&d->bd_callout);
921 d->bd_state = BPF_IDLE;
922 BPFD_UNLOCK(d);
923 funsetown(&d->bd_sigio);
924 bpf_detachd(d);
926 mac_bpfdesc_destroy(d);
928 seldrain(&d->bd_sel);
929 knlist_destroy(&d->bd_sel.si_note);
930 callout_drain(&d->bd_callout);
931 bpfd_rele(d);
942 struct bpf_d *d;
945 d = malloc(sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
946 error = devfs_set_cdevpriv(d, bpf_dtor);
948 free(d, M_BPF);
953 d->bd_rcount = counter_u64_alloc(M_WAITOK);
954 d->bd_dcount = counter_u64_alloc(M_WAITOK);
955 d->bd_fcount = counter_u64_alloc(M_WAITOK);
956 d->bd_wcount = counter_u64_alloc(M_WAITOK);
957 d->bd_wfcount = counter_u64_alloc(M_WAITOK);
958 d->bd_wdcount = counter_u64_alloc(M_WAITOK);
959 d->bd_zcopy = counter_u64_alloc(M_WAITOK);
966 bpf_buffer_init(d);
968 d->bd_writer = 2;
969 d->bd_hbuf_in_use = 0;
970 d->bd_bufmode = BPF_BUFMODE_BUFFER;
971 d->bd_sig = SIGIO;
972 d->bd_direction = BPF_D_INOUT;
973 refcount_init(&d->bd_refcnt, 1);
974 BPF_PID_REFRESH(d, td);
976 mac_bpfdesc_init(d);
977 mac_bpfdesc_create(td->td_ucred, d);
979 mtx_init(&d->bd_lock, devtoname(dev), "bpf cdev lock", MTX_DEF);
980 callout_init_mtx(&d->bd_callout, &d->bd_lock, 0);
981 knlist_init_mtx(&d->bd_sel.si_note, &d->bd_lock);
984 d->bd_pcp = 0;
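The open path above allocates a zeroed descriptor per open of the cloning device, sets up its counters, lock, callout and knlist, and starts it in writer mode (bd_writer = 2), buffered mode, direction BPF_D_INOUT and SIGIO as the notification signal. From userland each open(2) of /dev/bpf therefore yields an independent descriptor whose buffer size can still be tuned, but only before an interface is attached. A hedged sketch follows; BIOCSBLEN and BIOCGBLEN are standard bpf(4) ioctls, and the 512 KiB request is illustrative and may be clamped by the kernel.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <net/bpf.h>
#include <fcntl.h>
#include <stdio.h>
#include <err.h>

int
main(void)
{
	u_int blen = 512 * 1024;	/* illustrative request */
	int fd;

	if ((fd = open("/dev/bpf", O_RDWR)) == -1)
		err(1, "open");
	/* The buffer size is only adjustable before BIOCSETIF attaches an interface. */
	if (ioctl(fd, BIOCSBLEN, &blen) == -1)
		err(1, "BIOCSBLEN");
	if (ioctl(fd, BIOCGBLEN, &blen) == -1)
		err(1, "BIOCGBLEN");
	printf("buffer size granted: %u bytes\n", blen);
	return (0);
}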
995 struct bpf_d *d;
1000 error = devfs_get_cdevpriv((void **)&d);
1008 if (uio->uio_resid != d->bd_bufsize)
1013 BPFD_LOCK(d);
1014 BPF_PID_REFRESH_CUR(d);
1015 if (d->bd_bufmode != BPF_BUFMODE_BUFFER) {
1016 BPFD_UNLOCK(d);
1019 if (d->bd_state == BPF_WAITING)
1020 callout_stop(&d->bd_callout);
1021 timed_out = (d->bd_state == BPF_TIMED_OUT);
1022 d->bd_state = BPF_IDLE;
1023 while (d->bd_hbuf_in_use) {
1024 error = mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
1027 BPFD_UNLOCK(d);
1036 while (d->bd_hbuf == NULL) {
1037 if (d->bd_slen != 0) {
1042 if (d->bd_immediate || non_block || timed_out) {
1048 ROTATE_BUFFERS(d);
1059 if (d->bd_bif == NULL) {
1060 BPFD_UNLOCK(d);
1065 BPFD_UNLOCK(d);
1068 error = msleep(d, &d->bd_lock, PRINET | PCATCH,
1069 "bpf", d->bd_rtout);
1071 BPFD_UNLOCK(d);
1080 if (d->bd_hbuf)
1088 if (d->bd_slen == 0) {
1089 BPFD_UNLOCK(d);
1092 ROTATE_BUFFERS(d);
1099 d->bd_hbuf_in_use = 1;
1100 BPFD_UNLOCK(d);
1110 error = bpf_uiomove(d, d->bd_hbuf, d->bd_hlen, uio);
1112 BPFD_LOCK(d);
1113 KASSERT(d->bd_hbuf != NULL, ("bpfread: lost bd_hbuf"));
1114 d->bd_fbuf = d->bd_hbuf;
1115 d->bd_hbuf = NULL;
1116 d->bd_hlen = 0;
1117 bpf_buf_reclaimed(d);
1118 d->bd_hbuf_in_use = 0;
1119 wakeup(&d->bd_hbuf_in_use);
1120 BPFD_UNLOCK(d);
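The bpfread() lines above encode the read contract: the request must be exactly bd_bufsize bytes, the caller may block until the hold buffer fills or the read timeout expires, and on success the whole hold buffer is copied out and recycled as the new free buffer. Each buffer handed back contains zero or more packets, every one preceded by a struct bpf_hdr and padded to a BPF_WORDALIGN boundary. A hedged userspace capture loop follows; the ioctls and header fields are standard bpf(4), and the interface name "em0" is illustrative.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <net/bpf.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <err.h>

int
main(void)
{
	struct ifreq ifr;
	u_int buflen, imm = 1;
	char *buf;
	ssize_t n;
	int fd;

	if ((fd = open("/dev/bpf", O_RDONLY)) == -1)
		err(1, "open");
	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
	if (ioctl(fd, BIOCSETIF, &ifr) == -1)
		err(1, "BIOCSETIF");
	if (ioctl(fd, BIOCIMMEDIATE, &imm) == -1)	/* don't wait for a full buffer */
		err(1, "BIOCIMMEDIATE");
	if (ioctl(fd, BIOCGBLEN, &buflen) == -1)	/* reads must be exactly this size */
		err(1, "BIOCGBLEN");
	buf = malloc(buflen);

	while ((n = read(fd, buf, buflen)) > 0) {
		char *p = buf;

		/* One read may hold several packets, each behind a bpf_hdr. */
		while (p < buf + n) {
			struct bpf_hdr *bh = (struct bpf_hdr *)p;

			printf("captured %u of %u bytes\n",
			    bh->bh_caplen, bh->bh_datalen);
			p += BPF_WORDALIGN(bh->bh_hdrlen + bh->bh_caplen);
		}
	}
	return (0);
}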
1129 bpf_wakeup(struct bpf_d *d)
1132 BPFD_LOCK_ASSERT(d);
1133 if (d->bd_state == BPF_WAITING) {
1134 callout_stop(&d->bd_callout);
1135 d->bd_state = BPF_IDLE;
1137 wakeup(d);
1138 if (d->bd_async && d->bd_sig && d->bd_sigio)
1139 pgsigio(&d->bd_sigio, d->bd_sig, 0);
1141 selwakeuppri(&d->bd_sel, PRINET);
1142 KNOTE_LOCKED(&d->bd_sel.si_note, 0);
1148 struct bpf_d *d = (struct bpf_d *)arg;
1150 BPFD_LOCK_ASSERT(d);
1152 if (callout_pending(&d->bd_callout) ||
1153 !callout_active(&d->bd_callout))
1155 if (d->bd_state == BPF_WAITING) {
1156 d->bd_state = BPF_TIMED_OUT;
1157 if (d->bd_slen != 0)
1158 bpf_wakeup(d);
1163 bpf_ready(struct bpf_d *d)
1166 BPFD_LOCK_ASSERT(d);
1168 if (!bpf_canfreebuf(d) && d->bd_hlen != 0)
1170 if ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
1171 d->bd_slen != 0)
1183 struct bpf_d *d;
1188 error = devfs_get_cdevpriv((void **)&d);
1193 BPFD_LOCK(d);
1194 BPF_PID_REFRESH_CUR(d);
1195 counter_u64_add(d->bd_wcount, 1);
1196 if ((bp = d->bd_bif) == NULL) {
1215 * Take extra reference, unlock d and exit from epoch section,
1218 bpfd_ref(d);
1220 BPFD_UNLOCK(d);
1223 &m, &dst, &hlen, d);
1226 counter_u64_add(d->bd_wdcount, 1);
1227 bpfd_rele(d);
1231 BPFD_LOCK(d);
1237 if (d->bd_bif == NULL) {
1238 counter_u64_add(d->bd_wdcount, 1);
1239 BPFD_UNLOCK(d);
1240 bpfd_rele(d);
1244 counter_u64_add(d->bd_wfcount, 1);
1245 if (d->bd_hdrcmplt)
1248 if (d->bd_feedback) {
1253 if (d->bd_direction == BPF_D_INOUT)
1264 mac_bpfdesc_create_mbuf(d, m);
1266 mac_bpfdesc_create_mbuf(d, mc);
1276 if (d->bd_pcp != 0)
1277 vlan_set_pcp(m, d->bd_pcp);
1281 BPFD_UNLOCK(d);
1284 counter_u64_add(d->bd_wdcount, 1);
1294 bpfd_rele(d);
1298 counter_u64_add(d->bd_wdcount, 1);
1300 BPFD_UNLOCK(d);
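bpfwrite() turns the user buffer into an mbuf chain via bpf_movein() (the function whose lines start at 566 above), runs the optional write filter, optionally loops a feedback copy back to the tap when bd_feedback is set, applies the configured PCP, and transmits on the bound interface. When the header-complete flag is set the caller supplies the entire link-layer header; otherwise the kernel fills in the source address. A hedged userspace sketch using the BIOCSETIF and BIOCSHDRCMPLT ioctls; "em0" and the broadcast frame contents are illustrative.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <net/bpf.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <err.h>

int
main(void)
{
	struct ifreq ifr;
	u_int hdrcmplt = 1;
	unsigned char frame[64] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff,	/* dst: broadcast */
		0x02, 0x00, 0x00, 0x00, 0x00, 0x01,	/* src: locally administered */
		0x88, 0xb5,				/* experimental ethertype */
		/* payload left zeroed */
	};
	int fd;

	if ((fd = open("/dev/bpf", O_WRONLY)) == -1)
		err(1, "open");
	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
	if (ioctl(fd, BIOCSETIF, &ifr) == -1)
		err(1, "BIOCSETIF");
	/* We provide the full link-layer header ourselves. */
	if (ioctl(fd, BIOCSHDRCMPLT, &hdrcmplt) == -1)
		err(1, "BIOCSHDRCMPLT");
	if (write(fd, frame, sizeof(frame)) == -1)
		err(1, "write");
	close(fd);
	return (0);
}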
1312 reset_d(struct bpf_d *d)
1315 BPFD_LOCK_ASSERT(d);
1317 while (d->bd_hbuf_in_use)
1318 mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock, PRINET,
1320 if ((d->bd_hbuf != NULL) &&
1321 (d->bd_bufmode != BPF_BUFMODE_ZBUF || bpf_canfreebuf(d))) {
1323 d->bd_fbuf = d->bd_hbuf;
1324 d->bd_hbuf = NULL;
1325 d->bd_hlen = 0;
1326 bpf_buf_reclaimed(d);
1328 if (bpf_canwritebuf(d))
1329 d->bd_slen = 0;
1330 counter_u64_zero(d->bd_rcount);
1331 counter_u64_zero(d->bd_dcount);
1332 counter_u64_zero(d->bd_fcount);
1333 counter_u64_zero(d->bd_wcount);
1334 counter_u64_zero(d->bd_wfcount);
1335 counter_u64_zero(d->bd_wdcount);
1336 counter_u64_zero(d->bd_zcopy);
1375 struct bpf_d *d;
1378 error = devfs_get_cdevpriv((void **)&d);
1385 BPFD_LOCK(d);
1386 BPF_PID_REFRESH(d, td);
1387 if (d->bd_state == BPF_WAITING)
1388 callout_stop(&d->bd_callout);
1389 d->bd_state = BPF_IDLE;
1390 BPFD_UNLOCK(d);
1392 if (d->bd_locked == 1) {
1439 BPFD_LOCK(d);
1440 d->bd_compat32 = 1;
1441 BPFD_UNLOCK(d);
1459 BPFD_LOCK(d);
1460 n = d->bd_slen;
1461 while (d->bd_hbuf_in_use)
1462 mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
1464 if (d->bd_hbuf)
1465 n += d->bd_hlen;
1466 BPFD_UNLOCK(d);
1476 BPFD_LOCK(d);
1477 *(u_int *)addr = d->bd_bufsize;
1478 BPFD_UNLOCK(d);
1485 error = bpf_ioctl_sblen(d, (u_int *)addr);
1499 error = bpf_setf(d, (struct bpf_program *)addr, cmd);
1506 BPFD_LOCK(d);
1507 reset_d(d);
1508 BPFD_UNLOCK(d);
1516 if (d->bd_bif == NULL) {
1521 } else if (d->bd_promisc == 0) {
1522 error = ifpromisc(d->bd_bif->bif_ifp, 1);
1524 d->bd_promisc = 1;
1534 if (d->bd_bif == NULL)
1537 *(u_int *)addr = d->bd_bif->bif_dlt;
1554 if (d->bd_bif == NULL)
1557 error = bpf_getdltlist(d, &dltlist);
1568 if (d->bd_bif == NULL)
1571 error = bpf_getdltlist(d, (struct bpf_dltlist *)addr);
1580 if (d->bd_bif == NULL)
1583 error = bpf_setdlt(d, *(u_int *)addr);
1592 if (d->bd_bif == NULL)
1595 struct ifnet *const ifp = d->bd_bif->bif_ifp;
1619 BPFD_LOCK(d);
1620 if (d->bd_bufmode == BPF_BUFMODE_BUFFER &&
1621 d->bd_sbuf == NULL)
1623 BPFD_UNLOCK(d);
1625 size = d->bd_bufsize;
1626 error = bpf_buffer_ioctl_sblen(d, &size);
1631 error = bpf_setif(d, (struct ifreq *)addr);
1663 d->bd_rtout = tvtohz(tv) - 1;
1686 tv->tv_sec = d->bd_rtout / hz;
1687 tv->tv_usec = (d->bd_rtout % hz) * tick;
1707 bs->bs_recv = (u_int)counter_u64_fetch(d->bd_rcount);
1708 bs->bs_drop = (u_int)counter_u64_fetch(d->bd_dcount);
1716 BPFD_LOCK(d);
1717 d->bd_immediate = *(u_int *)addr;
1718 BPFD_UNLOCK(d);
1734 BPFD_LOCK(d);
1735 *(u_int *)addr = d->bd_hdrcmplt;
1736 BPFD_UNLOCK(d);
1743 BPFD_LOCK(d);
1744 d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
1745 BPFD_UNLOCK(d);
1752 BPFD_LOCK(d);
1753 *(u_int *)addr = d->bd_direction;
1754 BPFD_UNLOCK(d);
1769 BPFD_LOCK(d);
1770 d->bd_direction = direction;
1771 BPFD_UNLOCK(d);
1783 BPFD_LOCK(d);
1784 *(u_int *)addr = d->bd_tstamp;
1785 BPFD_UNLOCK(d);
1797 d->bd_tstamp = func;
1804 BPFD_LOCK(d);
1805 d->bd_feedback = *(u_int *)addr;
1806 BPFD_UNLOCK(d);
1810 BPFD_LOCK(d);
1811 d->bd_locked = 1;
1812 BPFD_UNLOCK(d);
1819 BPFD_LOCK(d);
1820 d->bd_async = *(int *)addr;
1821 BPFD_UNLOCK(d);
1829 error = fsetown(*(int *)addr, &d->bd_sigio);
1833 BPFD_LOCK(d);
1834 *(int *)addr = fgetown(&d->bd_sigio);
1835 BPFD_UNLOCK(d);
1840 error = fsetown(-(*(int *)addr), &d->bd_sigio);
1845 *(int *)addr = -fgetown(&d->bd_sigio);
1857 BPFD_LOCK(d);
1858 d->bd_sig = sig;
1859 BPFD_UNLOCK(d);
1864 BPFD_LOCK(d);
1865 *(u_int *)addr = d->bd_sig;
1866 BPFD_UNLOCK(d);
1870 BPFD_LOCK(d);
1871 *(u_int *)addr = d->bd_bufmode;
1872 BPFD_UNLOCK(d);
1897 BPFD_LOCK(d);
1898 if (d->bd_sbuf != NULL || d->bd_hbuf != NULL ||
1899 d->bd_fbuf != NULL || d->bd_bif != NULL) {
1900 BPFD_UNLOCK(d);
1904 d->bd_bufmode = *(u_int *)addr;
1905 BPFD_UNLOCK(d);
1909 error = bpf_ioctl_getzmax(td, d, (size_t *)addr);
1913 error = bpf_ioctl_setzbuf(td, d, (struct bpf_zbuf *)addr);
1917 error = bpf_ioctl_rotzbuf(td, d, (struct bpf_zbuf *)addr);
1929 d->bd_pcp = pcp;
1938 * Set d's packet filter program to fp. If this file already has a filter,
1945 bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd)
2018 BPFD_LOCK(d);
2021 if (d->bd_wfilter != NULL) {
2022 fcode = __containerof((void *)d->bd_wfilter,
2028 d->bd_wfilter = filter;
2030 if (d->bd_rfilter != NULL) {
2031 fcode = __containerof((void *)d->bd_rfilter,
2034 fcode->func = d->bd_bfilter;
2037 d->bd_rfilter = filter;
2039 d->bd_bfilter = jfunc;
2042 reset_d(d);
2044 if (bpf_check_upgrade(cmd, d, filter, flen) != 0) {
2047 * specifying interface. In this case just mark d
2050 d->bd_writer = 0;
2051 if (d->bd_bif != NULL) {
2056 CK_LIST_REMOVE(d, bd_next);
2057 CK_LIST_INSERT_HEAD(&d->bd_bif->bif_dlist,
2058 d, bd_next);
2060 "%s: upgrade required by pid %d",
2061 __func__, d->bd_pid);
2066 BPFD_UNLOCK(d);
2073 d->bd_bif->bif_ifp, d->bd_bif->bif_dlt, 1);
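bpf_setf() copies in and validates the program, swaps it under the descriptor lock (the old instructions are released through the epoch callback), resets the buffers, and lets bpf_check_upgrade() move a write-only descriptor onto the interface's active reader list once a real read filter is installed. Userland hands the program to BIOCSETF as an array of struct bpf_insn. Below is a hedged example that accepts only IPv4 Ethernet frames; the classic BPF opcodes and the BPF_STMT/BPF_JUMP macros come from <net/bpf.h>, and the descriptor fd is assumed to be open already.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <net/bpf.h>
#include <err.h>

/* Install a classic BPF program that passes only IPv4 Ethernet frames. */
static void
bpf_accept_ipv4_only(int fd)
{
	static struct bpf_insn insns[] = {
		BPF_STMT(BPF_LD + BPF_H + BPF_ABS, 12),		   /* load the ethertype */
		BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, 0x0800, 0, 1), /* IPv4? */
		BPF_STMT(BPF_RET + BPF_K, (u_int)-1),		   /* accept the whole packet */
		BPF_STMT(BPF_RET + BPF_K, 0),			   /* reject */
	};
	struct bpf_program prog = {
		.bf_len = sizeof(insns) / sizeof(insns[0]),
		.bf_insns = insns,
	};

	if (ioctl(fd, BIOCSETF, &prog) == -1)
		err(1, "BIOCSETF");
}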
2085 bpf_setif(struct bpf_d *d, struct ifreq *ifr)
2101 switch (d->bd_bufmode) {
2104 if (d->bd_sbuf == NULL)
2109 panic("bpf_setif: bufmode %d", d->bd_bufmode);
2111 if (bp != d->bd_bif)
2112 bpf_attachd(d, bp);
2114 BPFD_LOCK(d);
2115 reset_d(d);
2116 BPFD_UNLOCK(d);
2130 struct bpf_d *d;
2133 if (devfs_get_cdevpriv((void **)&d) != 0 || d->bd_bif == NULL)
2141 BPFD_LOCK(d);
2142 BPF_PID_REFRESH(d, td);
2144 if (bpf_ready(d))
2147 selrecord(td, &d->bd_sel);
2149 if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
2150 callout_reset(&d->bd_callout, d->bd_rtout,
2151 bpf_timed_out, d);
2152 d->bd_state = BPF_WAITING;
2156 BPFD_UNLOCK(d);
2167 struct bpf_d *d;
2169 if (devfs_get_cdevpriv((void **)&d) != 0)
2188 BPFD_LOCK(d);
2189 BPF_PID_REFRESH_CUR(d);
2190 kn->kn_hook = d;
2191 knlist_add(&d->bd_sel.si_note, kn, 1);
2192 BPFD_UNLOCK(d);
2200 struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
2202 knlist_remove(&d->bd_sel.si_note, kn, 0);
2208 struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
2211 BPFD_LOCK_ASSERT(d);
2212 ready = bpf_ready(d);
2214 kn->kn_data = d->bd_slen;
2218 if (!d->bd_hbuf_in_use && d->bd_hbuf)
2219 kn->kn_data += d->bd_hlen;
2220 } else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
2221 callout_reset(&d->bd_callout, d->bd_rtout,
2222 bpf_timed_out, d);
2223 d->bd_state = BPF_WAITING;
2232 struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
2234 BPFD_LOCK_ASSERT(d);
2236 if (d->bd_bif == NULL) {
2240 kn->kn_data = d->bd_bif->bif_ifp->if_mtu;
2304 struct bpf_d *d;
2313 CK_LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
2314 counter_u64_add(d->bd_rcount, 1);
2322 bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
2327 slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
2332 BPFD_LOCK(d);
2333 counter_u64_add(d->bd_fcount, 1);
2334 if (gottime < bpf_ts_quality(d->bd_tstamp))
2335 gottime = bpf_gettime(&bt, d->bd_tstamp,
2338 if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
2340 catchpacket(d, pkt, pktlen, slen,
2342 BPFD_UNLOCK(d);
2355 #define BPF_CHECK_DIRECTION(d, r, i) \
2356 (((d)->bd_direction == BPF_D_IN && (r) != (i)) || \
2357 ((d)->bd_direction == BPF_D_OUT && (r) == (i)))
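BPF_CHECK_DIRECTION compares the mbuf's receive interface with the tap's own interface: with BPF_D_IN only packets that arrived on the interface are delivered, with BPF_D_OUT only locally generated ones, and the default BPF_D_INOUT (set in the open path) passes both. Userland selects this with the BIOCSDIRECTION ioctl; a hedged snippet assuming an already-configured descriptor fd:

#include <sys/types.h>
#include <sys/ioctl.h>
#include <net/bpf.h>
#include <err.h>

/* Deliver only packets received on the interface, not locally sent ones. */
static void
bpf_inbound_only(int fd)
{
	u_int dir = BPF_D_IN;

	if (ioctl(fd, BIOCSDIRECTION, &dir) == -1)
		err(1, "BIOCSDIRECTION");
}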
2368 struct bpf_d *d;
2385 CK_LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
2386 if (BPF_CHECK_DIRECTION(d, m_rcvif(m), bp->bif_ifp))
2388 counter_u64_add(d->bd_rcount, 1);
2390 bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
2397 slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
2399 BPFD_LOCK(d);
2401 counter_u64_add(d->bd_fcount, 1);
2402 if (gottime < bpf_ts_quality(d->bd_tstamp))
2403 gottime = bpf_gettime(&bt, d->bd_tstamp, m);
2405 if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
2407 catchpacket(d, (u_char *)m, pktlen, slen,
2409 BPFD_UNLOCK(d);
2434 struct bpf_d *d;
2459 CK_LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
2460 if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
2462 counter_u64_add(d->bd_rcount, 1);
2463 slen = bpf_filter(d->bd_rfilter, (u_char *)&mb, pktlen, 0);
2465 BPFD_LOCK(d);
2467 counter_u64_add(d->bd_fcount, 1);
2468 if (gottime < bpf_ts_quality(d->bd_tstamp))
2469 gottime = bpf_gettime(&bt, d->bd_tstamp, m);
2471 if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
2473 catchpacket(d, (u_char *)&mb, pktlen, slen,
2475 BPFD_UNLOCK(d);
2497 bpf_hdrlen(struct bpf_d *d)
2501 hdrlen = d->bd_bif->bif_hdrlen;
2503 if (d->bd_tstamp == BPF_T_NONE ||
2504 BPF_T_FORMAT(d->bd_tstamp) == BPF_T_MICROTIME)
2506 if (d->bd_compat32)
2515 if (d->bd_compat32)
2521 return (hdrlen - d->bd_bif->bif_hdrlen);
2563 catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
2580 BPFD_LOCK_ASSERT(d);
2581 if (d->bd_bif == NULL) {
2583 counter_u64_add(d->bd_dcount, 1);
2594 if (d->bd_fbuf == NULL && bpf_canfreebuf(d)) {
2595 d->bd_fbuf = d->bd_hbuf;
2596 d->bd_hbuf = NULL;
2597 d->bd_hlen = 0;
2598 bpf_buf_reclaimed(d);
2607 hdrlen = bpf_hdrlen(d);
2609 if (totlen > d->bd_bufsize)
2610 totlen = d->bd_bufsize;
2621 if (d->bd_compat32)
2622 curlen = BPF_WORDALIGN32(d->bd_slen);
2625 curlen = BPF_WORDALIGN(d->bd_slen);
2626 if (curlen + totlen > d->bd_bufsize || !bpf_canwritebuf(d)) {
2627 if (d->bd_fbuf == NULL) {
2633 bpf_buffull(d);
2634 counter_u64_add(d->bd_dcount, 1);
2637 KASSERT(!d->bd_hbuf_in_use, ("hold buffer is in use"));
2638 ROTATE_BUFFERS(d);
2642 if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) {
2650 pad = curlen - d->bd_slen;
2652 ("%s: invalid pad byte count %d", __func__, pad));
2655 bpf_append_bytes(d, d->bd_sbuf, d->bd_slen, zeroes,
2661 tstype = d->bd_tstamp;
2669 if (d->bd_compat32) {
2678 bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr32_old,
2691 bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr_old,
2707 bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr, sizeof(hdr));
2715 (*cpfn)(d, d->bd_sbuf, curlen + hdrlen, pkt, caplen);
2716 d->bd_slen = curlen + totlen;
2719 bpf_wakeup(d);
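catchpacket() is the store side of the classic buffer scheme: the header plus the snapped bytes are appended to the store buffer at a word-aligned offset, the store/hold/free triple is rotated when the packet no longer fits, the packet is counted as a drop (bd_dcount) if the reader still owns the hold buffer, and readers are woken immediately in immediate or timed-out mode. Below is a minimal userspace sketch of that rotation discipline only; names and sizes are illustrative, and the timestamp and header formatting done by the kernel code is left out.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define BUFSZ 4096

struct cap {
	char  bufs[2][BUFSZ];
	char *sbuf, *hbuf, *fbuf;	/* store / hold / free buffers */
	size_t slen, hlen;
};

/*
 * Rotate: the filled store buffer becomes the hold buffer handed to the
 * reader, and the free buffer becomes the new store buffer.
 */
static int
rotate(struct cap *c)
{
	if (c->fbuf == NULL)
		return (-1);		/* reader still owns the hold buffer */
	c->hbuf = c->sbuf;
	c->hlen = c->slen;
	c->sbuf = c->fbuf;
	c->slen = 0;
	c->fbuf = NULL;
	return (0);
}

static int
catch_packet(struct cap *c, const void *pkt, size_t len)
{
	if (len > BUFSZ)
		len = BUFSZ;		/* truncate to the buffer size */
	if (c->slen + len > BUFSZ && rotate(c) != 0)
		return (-1);		/* count it as a drop */
	memcpy(c->sbuf + c->slen, pkt, len);
	c->slen += len;
	return (0);
}

int
main(void)
{
	struct cap c = { .slen = 0, .hlen = 0 };
	char pkt[1000] = { 0 };

	c.sbuf = c.bufs[0];
	c.fbuf = c.bufs[1];
	c.hbuf = NULL;
	/* With no reader reclaiming the hold buffer, later packets are dropped. */
	for (int i = 0; i < 10; i++)
		printf("packet %d: %s\n", i,
		    catch_packet(&c, pkt, sizeof(pkt)) == 0 ? "stored" : "dropped");
	return (0);
}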
2729 struct bpf_d *d;
2737 d = __containerof(ctx, struct bpf_d, epoch_ctx);
2738 bpf_free(d);
2739 if (d->bd_rfilter != NULL) {
2740 p = __containerof((void *)d->bd_rfilter,
2743 p->func = d->bd_bfilter;
2747 if (d->bd_wfilter != NULL) {
2748 p = __containerof((void *)d->bd_wfilter,
2756 mtx_destroy(&d->bd_lock);
2757 counter_u64_free(d->bd_rcount);
2758 counter_u64_free(d->bd_dcount);
2759 counter_u64_free(d->bd_fcount);
2760 counter_u64_free(d->bd_wcount);
2761 counter_u64_free(d->bd_wfcount);
2762 counter_u64_free(d->bd_wdcount);
2763 counter_u64_free(d->bd_zcopy);
2764 free(d, M_BPF);
2851 struct bpf_d *d;
2863 "%s: sheduling free for encap %d (%p) for if %p",
2867 while ((d = CK_LIST_FIRST(&bp->bif_dlist)) != NULL) {
2868 bpf_detachd_locked(d, true);
2872 while ((d = CK_LIST_FIRST(&bp->bif_wlist)) != NULL) {
2873 bpf_detachd_locked(d, true);
2890 bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
2899 ifp = d->bd_bif->bif_ifp;
2929 bpf_setdlt(struct bpf_d *d, u_int dlt)
2936 MPASS(d->bd_bif != NULL);
2942 if (d->bd_bif->bif_dlt == dlt)
2945 ifp = d->bd_bif->bif_ifp;
2953 opromisc = d->bd_promisc;
2954 bpf_attachd(d, bp);
2958 if_printf(bp->bif_ifp, "%s: ifpromisc failed (%d)\n",
2961 d->bd_promisc = 1;
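bpf_getdltlist() and bpf_setdlt() back BIOCGDLTLIST and BIOCSDLT: the first enumerates every data-link type the attached interface exposes, the second re-attaches the descriptor to the bpf_if carrying the requested DLT and restores promiscuous mode if it had been enabled. A hedged userspace sketch follows; calling BIOCGDLTLIST with a NULL bfl_list to size the array first is the usual idiom, though that detail is an assumption of this sketch rather than something shown in the listing.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <net/bpf.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <err.h>

/* List the DLTs offered by the attached interface, then switch to the first. */
static void
bpf_pick_first_dlt(int fd)
{
	struct bpf_dltlist dl;
	u_int dlt;

	memset(&dl, 0, sizeof(dl));
	if (ioctl(fd, BIOCGDLTLIST, &dl) == -1)	/* NULL list: just get the count */
		err(1, "BIOCGDLTLIST");
	if (dl.bfl_len == 0)
		return;
	dl.bfl_list = calloc(dl.bfl_len, sizeof(u_int));
	if (ioctl(fd, BIOCGDLTLIST, &dl) == -1)
		err(1, "BIOCGDLTLIST");
	for (u_int i = 0; i < dl.bfl_len; i++)
		printf("supported DLT: %u\n", dl.bfl_list[i]);

	dlt = dl.bfl_list[0];
	if (ioctl(fd, BIOCSDLT, &dlt) == -1)
		err(1, "BIOCSDLT");
	free(dl.bfl_list);
}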
3012 bpfstats_fill_xbpf(struct xbpf_d *d, struct bpf_d *bd)
3016 bzero(d, sizeof(*d));
3017 d->bd_structsize = sizeof(*d);
3018 d->bd_immediate = bd->bd_immediate;
3019 d->bd_promisc = bd->bd_promisc;
3020 d->bd_hdrcmplt = bd->bd_hdrcmplt;
3021 d->bd_direction = bd->bd_direction;
3022 d->bd_feedback = bd->bd_feedback;
3023 d->bd_async = bd->bd_async;
3024 d->bd_rcount = counter_u64_fetch(bd->bd_rcount);
3025 d->bd_dcount = counter_u64_fetch(bd->bd_dcount);
3026 d->bd_fcount = counter_u64_fetch(bd->bd_fcount);
3027 d->bd_sig = bd->bd_sig;
3028 d->bd_slen = bd->bd_slen;
3029 d->bd_hlen = bd->bd_hlen;
3030 d->bd_bufsize = bd->bd_bufsize;
3031 d->bd_pid = bd->bd_pid;
3032 strlcpy(d->bd_ifname,
3034 d->bd_locked = bd->bd_locked;
3035 d->bd_wcount = counter_u64_fetch(bd->bd_wcount);
3036 d->bd_wdcount = counter_u64_fetch(bd->bd_wdcount);
3037 d->bd_wfcount = counter_u64_fetch(bd->bd_wfcount);
3038 d->bd_zcopy = counter_u64_fetch(bd->bd_zcopy);
3039 d->bd_bufmode = bd->bd_bufmode;
3141 bpf_mtap2(struct bpf_if *bp, void *d, u_int l, struct mbuf *m)