1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
33 /*-
152 * * The MLD subsystem lock ends up being system-wide for the moment,
153 * but could be per-VIMAGE later on.
158 * * MLD_LOCK covers per-link state and any global variables in this file.
160 * per-link state iterators.
192 * to a vnet in ifp->if_vnet.
200 (pin6)->s6_addr16[1] = htons((zoneid) & 0xFFFF) \
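
The fragment above is part of the scope-embedding macro: the interface (zone) index is written into the second 16-bit word of a link-local multicast address so the kernel can tell otherwise-identical per-link groups apart, and it is cleared again (see the in6_clearscope() calls later in this listing) before the address goes on the wire. A minimal user-space sketch of the same byte manipulation on a raw 16-byte address (illustrative only, not the kernel macro):

    #include <stdint.h>

    /* Embed a zone (interface) index into bytes 2-3 of an IPv6
     * address in network byte order, and clear it again. */
    void
    embed_zone(uint8_t addr[16], uint16_t zoneid)
    {
            addr[2] = (uint8_t)(zoneid >> 8);
            addr[3] = (uint8_t)(zoneid & 0xff);
    }

    void
    clear_zone(uint8_t addr[16])
    {
            addr[2] = 0;
            addr[3] = 0;
    }
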
203 * VIMAGE-wide globals.
228 "Rate limit for MLDv2 Group-and-Source queries in seconds");
231 * Non-virtualized sysctls.
235 "Per-interface MLDv2 state");
259 * Router Alert hop-by-hop option header.
266 .ip6or_len = IP6OPT_RTALERT_LEN - 2,
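
The "- 2" above is there because an IPv6 option's length field counts only the option data, not the two type/length octets. For context, the complete 8-byte Hop-by-Hop header that MLD messages carry looks like the sketch below (RFC 2711 Router Alert with value 0, meaning "datagram contains MLD"; the layout is hand-rolled here for illustration):

    #include <stdint.h>

    /* Fill in the 8-byte Hop-by-Hop extension header carrying the
     * Router Alert option that MLD packets are sent with. */
    void
    build_mld_hbh(uint8_t hbh[8])
    {
            hbh[0] = 58;    /* Next Header: ICMPv6 */
            hbh[1] = 0;     /* Hdr Ext Len: (8 / 8) - 1 = 0 */
            hbh[2] = 0x05;  /* Option Type: Router Alert */
            hbh[3] = 2;     /* Opt Data Len: excludes type/len octets */
            hbh[4] = 0x00;  /* Value 0: datagram contains MLD */
            hbh[5] = 0x00;
            hbh[6] = 0x01;  /* PadN option ... */
            hbh[7] = 0x00;  /* ... with zero data bytes, pads to 8 */
    }

Routers examine this option so they can process MLD traffic sent to groups they have not themselves joined.
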
278 m->m_pkthdr.PH_loc.ptr = ifp->if_vnet; in mld_save_context()
280 m->m_pkthdr.rcvif = ifp; in mld_save_context()
281 m->m_pkthdr.flowid = ifp->if_index; in mld_save_context()
288 m->m_pkthdr.PH_loc.ptr = NULL; in mld_scrub_context()
289 m->m_pkthdr.flowid = 0; in mld_scrub_context()
304 KASSERT(curvnet == m->m_pkthdr.PH_loc.ptr, in mld_restore_context()
306 __func__, curvnet, m->m_pkthdr.PH_loc.ptr)); in mld_restore_context()
308 return (m->m_pkthdr.flowid); in mld_restore_context()
312 * Retrieve or set threshold between group-source queries in seconds.
332 if (error || !req->newptr) in sysctl_mld_gsr()
335 if (i < -1 || i >= 60) { in sysctl_mld_gsr()
369 if (req->newptr != NULL) in sysctl_mld_ifinfo()
390 if (ifp == mli->mli_ifp) { in sysctl_mld_ifinfo()
393 info.mli_version = mli->mli_version; in sysctl_mld_ifinfo()
394 info.mli_v1_timer = mli->mli_v1_timer; in sysctl_mld_ifinfo()
395 info.mli_v2_timer = mli->mli_v2_timer; in sysctl_mld_ifinfo()
396 info.mli_flags = mli->mli_flags; in sysctl_mld_ifinfo()
397 info.mli_rv = mli->mli_rv; in sysctl_mld_ifinfo()
398 info.mli_qi = mli->mli_qi; in sysctl_mld_ifinfo()
399 info.mli_qri = mli->mli_qri; in sysctl_mld_ifinfo()
400 info.mli_uri = mli->mli_uri; in sysctl_mld_ifinfo()
426 if (--limit == 0) in mld_dispatch_queue()
434 * Reports are ALWAYS suppressed for ALL-HOSTS (ff02::1)
435 * and node-local addresses. However, kernel and socket consumers
441 * should be suppressed, or non-zero if reports should be issued.
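
The comment above describes the suppression policy applied before any report is generated. A stand-alone approximation of that predicate over the raw 16-byte address is sketched below; the helper name and the exact set of checks in the kernel may differ, this only mirrors what the comment states:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    /* Should membership in this group ever be reported on the wire?
     * Suppress the unspecified address, interface-local (node-local)
     * scope groups, and the all-nodes group ff02::1. */
    bool
    mld_addr_is_reported(const uint8_t addr[16])
    {
            static const uint8_t zero[16];
            static const uint8_t allnodes[16] = {
                    0xff, 0x02, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0x01
            };

            if (memcmp(addr, zero, 16) == 0)
                    return (false);         /* unspecified */
            if (addr[0] == 0xff && (addr[1] & 0x0f) == 0x01)
                    return (false);         /* interface-local scope */
            if (memcmp(addr, allnodes, 16) == 0)
                    return (false);         /* all-nodes, ff02::1 */
            return (true);
    }
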
474 mli->mli_ifp = ifp; in mld_domifattach()
475 mli->mli_version = MLD_VERSION_2; in mld_domifattach()
476 mli->mli_flags = 0; in mld_domifattach()
477 mli->mli_rv = MLD_RV_INIT; in mld_domifattach()
478 mli->mli_qi = MLD_QI_INIT; in mld_domifattach()
479 mli->mli_qri = MLD_QRI_INIT; in mld_domifattach()
480 mli->mli_uri = MLD_URI_INIT; in mld_domifattach()
481 mbufq_init(&mli->mli_gq, MLD_MAX_RESPONSE_PACKETS); in mld_domifattach()
482 if ((ifp->if_flags & IFF_MULTICAST) == 0) in mld_domifattach()
483 mli->mli_flags |= MLIF_SILENT; in mld_domifattach()
485 mli->mli_flags |= MLIF_USEALLOW; in mld_domifattach()
499 * Run before link-layer cleanup; clean up groups, but do not free MLD state.
526 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { in mld_ifdetach()
532 if (mli->mli_version == MLD_VERSION_2) { in mld_ifdetach()
539 if (inm->in6m_state == MLD_LEAVING_MEMBER) { in mld_ifdetach()
540 inm->in6m_state = MLD_NOT_MEMBER; in mld_ifdetach()
552 * Runs after link-layer cleanup; free MLD state.
579 if (mli->mli_ifp == ifp) { in mli_delete_locked()
583 mbufq_drain(&mli->mli_gq); in mli_delete_locked()
594 * Process a received MLDv1 general or address-specific query.
619 ip6_sprintf(ip6tbuf, &mld->mld_addr), in mld_v1_input_query()
626 * a router's link-local address. in mld_v1_input_query()
628 if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) { in mld_v1_input_query()
630 ip6_sprintf(ip6tbuf, &ip6->ip6_src), in mld_v1_input_query()
639 if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) { in mld_v1_input_query()
642 * If this was not sent to the all-nodes group, ignore it. in mld_v1_input_query()
646 dst = ip6->ip6_dst; in mld_v1_input_query()
656 in6_setscope(&mld->mld_addr, ifp, NULL); in mld_v1_input_query()
669 timer = (ntohs(mld->mld_maxdelay) * MLD_FASTHZ) / MLD_TIMER_SCALE; in mld_v1_input_query()
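
An MLDv1 query carries its Maximum Response Delay in milliseconds (RFC 2710), while the state machine above runs on fast-timeout ticks, hence the rescaling. A worked example, assuming MLD_FASTHZ is 5 fast ticks per second and MLD_TIMER_SCALE is 1000 ms per second (typical values; neither constant is shown in this listing):

    #include <stdio.h>

    #define MLD_FASTHZ      5       /* assumed fast timeouts per second */
    #define MLD_TIMER_SCALE 1000    /* assumed: delay is in milliseconds */

    int
    main(void)
    {
            unsigned maxdelay_ms = 10000;   /* 10 s from a query */
            unsigned ticks = (maxdelay_ms * MLD_FASTHZ) / MLD_TIMER_SCALE;

            /* 10000 * 5 / 1000 = 50 ticks; the kernel then jitters the
             * report with a uniform random delay in [1, ticks]. */
            printf("%u ms -> %u fast ticks\n", maxdelay_ms, ticks);
            return (0);
    }
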
680 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { in mld_v1_input_query()
688 * MLDv1 Group-Specific Query. in mld_v1_input_query()
689 * If this is a group-specific MLDv1 query, we need only in mld_v1_input_query()
692 inm = in6m_lookup_locked(ifp, &mld->mld_addr); in mld_v1_input_query()
695 ip6_sprintf(ip6tbuf, &mld->mld_addr), in mld_v1_input_query()
700 in6_clearscope(&mld->mld_addr); in mld_v1_input_query()
714 * below the threshold, reset it.
719 * for group and group-source query responses.
732 ip6_sprintf(ip6tbuf, &inm->in6m_addr), in mld_v1_update_group()
733 if_name(inm->in6m_ifp), timer); in mld_v1_update_group()
737 switch (inm->in6m_state) { in mld_v1_update_group()
742 if (inm->in6m_timer != 0 && in mld_v1_update_group()
743 inm->in6m_timer <= timer) { in mld_v1_update_group()
754 CTR1(KTR_MLD, "%s: ->REPORTING", __func__); in mld_v1_update_group()
755 inm->in6m_state = MLD_REPORTING_MEMBER; in mld_v1_update_group()
756 inm->in6m_timer = MLD_RANDOM_DELAY(timer); in mld_v1_update_group()
760 CTR1(KTR_MLD, "%s: ->AWAKENING", __func__); in mld_v1_update_group()
761 inm->in6m_state = MLD_AWAKENING_MEMBER; in mld_v1_update_group()
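
The switch above implements the RFC 2710 rule that the earlier comment summarizes: if a report for the group is already scheduled at or before the queried maximum delay, leave it alone; otherwise (re)arm the timer with a fresh jittered delay. Reduced to the timer arithmetic, the decision looks like this sketch (arc4random_uniform() stands in for the kernel's MLD_RANDOM_DELAY):

    #include <stdlib.h>

    /* Update a per-group MLDv1 report timer (in fast ticks) in
     * response to a query whose maximum delay is max_ticks.
     * Returns the new timer value. */
    unsigned
    mld_v1_update_timer(unsigned cur_ticks, unsigned max_ticks)
    {
            if (max_ticks == 0)
                    return (1);             /* respond at the next tick */
            if (cur_ticks != 0 && cur_ticks <= max_ticks)
                    return (cur_ticks);     /* earlier report pending */
            return (1 + arc4random_uniform(max_ticks)); /* [1, max] */
    }
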
769 * Process a received MLDv2 general, group-specific or
770 * group-and-source-specific query.
795 ip6_sprintf(ip6tbuf, &ip6->ip6_src), in mld_v2_input_query()
802 * a router's link-local address. in mld_v2_input_query()
804 if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) { in mld_v2_input_query()
806 ip6_sprintf(ip6tbuf, &ip6->ip6_src), in mld_v2_input_query()
815 maxdelay = ntohs(mld->mld_maxdelay); /* in 1/10ths of a second */ in mld_v2_input_query()
824 qrv = MLD_QRV(mld->mld_misc); in mld_v2_input_query()
831 qqi = mld->mld_qqi; in mld_v2_input_query()
833 qqi = MLD_QQIC_MANT(mld->mld_qqi) << in mld_v2_input_query()
834 (MLD_QQIC_EXP(mld->mld_qqi) + 3); in mld_v2_input_query()
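
The Querier's Query Interval Code uses the floating-point-style encoding from RFC 3810 section 5.1.9: values below 128 are literal seconds, while larger values pack a 4-bit mantissa and a 3-bit exponent with an implicit leading 1 bit. A stand-alone decoder following the RFC formula (a sketch for illustration, not the kernel's macros):

    #include <stdint.h>
    #include <stdio.h>

    /* Decode an MLDv2 QQIC octet into a Query Interval in seconds
     * (RFC 3810, section 5.1.9). */
    unsigned
    mld_qqic_to_qqi(uint8_t qqic)
    {
            unsigned mant, exp;

            if (qqic < 128)
                    return (qqic);
            mant = qqic & 0x0f;
            exp = (qqic >> 4) & 0x07;
            return ((mant | 0x10) << (exp + 3));
    }

    int
    main(void)
    {
            /* 0x8a: exp 0, mant 10 -> (10 | 0x10) << 3 = 208 seconds. */
            printf("QQIC 0x7d -> %u s\n", mld_qqic_to_qqi(0x7d));
            printf("QQIC 0x8a -> %u s\n", mld_qqic_to_qqi(0x8a));
            return (0);
    }
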
837 nsrc = ntohs(mld->mld_numsrc); in mld_v2_input_query()
848 if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) { in mld_v2_input_query()
862 in6_setscope(&mld->mld_addr, ifp, NULL); in mld_v2_input_query()
876 if (mli->mli_version != MLD_VERSION_2) in mld_v2_input_query()
880 mli->mli_rv = qrv; in mld_v2_input_query()
881 mli->mli_qi = qqi; in mld_v2_input_query()
882 mli->mli_qri = maxdelay; in mld_v2_input_query()
891 * Schedule a current-state report on this ifp for in mld_v2_input_query()
901 if (mli->mli_v2_timer == 0 || mli->mli_v2_timer >= timer) { in mld_v2_input_query()
902 mli->mli_v2_timer = MLD_RANDOM_DELAY(timer); in mld_v2_input_query()
907 * MLDv2 Group-specific or Group-and-source-specific Query. in mld_v2_input_query()
909 * Group-source-specific queries are throttled on in mld_v2_input_query()
910 * a per-group basis to defeat denial-of-service attempts. in mld_v2_input_query()
914 inm = in6m_lookup_locked(ifp, &mld->mld_addr); in mld_v2_input_query()
918 if (!ratecheck(&inm->in6m_lastgsrtv, in mld_v2_input_query()
932 * group-specific or group-and-source query. in mld_v2_input_query()
934 if (mli->mli_v2_timer == 0 || mli->mli_v2_timer >= timer) in mld_v2_input_query()
938 in6_clearscope(&mld->mld_addr); in mld_v2_input_query()
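
Group-and-source-specific queries are rate limited per group, so a stream of forged queries cannot force the host to keep rebuilding source-list responses; the ratecheck() call above compares the time since the last accepted query against the interval configured by the group-and-source sysctl near the top of this listing. A user-space sketch of that gate using CLOCK_MONOTONIC in place of kernel time (names are illustrative):

    #include <stdbool.h>
    #include <time.h>

    /* Allow at most one group-and-source response per interval_sec.
     * 'last' holds the time of the previous accepted query and is
     * updated only when the new one is accepted. */
    bool
    gsr_ratecheck(struct timespec *last, time_t interval_sec)
    {
            struct timespec now;

            clock_gettime(CLOCK_MONOTONIC, &now);
            if (last->tv_sec != 0 &&
                now.tv_sec - last->tv_sec < interval_sec)
                    return (false);         /* too soon; ignore the query */
            *last = now;
            return (true);
    }
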
949 * Process a received MLDv2 group-specific or group-and-source-specific
965 switch (inm->in6m_state) { in mld_v2_process_group_query()
981 nsrc = ntohs(mld->mld_numsrc); in mld_v2_process_group_query()
984 KASSERT((m0->m_flags & M_PKTHDR) == 0 || in mld_v2_process_group_query()
985 m0->m_pkthdr.len >= off + sizeof(struct mldv2_query) + in mld_v2_process_group_query()
988 m0->m_pkthdr.len, off + sizeof(struct mldv2_query) + in mld_v2_process_group_query()
992 * Deal with group-specific queries upfront. in mld_v2_process_group_query()
994 * source-list state if it exists, and schedule a query response in mld_v2_process_group_query()
995 * for this group-specific query. in mld_v2_process_group_query()
998 if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER || in mld_v2_process_group_query()
999 inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER) { in mld_v2_process_group_query()
1001 timer = min(inm->in6m_timer, timer); in mld_v2_process_group_query()
1003 inm->in6m_state = MLD_G_QUERY_PENDING_MEMBER; in mld_v2_process_group_query()
1004 inm->in6m_timer = MLD_RANDOM_DELAY(timer); in mld_v2_process_group_query()
1010 * Deal with the case where a group-and-source-specific query has in mld_v2_process_group_query()
1011 * been received but a group-specific query is already pending. in mld_v2_process_group_query()
1013 if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER) { in mld_v2_process_group_query()
1014 timer = min(inm->in6m_timer, timer); in mld_v2_process_group_query()
1015 inm->in6m_timer = MLD_RANDOM_DELAY(timer); in mld_v2_process_group_query()
1021 * Finally, deal with the case where a group-and-source-specific in mld_v2_process_group_query()
1022 * query has been received, where a response to a previous g-s-r in mld_v2_process_group_query()
1024 * In this case, we need to parse the source-list which the Querier in mld_v2_process_group_query()
1025 * has provided us with and check if we have any source list filter in mld_v2_process_group_query()
1028 * If we do, we must record them and schedule a current-state in mld_v2_process_group_query()
1031 if (inm->in6m_nsrc > 0) { in mld_v2_process_group_query()
1050 inm->in6m_state = MLD_SG_QUERY_PENDING_MEMBER; in mld_v2_process_group_query()
1051 inm->in6m_timer = MLD_RANDOM_DELAY(timer); in mld_v2_process_group_query()
1081 ip6_sprintf(ip6tbuf, &mld->mld_addr), in mld_v1_input_report()
1086 if (ifp->if_flags & IFF_LOOPBACK) in mld_v1_input_report()
1090 * MLDv1 reports must originate from a host's link-local address, in mld_v1_input_report()
1093 src = ip6->ip6_src; in mld_v1_input_report()
1097 ip6_sprintf(ip6tbuf, &ip6->ip6_src), in mld_v1_input_report()
1106 dst = ip6->ip6_dst; in mld_v1_input_report()
1108 if (!IN6_IS_ADDR_MULTICAST(&mld->mld_addr) || in mld_v1_input_report()
1109 !IN6_ARE_ADDR_EQUAL(&mld->mld_addr, &dst)) { in mld_v1_input_report()
1111 ip6_sprintf(ip6tbuf, &ip6->ip6_dst), in mld_v1_input_report()
1119 * group. Assume we used the link-local address if available, in mld_v1_input_report()
1124 * performed for the on-wire address. in mld_v1_input_report()
1127 if ((ia && IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, IA6_IN6(ia))) || in mld_v1_input_report()
1130 ifa_free(&ia->ia_ifa); in mld_v1_input_report()
1134 ifa_free(&ia->ia_ifa); in mld_v1_input_report()
1137 ip6_sprintf(ip6tbuf, &mld->mld_addr), ifp, if_name(ifp)); in mld_v1_input_report()
1143 if (!IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) in mld_v1_input_report()
1144 in6_setscope(&mld->mld_addr, ifp, NULL); in mld_v1_input_report()
1155 inm = in6m_lookup_locked(ifp, &mld->mld_addr); in mld_v1_input_report()
1159 mli = inm->in6m_mli; in mld_v1_input_report()
1167 if (mli->mli_version == MLD_VERSION_2) in mld_v1_input_report()
1170 inm->in6m_timer = 0; in mld_v1_input_report()
1172 switch (inm->in6m_state) { in mld_v1_input_report()
1182 ip6_sprintf(ip6tbuf, &mld->mld_addr), in mld_v1_input_report()
1185 inm->in6m_state = MLD_LAZY_MEMBER; in mld_v1_input_report()
1199 in6_clearscope(&mld->mld_addr); in mld_v1_input_report()
1227 ifp = m->m_pkthdr.rcvif; in mld_input()
1230 if (m->m_len < off + sizeof(*mld)) { in mld_input()
1238 if (mld->mld_type == MLD_LISTENER_QUERY && in mld_input()
1244 if (m->m_len < off + mldlen) { in mld_input()
1259 switch (mld->mld_type) { in mld_input()
1318 * Fast timeout handler (per-vnet).
1325 struct mbufq scq; /* State-change packets */ in mld_fasttimo_vnet()
1356 if (mli->mli_v2_timer == 0) { in mld_fasttimo_vnet()
1358 } else if (--mli->mli_v2_timer == 0) { in mld_fasttimo_vnet()
1376 * MLD host report and state-change timer processing. in mld_fasttimo_vnet()
1380 ifp = mli->mli_ifp; in mld_fasttimo_vnet()
1382 if (mli->mli_version == MLD_VERSION_2) { in mld_fasttimo_vnet()
1383 uri_fasthz = MLD_RANDOM_DELAY(mli->mli_uri * in mld_fasttimo_vnet()
1390 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { in mld_fasttimo_vnet()
1394 switch (mli->mli_version) { in mld_fasttimo_vnet()
1406 switch (mli->mli_version) { in mld_fasttimo_vnet()
1446 if (inm->in6m_timer == 0) { in mld_v1_process_group_timer()
1448 } else if (--inm->in6m_timer == 0) { in mld_v1_process_group_timer()
1455 switch (inm->in6m_state) { in mld_v1_process_group_timer()
1465 inm->in6m_state = MLD_IDLE_MEMBER; in mld_v1_process_group_timer()
1501 * timer active. This is a no-op in this function; it is easier in mld_v2_process_group_timers()
1502 * to deal with it here than to complicate the slow-timeout path. in mld_v2_process_group_timers()
1504 if (inm->in6m_timer == 0) { in mld_v2_process_group_timers()
1506 } else if (--inm->in6m_timer == 0) { in mld_v2_process_group_timers()
1512 if (inm->in6m_sctimer == 0) { in mld_v2_process_group_timers()
1514 } else if (--inm->in6m_sctimer == 0) { in mld_v2_process_group_timers()
1525 switch (inm->in6m_state) { in mld_v2_process_group_timers()
1536 * Respond to a previously pending Group-Specific in mld_v2_process_group_timers()
1537 * or Group-and-Source-Specific query by enqueueing in mld_v2_process_group_timers()
1538 * the appropriate Current-State report for in mld_v2_process_group_timers()
1545 (inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER), in mld_v2_process_group_timers()
1549 inm->in6m_state = MLD_REPORTING_MEMBER; in mld_v2_process_group_timers()
1557 * State-change retransmission timer fired. in mld_v2_process_group_timers()
1559 * set the global pending state-change flag, and in mld_v2_process_group_timers()
1562 if (--inm->in6m_scrv > 0) { in mld_v2_process_group_timers()
1563 inm->in6m_sctimer = uri_fasthz; in mld_v2_process_group_timers()
1567 * Retransmit the previously computed state-change in mld_v2_process_group_timers()
1571 * a state-change. in mld_v2_process_group_timers()
1576 CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__, in mld_v2_process_group_timers()
1577 ip6_sprintf(ip6tbuf, &inm->in6m_addr), in mld_v2_process_group_timers()
1578 if_name(inm->in6m_ifp)); in mld_v2_process_group_timers()
1587 if (inm->in6m_state == MLD_LEAVING_MEMBER && in mld_v2_process_group_timers()
1588 inm->in6m_scrv == 0) { in mld_v2_process_group_timers()
1589 inm->in6m_state = MLD_NOT_MEMBER; in mld_v2_process_group_timers()
1610 version, mli->mli_ifp, if_name(mli->mli_ifp)); in mld_set_version()
1617 old_version_timer = (mli->mli_rv * mli->mli_qi) + mli->mli_qri; in mld_set_version()
1619 mli->mli_v1_timer = old_version_timer; in mld_set_version()
1622 if (mli->mli_v1_timer > 0 && mli->mli_version != MLD_VERSION_1) { in mld_set_version()
1623 mli->mli_version = MLD_VERSION_1; in mld_set_version()
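
Hearing an MLDv1 query puts the link into compatibility mode for the Older Version Querier Present timeout computed above: (Robustness Variable x Query Interval) + Query Response Interval, per RFC 3810 section 9.12. With the usual defaults of RV = 2, QI = 125 s and QRI = 10 s (assumed here; the MLD_*_INIT values themselves are not shown in this listing), that works out as follows:

    #include <stdio.h>

    int
    main(void)
    {
            unsigned rv = 2, qi = 125, qri = 10;    /* assumed defaults */
            unsigned timeout_s = rv * qi + qri;

            /* 2 * 125 + 10 = 260 seconds of MLDv1 compatibility before
             * the interface may revert to MLDv2. */
            printf("Older Version Querier Present timeout: %u s\n",
                timeout_s);
            return (0);
    }
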
1630 * joined on it; state-change, general-query, and group-query timers.
1642 mli->mli_ifp, if_name(mli->mli_ifp)); in mld_v2_cancel_link_timers()
1649 * Fast-track this potentially expensive operation in mld_v2_cancel_link_timers()
1657 mli->mli_v2_timer = 0; in mld_v2_cancel_link_timers()
1659 ifp = mli->mli_ifp; in mld_v2_cancel_link_timers()
1663 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { in mld_v2_cancel_link_timers()
1667 switch (inm->in6m_state) { in mld_v2_cancel_link_timers()
1681 if (inm->in6m_refcount == 1) in mld_v2_cancel_link_timers()
1690 inm->in6m_sctimer = 0; in mld_v2_cancel_link_timers()
1691 inm->in6m_timer = 0; in mld_v2_cancel_link_timers()
1692 inm->in6m_state = MLD_REPORTING_MEMBER; in mld_v2_cancel_link_timers()
1694 * Free any pending MLDv2 state-change records. in mld_v2_cancel_link_timers()
1696 mbufq_drain(&inm->in6m_scq); in mld_v2_cancel_link_timers()
1727 * Per-vnet slowtimo handler.
1753 if (mli->mli_version != MLD_VERSION_2 && --mli->mli_v1_timer == 0) { in mld_v1_process_querier_timers()
1758 "%s: transition from v%d -> v%d on %p(%s)", in mld_v1_process_querier_timers()
1759 __func__, mli->mli_version, MLD_VERSION_2, in mld_v1_process_querier_timers()
1760 mli->mli_ifp, if_name(mli->mli_ifp)); in mld_v1_process_querier_timers()
1761 mli->mli_version = MLD_VERSION_2; in mld_v1_process_querier_timers()
1781 ifp = in6m->in6m_ifp; in mld_v1_transmit_report()
1786 /* ia may be NULL if link-local address is tentative. */ in mld_v1_transmit_report()
1791 ifa_free(&ia->ia_ifa); in mld_v1_transmit_report()
1798 ifa_free(&ia->ia_ifa); in mld_v1_transmit_report()
1801 mh->m_next = md; in mld_v1_transmit_report()
1809 mh->m_pkthdr.len = sizeof(struct ip6_hdr) + sizeof(struct mld_hdr); in mld_v1_transmit_report()
1810 mh->m_len = sizeof(struct ip6_hdr); in mld_v1_transmit_report()
1813 ip6->ip6_flow = 0; in mld_v1_transmit_report()
1814 ip6->ip6_vfc &= ~IPV6_VERSION_MASK; in mld_v1_transmit_report()
1815 ip6->ip6_vfc |= IPV6_VERSION; in mld_v1_transmit_report()
1816 ip6->ip6_nxt = IPPROTO_ICMPV6; in mld_v1_transmit_report()
1817 ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any; in mld_v1_transmit_report()
1818 ip6->ip6_dst = in6m->in6m_addr; in mld_v1_transmit_report()
1820 md->m_len = sizeof(struct mld_hdr); in mld_v1_transmit_report()
1822 mld->mld_type = type; in mld_v1_transmit_report()
1823 mld->mld_code = 0; in mld_v1_transmit_report()
1824 mld->mld_cksum = 0; in mld_v1_transmit_report()
1825 mld->mld_maxdelay = 0; in mld_v1_transmit_report()
1826 mld->mld_reserved = 0; in mld_v1_transmit_report()
1827 mld->mld_addr = in6m->in6m_addr; in mld_v1_transmit_report()
1828 in6_clearscope(&mld->mld_addr); in mld_v1_transmit_report()
1829 mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6, in mld_v1_transmit_report()
1833 mh->m_flags |= M_MLDV1; in mld_v1_transmit_report()
1838 ifa_free(&ia->ia_ifa); in mld_v1_transmit_report()
1848 * has been any change between T0 (when the last state-change was issued)
1856 * If delay is non-zero, and the state change is an initial multicast
1860 * is sooner, a pending state-change timer or delay itself.
1879 if (inm->in6m_ifp == NULL) { in mld_change_state()
1885 * Try to detect if the upper layer just asked us to change state in mld_change_state()
1888 KASSERT(inm->in6m_ifma != NULL, ("%s: no ifma", __func__)); in mld_change_state()
1889 ifp = inm->in6m_ifma->ifma_ifp; in mld_change_state()
1896 KASSERT(inm->in6m_ifp == ifp, ("%s: bad ifp", __func__)); in mld_change_state()
1907 if (inm->in6m_st[1].iss_fmode != inm->in6m_st[0].iss_fmode) { in mld_change_state()
1908 CTR3(KTR_MLD, "%s: inm transition %d -> %d", __func__, in mld_change_state()
1909 inm->in6m_st[0].iss_fmode, inm->in6m_st[1].iss_fmode); in mld_change_state()
1910 if (inm->in6m_st[0].iss_fmode == MCAST_UNDEFINED) { in mld_change_state()
1914 } else if (inm->in6m_st[1].iss_fmode == MCAST_UNDEFINED) { in mld_change_state()
1936 * MLDv2 will schedule an MLDv2 state-change report containing the
1939 * If the delay argument is non-zero, then we must delay sending the
1956 __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr), in mld_initial_join()
1957 inm->in6m_ifp, if_name(inm->in6m_ifp)); in mld_initial_join()
1962 ifp = inm->in6m_ifp; in mld_initial_join()
1967 KASSERT(mli && mli->mli_ifp == ifp, ("%s: inconsistent ifp", __func__)); in mld_initial_join()
1978 if ((ifp->if_flags & IFF_LOOPBACK) || in mld_initial_join()
1979 (mli->mli_flags & MLIF_SILENT) || in mld_initial_join()
1980 !mld_is_addr_reported(&inm->in6m_addr)) { in mld_initial_join()
1983 inm->in6m_state = MLD_SILENT_MEMBER; in mld_initial_join()
1984 inm->in6m_timer = 0; in mld_initial_join()
1992 if (mli->mli_version == MLD_VERSION_2 && in mld_initial_join()
1993 inm->in6m_state == MLD_LEAVING_MEMBER) { in mld_initial_join()
1994 inm->in6m_refcount--; in mld_initial_join()
1995 MPASS(inm->in6m_refcount > 0); in mld_initial_join()
1997 inm->in6m_state = MLD_REPORTING_MEMBER; in mld_initial_join()
1999 switch (mli->mli_version) { in mld_initial_join()
2010 inm->in6m_timer = max(delay, odelay); in mld_initial_join()
2013 inm->in6m_state = MLD_IDLE_MEMBER; in mld_initial_join()
2019 inm->in6m_timer = odelay; in mld_initial_join()
2033 * Immediately enqueue a State-Change Report for in mld_initial_join()
2038 mq = &inm->in6m_scq; in mld_initial_join()
2041 0, 0, (mli->mli_flags & MLIF_USEALLOW)); in mld_initial_join()
2045 error = retval * -1; in mld_initial_join()
2050 * Schedule transmission of pending state-change in mld_initial_join()
2053 * giving us an opportunity to merge the reports. in mld_initial_join()
2058 KASSERT(mli->mli_rv > 1, in mld_initial_join()
2060 mli->mli_rv)); in mld_initial_join()
2061 inm->in6m_scrv = mli->mli_rv; in mld_initial_join()
2063 if (inm->in6m_sctimer > 1) { in mld_initial_join()
2064 inm->in6m_sctimer = in mld_initial_join()
2065 min(inm->in6m_sctimer, delay); in mld_initial_join()
2067 inm->in6m_sctimer = delay; in mld_initial_join()
2069 inm->in6m_sctimer = 1; in mld_initial_join()
2078 * Only update the T0 state if state change is atomic, in mld_initial_join()
2084 CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__, in mld_initial_join()
2085 ip6_sprintf(ip6tbuf, &inm->in6m_addr), in mld_initial_join()
2086 if_name(inm->in6m_ifp)); in mld_initial_join()
2093 * Issue an intermediate state change during the life-cycle.
2105 __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr), in mld_handle_state_change()
2106 inm->in6m_ifp, if_name(inm->in6m_ifp)); in mld_handle_state_change()
2108 ifp = inm->in6m_ifp; in mld_handle_state_change()
2113 KASSERT(mli && mli->mli_ifp == ifp, in mld_handle_state_change()
2116 if ((ifp->if_flags & IFF_LOOPBACK) || in mld_handle_state_change()
2117 (mli->mli_flags & MLIF_SILENT) || in mld_handle_state_change()
2118 !mld_is_addr_reported(&inm->in6m_addr) || in mld_handle_state_change()
2119 (mli->mli_version != MLD_VERSION_2)) { in mld_handle_state_change()
2120 if (!mld_is_addr_reported(&inm->in6m_addr)) { in mld_handle_state_change()
2126 CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__, in mld_handle_state_change()
2127 ip6_sprintf(ip6tbuf, &inm->in6m_addr), in mld_handle_state_change()
2128 if_name(inm->in6m_ifp)); in mld_handle_state_change()
2132 mbufq_drain(&inm->in6m_scq); in mld_handle_state_change()
2134 retval = mld_v2_enqueue_group_record(&inm->in6m_scq, inm, 1, 0, 0, in mld_handle_state_change()
2135 (mli->mli_flags & MLIF_USEALLOW)); in mld_handle_state_change()
2138 return (-retval); in mld_handle_state_change()
2141 * If record(s) were enqueued, start the state-change in mld_handle_state_change()
2144 inm->in6m_scrv = mli->mli_rv; in mld_handle_state_change()
2145 inm->in6m_sctimer = 1; in mld_handle_state_change()
2156 * MLDv2 enqueues a state-change report containing a transition
2168 __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr), in mld_final_leave()
2169 inm->in6m_ifp, if_name(inm->in6m_ifp)); in mld_final_leave()
2174 switch (inm->in6m_state) { in mld_final_leave()
2186 if (mli->mli_version == MLD_VERSION_1) { in mld_final_leave()
2188 if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER || in mld_final_leave()
2189 inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER) in mld_final_leave()
2196 inm->in6m_state = MLD_NOT_MEMBER; in mld_final_leave()
2198 } else if (mli->mli_version == MLD_VERSION_2) { in mld_final_leave()
2201 * Immediately enqueue a state-change report in mld_final_leave()
2203 * giving us an opportunity to merge reports. in mld_final_leave()
2205 mbufq_drain(&inm->in6m_scq); in mld_final_leave()
2206 inm->in6m_timer = 0; in mld_final_leave()
2207 inm->in6m_scrv = mli->mli_rv; in mld_final_leave()
2210 ip6_sprintf(ip6tbuf, &inm->in6m_addr), in mld_final_leave()
2211 if_name(inm->in6m_ifp), inm->in6m_scrv); in mld_final_leave()
2212 if (inm->in6m_scrv == 0) { in mld_final_leave()
2213 inm->in6m_state = MLD_NOT_MEMBER; in mld_final_leave()
2214 inm->in6m_sctimer = 0; in mld_final_leave()
2221 &inm->in6m_scq, inm, 1, 0, 0, in mld_final_leave()
2222 (mli->mli_flags & MLIF_USEALLOW)); in mld_final_leave()
2227 inm->in6m_state = MLD_LEAVING_MEMBER; in mld_final_leave()
2228 inm->in6m_sctimer = 1; in mld_final_leave()
2242 CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__, in mld_final_leave()
2243 ip6_sprintf(ip6tbuf, &inm->in6m_addr), in mld_final_leave()
2244 if_name(inm->in6m_ifp)); in mld_final_leave()
2245 inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED; in mld_final_leave()
2247 __func__, &inm->in6m_addr, if_name(inm->in6m_ifp)); in mld_final_leave()
2253 * If is_state_change is zero, a current-state record is appended.
2254 * If is_state_change is non-zero, a state-change report is appended.
2256 * If is_group_query is non-zero, an mbuf packet chain is allocated.
2262 * If is_source_query is non-zero, each source is checked to see if
2263 * it was recorded for a Group-Source query, and will be omitted if
2264 * it is not both in-mode and recorded.
2266 * If use_block_allow is non-zero, state change reports for initial join
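
The flags documented above steer which MLDv2 multicast address record type ends up in the report. A rough sketch of the selection, using the record type values from RFC 3810 section 5.2.12; the real enqueue path below also handles source-list truncation, the ASM special cases, and the block/allow rewriting for initial joins and final leaves, all of which this sketch deliberately omits:

    /* MLDv2 multicast address record types (RFC 3810, 5.2.12). */
    enum mld_rec_type {
            MODE_IS_INCLUDE = 1, MODE_IS_EXCLUDE = 2,
            CHANGE_TO_INCLUDE_MODE = 3, CHANGE_TO_EXCLUDE_MODE = 4,
            ALLOW_NEW_SOURCES = 5, BLOCK_OLD_SOURCES = 6
    };

    enum filter_mode { MODE_INCLUDE, MODE_EXCLUDE };

    /* Current-state records describe the filter mode as it stands;
     * a state change that flips the mode uses CHANGE_TO_*; a state
     * change that keeps the mode is expressed as ALLOW/BLOCK
     * source-list deltas (both record types may be emitted). */
    enum mld_rec_type
    mld_pick_record_type(int is_state_change, int mode_changed,
        enum filter_mode t1_mode)
    {
            if (!is_state_change)
                    return (t1_mode == MODE_EXCLUDE ?
                        MODE_IS_EXCLUDE : MODE_IS_INCLUDE);
            if (mode_changed)
                    return (t1_mode == MODE_EXCLUDE ?
                        CHANGE_TO_EXCLUDE_MODE : CHANGE_TO_INCLUDE_MODE);
            return (ALLOW_NEW_SOURCES);     /* plus BLOCK_OLD_SOURCES */
    }
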
2299 ifp = inm->in6m_ifp; in mld_v2_enqueue_group_record()
2310 mode = inm->in6m_st[1].iss_fmode; in mld_v2_enqueue_group_record()
2313 * If we did not transition out of ASM mode during t0->t1, in mld_v2_enqueue_group_record()
2317 if (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0 && in mld_v2_enqueue_group_record()
2318 inm->in6m_nsrc == 0) in mld_v2_enqueue_group_record()
2324 * If the mode did not change, and there are non-ASM in mld_v2_enqueue_group_record()
2338 if (mode != inm->in6m_st[0].iss_fmode) { in mld_v2_enqueue_group_record()
2382 KASSERT(inm->in6m_st[1].iss_asm == 0, in mld_v2_enqueue_group_record()
2384 __func__, inm, inm->in6m_st[1].iss_asm)); in mld_v2_enqueue_group_record()
2396 __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr), in mld_v2_enqueue_group_record()
2397 if_name(inm->in6m_ifp)); in mld_v2_enqueue_group_record()
2412 ip6_sprintf(ip6tbuf, &inm->in6m_addr), in mld_v2_enqueue_group_record()
2413 if_name(inm->in6m_ifp)); in mld_v2_enqueue_group_record()
2426 (m0->m_pkthdr.vt_nrecs + 1 <= MLD_V2_REPORT_MAXRECS) && in mld_v2_enqueue_group_record()
2427 (m0->m_pkthdr.len + minrec0len) < in mld_v2_enqueue_group_record()
2428 (ifp->if_mtu - MLD_MTUSPACE)) { in mld_v2_enqueue_group_record()
2429 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len - in mld_v2_enqueue_group_record()
2437 return (-ENOMEM); in mld_v2_enqueue_group_record()
2440 m0srcs = (ifp->if_mtu - MLD_MTUSPACE - in mld_v2_enqueue_group_record()
2447 return (-ENOMEM); in mld_v2_enqueue_group_record()
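
The m0srcs computation above budgets how many sources fit in the first record of a packet: the interface MTU minus the fixed IPv6 + Hop-by-Hop + report header overhead (MLD_MTUSPACE) and the 20-byte record header, divided by 16 bytes per source address. A worked example for a 1500-byte MTU, with the header overhead assumed to be 40 + 8 + 8 = 56 bytes (the actual MLD_MTUSPACE definition is not shown in this listing):

    #include <stdio.h>

    int
    main(void)
    {
            unsigned mtu = 1500;
            unsigned mtuspace = 40 + 8 + 8; /* IPv6 + HBH + report hdr,
                                               assumed MLD_MTUSPACE */
            unsigned rechdr = 4 + 16;       /* record hdr + group addr */
            unsigned persrc = 16;           /* one in6_addr per source */
            unsigned m0srcs = (mtu - mtuspace - rechdr) / persrc;

            printf("first record holds up to %u sources\n", m0srcs); /* 89 */
            return (0);
    }
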
2461 mr.mr_addr = inm->in6m_addr; in mld_v2_enqueue_group_record()
2467 return (-ENOMEM); in mld_v2_enqueue_group_record()
2479 * Only append sources which are in-mode at t1. If we are in mld_v2_enqueue_group_record()
2485 * to a group-source query. in mld_v2_enqueue_group_record()
2491 md->m_len - nbytes); in mld_v2_enqueue_group_record()
2498 RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs, in mld_v2_enqueue_group_record()
2501 ip6_sprintf(ip6tbuf, &ims->im6s_addr)); in mld_v2_enqueue_group_record()
2510 if (is_source_query && ims->im6s_stp == 0) { in mld_v2_enqueue_group_record()
2517 (void *)&ims->im6s_addr)) { in mld_v2_enqueue_group_record()
2522 return (-ENOMEM); in mld_v2_enqueue_group_record()
2531 pmr->mr_numsrc = htons(msrcs); in mld_v2_enqueue_group_record()
2547 m->m_pkthdr.vt_nrecs = 1; in mld_v2_enqueue_group_record()
2550 m->m_pkthdr.vt_nrecs++; in mld_v2_enqueue_group_record()
2566 return (-ENOMEM); in mld_v2_enqueue_group_record()
2572 return (-ENOMEM); in mld_v2_enqueue_group_record()
2582 return (-ENOMEM); in mld_v2_enqueue_group_record()
2584 m->m_pkthdr.vt_nrecs = 1; in mld_v2_enqueue_group_record()
2587 m0srcs = (ifp->if_mtu - MLD_MTUSPACE - in mld_v2_enqueue_group_record()
2593 __func__, ip6_sprintf(ip6tbuf, &ims->im6s_addr)); in mld_v2_enqueue_group_record()
2601 if (is_source_query && ims->im6s_stp == 0) { in mld_v2_enqueue_group_record()
2608 (void *)&ims->im6s_addr)) { in mld_v2_enqueue_group_record()
2613 return (-ENOMEM); in mld_v2_enqueue_group_record()
2619 pmr->mr_numsrc = htons(msrcs); in mld_v2_enqueue_group_record()
2644 * Source list filter state is held in an RB-tree. When the filter list
2649 * As we may potentially queue two record types, and the entire R-B tree
2655 * which makes things easier on us, and it may or may not be harder on
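
The two passes walk the source RB-tree once for ALLOW records and once for BLOCK records, deciding for each source whether it is in-mode at t0 and at t1. Stripped of the tree walk and the mbuf plumbing, the per-source classification reduces to the sketch below (names are illustrative):

    enum delta { DELTA_NONE, DELTA_ALLOW, DELTA_BLOCK };

    /* A source that passes the filter at t1 but not at t0 is newly
     * allowed; one that passed at t0 but no longer does at t1 is
     * newly blocked; anything else contributes no delta record. */
    enum delta
    classify_source(int in_mode_t0, int in_mode_t1)
    {
            if (in_mode_t1 && !in_mode_t0)
                    return (DELTA_ALLOW);   /* ALLOW_NEW_SOURCES record */
            if (in_mode_t0 && !in_mode_t1)
                    return (DELTA_BLOCK);   /* BLOCK_OLD_SOURCES record */
            return (DELTA_NONE);
    }
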
2682 if (inm->in6m_nsrc == 0 || in mld_v2_enqueue_filter_change()
2683 (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0)) in mld_v2_enqueue_filter_change()
2686 ifp = inm->in6m_ifp; /* interface */ in mld_v2_enqueue_filter_change()
2687 mode = inm->in6m_st[1].iss_fmode; /* filter mode at t1 */ in mld_v2_enqueue_filter_change()
2693 nbytes = 0; /* # of bytes appended to group's state-change queue */ in mld_v2_enqueue_filter_change()
2704 * The first kind of source we encounter tells us which in mld_v2_enqueue_filter_change()
2713 (m0->m_pkthdr.vt_nrecs + 1 <= in mld_v2_enqueue_filter_change()
2715 (m0->m_pkthdr.len + MINRECLEN) < in mld_v2_enqueue_filter_change()
2716 (ifp->if_mtu - MLD_MTUSPACE)) { in mld_v2_enqueue_filter_change()
2718 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len - in mld_v2_enqueue_filter_change()
2730 return (-ENOMEM); in mld_v2_enqueue_filter_change()
2732 m->m_pkthdr.vt_nrecs = 0; in mld_v2_enqueue_filter_change()
2734 m0srcs = (ifp->if_mtu - MLD_MTUSPACE - in mld_v2_enqueue_filter_change()
2749 mr.mr_addr = inm->in6m_addr; in mld_v2_enqueue_filter_change()
2756 return (-ENOMEM); in mld_v2_enqueue_filter_change()
2761 md = m_getptr(m, npbytes - in mld_v2_enqueue_filter_change()
2769 uint8_t *) + md->m_len - in mld_v2_enqueue_filter_change()
2776 * Only report deltas in-mode at t1. in mld_v2_enqueue_filter_change()
2784 &inm->in6m_srcs); in mld_v2_enqueue_filter_change()
2788 ip6_sprintf(ip6tbuf, &ims->im6s_addr)); in mld_v2_enqueue_filter_change()
2813 (void *)&ims->im6s_addr)) { in mld_v2_enqueue_filter_change()
2818 return (-ENOMEM); in mld_v2_enqueue_filter_change()
2832 npbytes -= sizeof(struct mldv2_record); in mld_v2_enqueue_filter_change()
2839 "%s: m_adj(m, -mr)", __func__); in mld_v2_enqueue_filter_change()
2840 m_adj(m, -((int)sizeof( in mld_v2_enqueue_filter_change()
2847 pmr->mr_type = MLD_ALLOW_NEW_SOURCES; in mld_v2_enqueue_filter_change()
2849 pmr->mr_type = MLD_BLOCK_OLD_SOURCES; in mld_v2_enqueue_filter_change()
2850 pmr->mr_numsrc = htons(rsrcs); in mld_v2_enqueue_filter_change()
2855 m->m_pkthdr.vt_nrecs++; in mld_v2_enqueue_filter_change()
2874 struct mbuf *m; /* pending state-change */ in mld_v2_merge_state_changes()
2875 struct mbuf *m0; /* copy of pending state-change */ in mld_v2_merge_state_changes()
2876 struct mbuf *mt; /* last state-change in packet */ in mld_v2_merge_state_changes()
2889 * copy of each queued state-change message before merging. in mld_v2_merge_state_changes()
2891 if (inm->in6m_scrv > 0) in mld_v2_merge_state_changes()
2894 gq = &inm->in6m_scq; in mld_v2_merge_state_changes()
2917 if ((mt->m_pkthdr.vt_nrecs + in mld_v2_merge_state_changes()
2918 m->m_pkthdr.vt_nrecs <= in mld_v2_merge_state_changes()
2920 (mt->m_pkthdr.len + recslen <= in mld_v2_merge_state_changes()
2921 (inm->in6m_ifp->if_mtu - MLD_MTUSPACE))) in mld_v2_merge_state_changes()
2929 mt = m->m_nextpkt; in mld_v2_merge_state_changes()
2939 m = m0->m_nextpkt; in mld_v2_merge_state_changes()
2945 m0->m_nextpkt = NULL; in mld_v2_merge_state_changes()
2946 m = m->m_nextpkt; in mld_v2_merge_state_changes()
2960 m0->m_flags &= ~M_PKTHDR; in mld_v2_merge_state_changes()
2961 mt->m_pkthdr.len += recslen; in mld_v2_merge_state_changes()
2962 mt->m_pkthdr.vt_nrecs += in mld_v2_merge_state_changes()
2963 m0->m_pkthdr.vt_nrecs; in mld_v2_merge_state_changes()
2965 mtl->m_next = m0; in mld_v2_merge_state_changes()
2987 KASSERT(mli->mli_version == MLD_VERSION_2, in mld_v2_dispatch_general_query()
2988 ("%s: called when version %d", __func__, mli->mli_version)); in mld_v2_dispatch_general_query()
2996 if (!mbufq_empty(&mli->mli_gq)) in mld_v2_dispatch_general_query()
2999 ifp = mli->mli_ifp; in mld_v2_dispatch_general_query()
3001 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { in mld_v2_dispatch_general_query()
3005 KASSERT(ifp == inm->in6m_ifp, in mld_v2_dispatch_general_query()
3008 switch (inm->in6m_state) { in mld_v2_dispatch_general_query()
3017 inm->in6m_state = MLD_REPORTING_MEMBER; in mld_v2_dispatch_general_query()
3018 retval = mld_v2_enqueue_group_record(&mli->mli_gq, in mld_v2_dispatch_general_query()
3031 mld_dispatch_queue(&mli->mli_gq, MLD_MAX_RESPONSE_BURST); in mld_v2_dispatch_general_query()
3036 if (mbufq_first(&mli->mli_gq) != NULL) { in mld_v2_dispatch_general_query()
3037 mli->mli_v2_timer = 1 + MLD_RANDOM_DELAY( in mld_v2_dispatch_general_query()
3046 * VIMAGE: Needs to store/restore vnet pointer on a per-mbuf-chain basis.
3048 * a link and uses a link-scope multicast address.
3094 if (m->m_flags & M_MLDV1) { in mld_dispatch_packet()
3107 m0->m_pkthdr.rcvif = V_loif; in mld_dispatch_packet()
3111 (void)in6_setscope(&ip6->ip6_dst, ifp, NULL); /* XXX LOR */ in mld_dispatch_packet()
3118 MLD_EMBEDSCOPE(&ip6->ip6_dst, ifp->if_index); in mld_dispatch_packet()
3127 type = mld->mld_type; in mld_dispatch_packet()
3156 * KAME IPv6 requires that hop-by-hop options be passed separately,
3172 KASSERT((m->m_flags & M_PKTHDR), in mld_v2_encap_report()
3186 ifa_free(&ia->ia_ifa); in mld_v2_encap_report()
3195 mh->m_len = sizeof(struct ip6_hdr) + sizeof(struct mldv2_report); in mld_v2_encap_report()
3196 mh->m_pkthdr.len = sizeof(struct ip6_hdr) + in mld_v2_encap_report()
3200 ip6->ip6_flow = 0; in mld_v2_encap_report()
3201 ip6->ip6_vfc &= ~IPV6_VERSION_MASK; in mld_v2_encap_report()
3202 ip6->ip6_vfc |= IPV6_VERSION; in mld_v2_encap_report()
3203 ip6->ip6_nxt = IPPROTO_ICMPV6; in mld_v2_encap_report()
3204 ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any; in mld_v2_encap_report()
3206 ifa_free(&ia->ia_ifa); in mld_v2_encap_report()
3207 ip6->ip6_dst = in6addr_linklocal_allv2routers; in mld_v2_encap_report()
3211 mld->mld_type = MLDV2_LISTENER_REPORT; in mld_v2_encap_report()
3212 mld->mld_code = 0; in mld_v2_encap_report()
3213 mld->mld_cksum = 0; in mld_v2_encap_report()
3214 mld->mld_v2_reserved = 0; in mld_v2_encap_report()
3215 mld->mld_v2_numrecs = htons(m->m_pkthdr.vt_nrecs); in mld_v2_encap_report()
3216 m->m_pkthdr.vt_nrecs = 0; in mld_v2_encap_report()
3218 mh->m_next = m; in mld_v2_encap_report()
3219 mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6, in mld_v2_encap_report()
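
in6_cksum() covers the MLD message plus the IPv6 pseudo-header (source, destination, upper-layer length, and a Next Header value of 58), which is why it is only computed once the addresses above have been filled in. A stand-alone version of the same computation over contiguous buffers, rather than mbuf chains, as a closing sketch:

    #include <stddef.h>
    #include <stdint.h>

    /* RFC 1071 one's-complement sum, accumulated 16 bits at a time. */
    static uint32_t
    sum16(const uint8_t *p, size_t len, uint32_t sum)
    {
            while (len > 1) {
                    sum += (uint32_t)p[0] << 8 | p[1];
                    p += 2;
                    len -= 2;
            }
            if (len)
                    sum += (uint32_t)p[0] << 8;
            return (sum);
    }

    /* ICMPv6 checksum over an IPv6 pseudo-header (RFC 8200, 8.1) and
     * the ICMPv6 payload.  src/dst are the 16-byte addresses from the
     * IPv6 header; the checksum field inside 'icmp6' must be zero. */
    uint16_t
    icmp6_cksum(const uint8_t src[16], const uint8_t dst[16],
        const uint8_t *icmp6, uint32_t len)
    {
            uint8_t pseudo[8];
            uint32_t sum = 0;

            pseudo[0] = len >> 24; pseudo[1] = len >> 16;
            pseudo[2] = len >> 8;  pseudo[3] = len;
            pseudo[4] = pseudo[5] = pseudo[6] = 0;
            pseudo[7] = 58;                 /* Next Header: ICMPv6 */

            sum = sum16(src, 16, sum);
            sum = sum16(dst, 16, sum);
            sum = sum16(pseudo, 8, sum);
            sum = sum16(icmp6, len, sum);
            while (sum >> 16)
                    sum = (sum & 0xffff) + (sum >> 16);
            return ((uint16_t)~sum);
    }
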