10Sstevel@tonic-gate /*
20Sstevel@tonic-gate  * CDDL HEADER START
30Sstevel@tonic-gate  *
40Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
51676Sjpk  * Common Development and Distribution License (the "License").
61676Sjpk  * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate  *
80Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate  * See the License for the specific language governing permissions
110Sstevel@tonic-gate  * and limitations under the License.
120Sstevel@tonic-gate  *
130Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate  *
190Sstevel@tonic-gate  * CDDL HEADER END
200Sstevel@tonic-gate  */
211735Skcpoon 
220Sstevel@tonic-gate /*
231676Sjpk  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
240Sstevel@tonic-gate  * Use is subject to license terms.
250Sstevel@tonic-gate  */
260Sstevel@tonic-gate 
270Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
280Sstevel@tonic-gate 
290Sstevel@tonic-gate #include <sys/types.h>
300Sstevel@tonic-gate #include <sys/systm.h>
310Sstevel@tonic-gate #include <sys/stream.h>
320Sstevel@tonic-gate #include <sys/cmn_err.h>
330Sstevel@tonic-gate #include <sys/kmem.h>
340Sstevel@tonic-gate #define	_SUN_TPI_VERSION 2
350Sstevel@tonic-gate #include <sys/tihdr.h>
360Sstevel@tonic-gate #include <sys/socket.h>
370Sstevel@tonic-gate #include <sys/strsun.h>
380Sstevel@tonic-gate #include <sys/strsubr.h>
390Sstevel@tonic-gate 
400Sstevel@tonic-gate #include <netinet/in.h>
410Sstevel@tonic-gate #include <netinet/ip6.h>
420Sstevel@tonic-gate #include <netinet/tcp_seq.h>
430Sstevel@tonic-gate #include <netinet/sctp.h>
440Sstevel@tonic-gate 
450Sstevel@tonic-gate #include <inet/common.h>
460Sstevel@tonic-gate #include <inet/ip.h>
470Sstevel@tonic-gate #include <inet/ip6.h>
480Sstevel@tonic-gate #include <inet/mib2.h>
490Sstevel@tonic-gate #include <inet/ipclassifier.h>
500Sstevel@tonic-gate #include <inet/ipp_common.h>
510Sstevel@tonic-gate #include <inet/ipsec_impl.h>
520Sstevel@tonic-gate #include <inet/sctp_ip.h>
530Sstevel@tonic-gate 
540Sstevel@tonic-gate #include "sctp_impl.h"
550Sstevel@tonic-gate #include "sctp_asconf.h"
560Sstevel@tonic-gate #include "sctp_addr.h"
570Sstevel@tonic-gate 
580Sstevel@tonic-gate static struct kmem_cache *sctp_kmem_set_cache;
590Sstevel@tonic-gate 
/*
 * PR-SCTP comments.
 *
 * When we get a valid Forward TSN chunk, we check the fragment list for this
 * SSN and preceding SSNs and free all of them. Further, if this Forward TSN
 * causes the next expected SSN to be present in the stream queue, we deliver
 * any such stranded messages upstream. We also update the SACK info
 * appropriately. When checking for advancing the cumulative ack (in
 * sctp_cumack()) we must check for abandoned chunks and messages. While
 * traversing the transmit list, if we come across an abandoned chunk, we can
 * skip the message (i.e. take it out of the (re)transmit list) since this
 * message, and hence this chunk, has been marked abandoned by sctp_rexmit().
 * If we come across an unsent chunk for a message that is now abandoned, we
 * need to check if a Forward TSN needs to be sent; this could be a case where
 * we deferred sending a Forward TSN in sctp_get_msg_to_send(). Further, after
 * processing a SACK, we check if the Advanced peer ack point can be moved
 * ahead, i.e. if we can send a Forward TSN via sctp_check_abandoned_data().
 */
780Sstevel@tonic-gate void
790Sstevel@tonic-gate sctp_free_set(sctp_set_t *s)
800Sstevel@tonic-gate {
810Sstevel@tonic-gate 	sctp_set_t *p;
820Sstevel@tonic-gate 
830Sstevel@tonic-gate 	while (s) {
840Sstevel@tonic-gate 		p = s->next;
850Sstevel@tonic-gate 		kmem_cache_free(sctp_kmem_set_cache, s);
860Sstevel@tonic-gate 		s = p;
870Sstevel@tonic-gate 	}
880Sstevel@tonic-gate }
890Sstevel@tonic-gate 
/*
 * Record TSN "tsn" in the SACK gap-ack set list headed by *head, keeping
 * the list sorted by TSN and coalescing sets that become adjacent.
 * *num tracks the number of sets in the list and is kept in sync.
 * Allocations use KM_NOSLEEP; on allocation failure the TSN is simply
 * not recorded (the peer will retransmit and we will try again).
 */
static void
sctp_ack_add(sctp_set_t **head, uint32_t tsn, int *num)
{
	sctp_set_t *p, *t;

	if (head == NULL || num == NULL)
		return;

	ASSERT(*num >= 0);
	ASSERT((*num == 0 && *head == NULL) || (*num > 0 && *head != NULL));

	/* Empty list: create the initial singleton set [tsn, tsn]. */
	if (*head == NULL) {
		*head = kmem_cache_alloc(sctp_kmem_set_cache, KM_NOSLEEP);
		if (*head == NULL)
			return;
		(*head)->prev = (*head)->next = NULL;
		(*head)->begin = tsn;
		(*head)->end = tsn;
		*num = 1;
		return;
	}

	ASSERT((*head)->prev == NULL);

	/*
	 * Handle this special case here so we don't have to check
	 * for it each time in the loop.
	 */
	if (SEQ_LT(tsn + 1, (*head)->begin)) {
		/* add a new set, and move the head pointer */
		t = kmem_cache_alloc(sctp_kmem_set_cache, KM_NOSLEEP);
		if (t == NULL)
			return;
		t->next = *head;
		t->prev = NULL;
		(*head)->prev = t;
		t->begin = tsn;
		t->end = tsn;
		(*num)++;
		*head = t;
		return;
	}

	/*
	 * We need to handle the following cases, where p points to
	 * the current set (as we walk through the loop):
	 *
	 * 1. tsn is entirely less than p; create a new set before p.
	 * 2. tsn borders p from less; coalesce p with tsn.
	 * 3. tsn is within p; do nothing.
	 * 4. tsn borders p from greater; coalesce p with tsn.
	 * 4a. p may now border p->next from less; if so, coalesce those
	 *    two sets.
	 * 5. tsn is entirely greater than all sets; add a new set at
	 *    the end.
	 */
	for (p = *head; ; p = p->next) {
		if (SEQ_LT(tsn + 1, p->begin)) {
			/* 1: add a new set before p. */
			t = kmem_cache_alloc(sctp_kmem_set_cache, KM_NOSLEEP);
			if (t == NULL)
				return;
			t->next = p;
			t->prev = NULL;
			t->begin = tsn;
			t->end = tsn;
			if (p->prev) {
				t->prev = p->prev;
				p->prev->next = t;
			}
			p->prev = t;
			(*num)++;
			return;
		}

		if ((tsn + 1) == p->begin) {
			/* 2: adjust p->begin */
			p->begin = tsn;
			return;
		}

		if (SEQ_GEQ(tsn, p->begin) && SEQ_LEQ(tsn, p->end)) {
			/* 3; do nothing */
			return;
		}

		if ((p->end + 1) == tsn) {
			/* 4; adjust p->end */
			p->end = tsn;

			if (p->next != NULL && (tsn + 1) == p->next->begin) {
				/* 4a: coalesce p and p->next */
				t = p->next;
				p->end = t->end;
				p->next = t->next;
				if (t->next != NULL)
					t->next->prev = p;
				kmem_cache_free(sctp_kmem_set_cache, t);
				(*num)--;
			}
			return;
		}

		if (p->next == NULL) {
			/* 5: add new set at the end */
			t = kmem_cache_alloc(sctp_kmem_set_cache, KM_NOSLEEP);
			if (t == NULL)
				return;
			t->next = NULL;
			t->prev = p;
			t->begin = tsn;
			t->end = tsn;
			p->next = t;
			(*num)++;
			return;
		}

		/*
		 * tsn lies strictly beyond p; advance.  (This test is
		 * redundant — every non-returning path reaches here with
		 * SEQ_GT(tsn, p->end + 1) — the loop advances regardless.)
		 */
		if (SEQ_GT(tsn, p->end + 1))
			continue;
	}
}
2110Sstevel@tonic-gate 
/*
 * Remove from the SACK set list every TSN at or below "end" (used after
 * the cumulative ack point advances to "end").  Sets wholly covered by
 * "end" are unlinked and freed; a set that straddles "end" is trimmed
 * to begin at end + 1.  *num is recomputed from the surviving list.
 */
static void
sctp_ack_rem(sctp_set_t **head, uint32_t end, int *num)
{
	sctp_set_t *p, *t;

	if (head == NULL || *head == NULL || num == NULL)
		return;

	/* Nothing to remove */
	if (SEQ_LT(end, (*head)->begin))
		return;

	/* Find out where to start removing sets */
	for (p = *head; p->next; p = p->next) {
		if (SEQ_LEQ(end, p->end))
			break;
	}

	/*
	 * p is now the first set reaching past "end", or the last set
	 * if none does.
	 */
	if (SEQ_LT(end, p->end) && SEQ_GEQ(end, p->begin)) {
		/* adjust p */
		p->begin = end + 1;
		/* all done */
		if (p == *head)
			return;
	} else if (SEQ_GEQ(end, p->end)) {
		/* remove this set too; p may become NULL (whole list goes) */
		p = p->next;
	}

	/* unlink everything before this set */
	t = *head;
	*head = p;
	if (p != NULL && p->prev != NULL) {
		p->prev->next = NULL;
		p->prev = NULL;
	}

	sctp_free_set(t);

	/* recount the number of sets */
	*num = 0;

	for (p = *head; p != NULL; p = p->next)
		(*num)++;
}
2570Sstevel@tonic-gate 
2580Sstevel@tonic-gate void
2590Sstevel@tonic-gate sctp_sets_init()
2600Sstevel@tonic-gate {
2610Sstevel@tonic-gate 	sctp_kmem_set_cache = kmem_cache_create("sctp_set_cache",
2620Sstevel@tonic-gate 	    sizeof (sctp_set_t), 0, NULL, NULL, NULL, NULL,
2630Sstevel@tonic-gate 	    NULL, 0);
2640Sstevel@tonic-gate }
2650Sstevel@tonic-gate 
/*
 * Destroy the set cache created by sctp_sets_init(); called at module
 * teardown, after all sets have been freed.
 */
void
sctp_sets_fini()
{
	kmem_cache_destroy(sctp_kmem_set_cache);
}
2710Sstevel@tonic-gate 
2720Sstevel@tonic-gate sctp_chunk_hdr_t *
2730Sstevel@tonic-gate sctp_first_chunk(uchar_t *rptr, ssize_t remaining)
2740Sstevel@tonic-gate {
2750Sstevel@tonic-gate 	sctp_chunk_hdr_t *ch;
2760Sstevel@tonic-gate 	uint16_t ch_len;
2770Sstevel@tonic-gate 
2780Sstevel@tonic-gate 	if (remaining < sizeof (*ch)) {
2790Sstevel@tonic-gate 		return (NULL);
2800Sstevel@tonic-gate 	}
2810Sstevel@tonic-gate 
2820Sstevel@tonic-gate 	ch = (sctp_chunk_hdr_t *)rptr;
2830Sstevel@tonic-gate 	ch_len = ntohs(ch->sch_len);
2840Sstevel@tonic-gate 
2850Sstevel@tonic-gate 	if (ch_len < sizeof (*ch) || remaining < ch_len) {
2860Sstevel@tonic-gate 		return (NULL);
2870Sstevel@tonic-gate 	}
2880Sstevel@tonic-gate 
2890Sstevel@tonic-gate 	return (ch);
2900Sstevel@tonic-gate }
2910Sstevel@tonic-gate 
2920Sstevel@tonic-gate sctp_chunk_hdr_t *
2930Sstevel@tonic-gate sctp_next_chunk(sctp_chunk_hdr_t *ch, ssize_t *remaining)
2940Sstevel@tonic-gate {
2950Sstevel@tonic-gate 	int pad;
2960Sstevel@tonic-gate 	uint16_t ch_len;
2970Sstevel@tonic-gate 
2980Sstevel@tonic-gate 	if (!ch) {
2990Sstevel@tonic-gate 		return (NULL);
3000Sstevel@tonic-gate 	}
3010Sstevel@tonic-gate 
3020Sstevel@tonic-gate 	ch_len = ntohs(ch->sch_len);
3030Sstevel@tonic-gate 
3040Sstevel@tonic-gate 	if ((pad = ch_len & (SCTP_ALIGN - 1)) != 0) {
3050Sstevel@tonic-gate 		pad = SCTP_ALIGN - pad;
3060Sstevel@tonic-gate 	}
3070Sstevel@tonic-gate 
3080Sstevel@tonic-gate 	*remaining -= (ch_len + pad);
3090Sstevel@tonic-gate 	ch = (sctp_chunk_hdr_t *)((char *)ch + ch_len + pad);
3100Sstevel@tonic-gate 
3110Sstevel@tonic-gate 	return (sctp_first_chunk((uchar_t *)ch, *remaining));
3120Sstevel@tonic-gate }
3130Sstevel@tonic-gate 
/*
 * Attach ancillary data to a received SCTP segment.
 * If the source address (fp) is not the primary, send up a
 * unitdata_ind so recvfrom() can populate the msg_name field.
 * If ancillary data is also requested, we append it to the
 * unitdata_req. Otherwise, we just send up an optdata_ind.
 *
 * On success a T_unitdata_ind/T_optdata_ind mblk is prepended to *mp
 * and 0 is returned; -1 is returned when there is nothing to add or an
 * allocation fails.
 */
static int
sctp_input_add_ancillary(sctp_t *sctp, mblk_t **mp, sctp_data_hdr_t *dcp,
    sctp_faddr_t *fp, ip6_pkt_t *ipp)
{
	struct T_unitdata_ind	*tudi;
	int			optlen;	/* total cmsg bytes to append */
	int			hdrlen;	/* TPI header + address bytes */
	uchar_t			*optptr;
	struct cmsghdr		*cmsg;
	mblk_t			*mp1;
	struct sockaddr_in6	sin_buf[1];
	struct sockaddr_in6	*sin6;
	struct sockaddr_in	*sin4;
	uint_t			addflag = 0;	/* SCTP_IPV6_RECV* actually added */

	sin4 = NULL;
	sin6 = NULL;

	optlen = hdrlen = 0;

	/* Figure out address size */
	if (sctp->sctp_ipversion == IPV4_VERSION) {
		sin4 = (struct sockaddr_in *)sin_buf;
		sin4->sin_family = AF_INET;
		sin4->sin_port = sctp->sctp_fport;
		IN6_V4MAPPED_TO_IPADDR(&fp->faddr, sin4->sin_addr.s_addr);
		hdrlen = sizeof (*tudi) + sizeof (*sin4);
	} else {
		sin6 = sin_buf;
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = sctp->sctp_fport;
		sin6->sin6_addr = fp->faddr;
		hdrlen = sizeof (*tudi) + sizeof (*sin6);
	}

	/* If app asked to receive send / recv info */
	if (sctp->sctp_recvsndrcvinfo) {
		optlen += sizeof (*cmsg) + sizeof (struct sctp_sndrcvinfo);
		if (hdrlen == 0)
			hdrlen = sizeof (struct T_optdata_ind);
	}

	if (sctp->sctp_ipv6_recvancillary == 0)
		goto noancillary;

	/*
	 * First pass: size each requested IPv6 ancillary item whose value
	 * has changed since it was last delivered, and latch the item in
	 * addflag so the fill pass below stays in sync with this sizing.
	 */
	if ((ipp->ipp_fields & IPPF_IFINDEX) &&
	    ipp->ipp_ifindex != sctp->sctp_recvifindex &&
	    (sctp->sctp_ipv6_recvancillary & SCTP_IPV6_RECVPKTINFO)) {
		optlen += sizeof (*cmsg) + sizeof (struct in6_pktinfo);
		if (hdrlen == 0)
			hdrlen = sizeof (struct T_unitdata_ind);
		addflag |= SCTP_IPV6_RECVPKTINFO;
	}
	/* If app asked for hoplimit and it has changed ... */
	if ((ipp->ipp_fields & IPPF_HOPLIMIT) &&
	    ipp->ipp_hoplimit != sctp->sctp_recvhops &&
	    (sctp->sctp_ipv6_recvancillary & SCTP_IPV6_RECVHOPLIMIT)) {
		optlen += sizeof (*cmsg) + sizeof (uint_t);
		if (hdrlen == 0)
			hdrlen = sizeof (struct T_unitdata_ind);
		addflag |= SCTP_IPV6_RECVHOPLIMIT;
	}
	/* If app asked for hopbyhop headers and it has changed ... */
	if ((sctp->sctp_ipv6_recvancillary & SCTP_IPV6_RECVHOPOPTS) &&
	    ip_cmpbuf(sctp->sctp_hopopts, sctp->sctp_hopoptslen,
		(ipp->ipp_fields & IPPF_HOPOPTS),
		ipp->ipp_hopopts, ipp->ipp_hopoptslen)) {
		/* sctp_v6label_len bytes are the label, not delivered up */
		optlen += sizeof (*cmsg) + ipp->ipp_hopoptslen -
		    sctp->sctp_v6label_len;
		if (hdrlen == 0)
			hdrlen = sizeof (struct T_unitdata_ind);
		addflag |= SCTP_IPV6_RECVHOPOPTS;
		if (!ip_allocbuf((void **)&sctp->sctp_hopopts,
		    &sctp->sctp_hopoptslen,
		    (ipp->ipp_fields & IPPF_HOPOPTS),
		    ipp->ipp_hopopts, ipp->ipp_hopoptslen))
			return (-1);
	}
	/* If app asked for dst headers before routing headers ... */
	if ((sctp->sctp_ipv6_recvancillary & SCTP_IPV6_RECVRTDSTOPTS) &&
	    ip_cmpbuf(sctp->sctp_rtdstopts, sctp->sctp_rtdstoptslen,
		(ipp->ipp_fields & IPPF_RTDSTOPTS),
		ipp->ipp_rtdstopts, ipp->ipp_rtdstoptslen)) {
		optlen += sizeof (*cmsg) + ipp->ipp_rtdstoptslen;
		if (hdrlen == 0)
			hdrlen = sizeof (struct T_unitdata_ind);
		addflag |= SCTP_IPV6_RECVRTDSTOPTS;
		if (!ip_allocbuf((void **)&sctp->sctp_rtdstopts,
		    &sctp->sctp_rtdstoptslen,
		    (ipp->ipp_fields & IPPF_RTDSTOPTS),
		    ipp->ipp_rtdstopts, ipp->ipp_rtdstoptslen))
			return (-1);
	}
	/* If app asked for routing headers and it has changed ... */
	if (sctp->sctp_ipv6_recvancillary & SCTP_IPV6_RECVRTHDR) {
		if (ip_cmpbuf(sctp->sctp_rthdr, sctp->sctp_rthdrlen,
		    (ipp->ipp_fields & IPPF_RTHDR),
		    ipp->ipp_rthdr, ipp->ipp_rthdrlen)) {
			optlen += sizeof (*cmsg) + ipp->ipp_rthdrlen;
			if (hdrlen == 0)
				hdrlen = sizeof (struct T_unitdata_ind);
			addflag |= SCTP_IPV6_RECVRTHDR;
			if (!ip_allocbuf((void **)&sctp->sctp_rthdr,
			    &sctp->sctp_rthdrlen,
			    (ipp->ipp_fields & IPPF_RTHDR),
			    ipp->ipp_rthdr, ipp->ipp_rthdrlen))
				return (-1);
		}
	}
	/* If app asked for dest headers and it has changed ... */
	if ((sctp->sctp_ipv6_recvancillary & SCTP_IPV6_RECVDSTOPTS) &&
	    ip_cmpbuf(sctp->sctp_dstopts, sctp->sctp_dstoptslen,
		(ipp->ipp_fields & IPPF_DSTOPTS),
		ipp->ipp_dstopts, ipp->ipp_dstoptslen)) {
		optlen += sizeof (*cmsg) + ipp->ipp_dstoptslen;
		if (hdrlen == 0)
			hdrlen = sizeof (struct T_unitdata_ind);
		addflag |= SCTP_IPV6_RECVDSTOPTS;
		if (!ip_allocbuf((void **)&sctp->sctp_dstopts,
		    &sctp->sctp_dstoptslen,
		    (ipp->ipp_fields & IPPF_DSTOPTS),
		    ipp->ipp_dstopts, ipp->ipp_dstoptslen))
			return (-1);
	}
noancillary:
	/* Nothing to add */
	if (hdrlen == 0)
		return (-1);

	/* Second pass: allocate the TPI mblk and fill header + options. */
	mp1 = allocb(hdrlen + optlen + sizeof (void *), BPRI_MED);
	if (mp1 == NULL)
		return (-1);
	mp1->b_cont = *mp;
	*mp = mp1;
	mp1->b_rptr += sizeof (void *);  /* pointer worth of padding */
	mp1->b_wptr = mp1->b_rptr + hdrlen + optlen;
	DB_TYPE(mp1) = M_PROTO;
	tudi = (struct T_unitdata_ind *)mp1->b_rptr;
	tudi->PRIM_type = T_UNITDATA_IND;
	tudi->SRC_length = sin4 ? sizeof (*sin4) : sizeof (*sin6);
	tudi->SRC_offset = sizeof (*tudi);
	tudi->OPT_offset = sizeof (*tudi) + tudi->SRC_length;
	tudi->OPT_length = optlen;
	if (sin4) {
		bcopy(sin4, tudi + 1, sizeof (*sin4));
	} else {
		bcopy(sin6, tudi + 1, sizeof (*sin6));
	}
	optptr = (uchar_t *)tudi + tudi->OPT_offset;

	if (sctp->sctp_recvsndrcvinfo) {
		/* XXX need backout method if memory allocation fails. */
		struct sctp_sndrcvinfo *sri;

		cmsg = (struct cmsghdr *)optptr;
		cmsg->cmsg_level = IPPROTO_SCTP;
		cmsg->cmsg_type = SCTP_SNDRCV;
		cmsg->cmsg_len = sizeof (*cmsg) + sizeof (*sri);
		optptr += sizeof (*cmsg);

		sri = (struct sctp_sndrcvinfo *)(cmsg + 1);
		ASSERT(OK_32PTR(sri));
		sri->sinfo_stream = ntohs(dcp->sdh_sid);
		sri->sinfo_ssn = ntohs(dcp->sdh_ssn);
		if (SCTP_DATA_GET_UBIT(dcp)) {
			sri->sinfo_flags = MSG_UNORDERED;
		} else {
			sri->sinfo_flags = 0;
		}
		/* sdh_payload_id is passed through in wire byte order */
		sri->sinfo_ppid = dcp->sdh_payload_id;
		sri->sinfo_context = 0;
		sri->sinfo_timetolive = 0;
		sri->sinfo_tsn = ntohl(dcp->sdh_tsn);
		sri->sinfo_cumtsn = sctp->sctp_ftsn;
		sri->sinfo_assoc_id = 0;

		optptr += sizeof (*sri);
	}

	/*
	 * If app asked for pktinfo and the index has changed ...
	 * Note that the local address never changes for the connection.
	 */
	if (addflag & SCTP_IPV6_RECVPKTINFO) {
		struct in6_pktinfo *pkti;

		cmsg = (struct cmsghdr *)optptr;
		cmsg->cmsg_level = IPPROTO_IPV6;
		cmsg->cmsg_type = IPV6_PKTINFO;
		cmsg->cmsg_len = sizeof (*cmsg) + sizeof (*pkti);
		optptr += sizeof (*cmsg);

		pkti = (struct in6_pktinfo *)optptr;
		if (sctp->sctp_ipversion == IPV6_VERSION)
			pkti->ipi6_addr = sctp->sctp_ip6h->ip6_src;
		else
			IN6_IPADDR_TO_V4MAPPED(sctp->sctp_ipha->ipha_src,
			    &pkti->ipi6_addr);
		pkti->ipi6_ifindex = ipp->ipp_ifindex;
		optptr += sizeof (*pkti);
		ASSERT(OK_32PTR(optptr));
		/* Save as "last" value */
		sctp->sctp_recvifindex = ipp->ipp_ifindex;
	}
	/* If app asked for hoplimit and it has changed ... */
	if (addflag & SCTP_IPV6_RECVHOPLIMIT) {
		cmsg = (struct cmsghdr *)optptr;
		cmsg->cmsg_level = IPPROTO_IPV6;
		cmsg->cmsg_type = IPV6_HOPLIMIT;
		cmsg->cmsg_len = sizeof (*cmsg) + sizeof (uint_t);
		optptr += sizeof (*cmsg);

		*(uint_t *)optptr = ipp->ipp_hoplimit;
		optptr += sizeof (uint_t);
		ASSERT(OK_32PTR(optptr));
		/* Save as "last" value */
		sctp->sctp_recvhops = ipp->ipp_hoplimit;
	}
	if (addflag & SCTP_IPV6_RECVHOPOPTS) {
		cmsg = (struct cmsghdr *)optptr;
		cmsg->cmsg_level = IPPROTO_IPV6;
		cmsg->cmsg_type = IPV6_HOPOPTS;
		cmsg->cmsg_len = sizeof (*cmsg) + ipp->ipp_hopoptslen;
		optptr += sizeof (*cmsg);

		bcopy(ipp->ipp_hopopts, optptr, ipp->ipp_hopoptslen);
		optptr += ipp->ipp_hopoptslen;
		ASSERT(OK_32PTR(optptr));
		/* Save as last value */
		ip_savebuf((void **)&sctp->sctp_hopopts,
		    &sctp->sctp_hopoptslen,
		    (ipp->ipp_fields & IPPF_HOPOPTS),
		    ipp->ipp_hopopts, ipp->ipp_hopoptslen);
	}
	if (addflag & SCTP_IPV6_RECVRTDSTOPTS) {
		cmsg = (struct cmsghdr *)optptr;
		cmsg->cmsg_level = IPPROTO_IPV6;
		cmsg->cmsg_type = IPV6_RTHDRDSTOPTS;
		cmsg->cmsg_len = sizeof (*cmsg) + ipp->ipp_rtdstoptslen;
		optptr += sizeof (*cmsg);

		bcopy(ipp->ipp_rtdstopts, optptr, ipp->ipp_rtdstoptslen);
		optptr += ipp->ipp_rtdstoptslen;
		ASSERT(OK_32PTR(optptr));
		/* Save as last value */
		ip_savebuf((void **)&sctp->sctp_rtdstopts,
		    &sctp->sctp_rtdstoptslen,
		    (ipp->ipp_fields & IPPF_RTDSTOPTS),
		    ipp->ipp_rtdstopts, ipp->ipp_rtdstoptslen);
	}
	if (addflag & SCTP_IPV6_RECVRTHDR) {
		cmsg = (struct cmsghdr *)optptr;
		cmsg->cmsg_level = IPPROTO_IPV6;
		cmsg->cmsg_type = IPV6_RTHDR;
		cmsg->cmsg_len = sizeof (*cmsg) + ipp->ipp_rthdrlen;
		optptr += sizeof (*cmsg);

		bcopy(ipp->ipp_rthdr, optptr, ipp->ipp_rthdrlen);
		optptr += ipp->ipp_rthdrlen;
		ASSERT(OK_32PTR(optptr));
		/* Save as last value */
		ip_savebuf((void **)&sctp->sctp_rthdr,
		    &sctp->sctp_rthdrlen,
		    (ipp->ipp_fields & IPPF_RTHDR),
		    ipp->ipp_rthdr, ipp->ipp_rthdrlen);
	}
	if (addflag & SCTP_IPV6_RECVDSTOPTS) {
		cmsg = (struct cmsghdr *)optptr;
		cmsg->cmsg_level = IPPROTO_IPV6;
		cmsg->cmsg_type = IPV6_DSTOPTS;
		cmsg->cmsg_len = sizeof (*cmsg) + ipp->ipp_dstoptslen;
		optptr += sizeof (*cmsg);

		bcopy(ipp->ipp_dstopts, optptr, ipp->ipp_dstoptslen);
		optptr += ipp->ipp_dstoptslen;
		ASSERT(OK_32PTR(optptr));
		/* Save as last value */
		ip_savebuf((void **)&sctp->sctp_dstopts,
		    &sctp->sctp_dstoptslen,
		    (ipp->ipp_fields & IPPF_DSTOPTS),
		    ipp->ipp_dstopts, ipp->ipp_dstoptslen);
	}

	/* Every sized byte must have been written exactly once. */
	ASSERT(optptr == mp1->b_wptr);

	return (0);
}
6080Sstevel@tonic-gate 
6090Sstevel@tonic-gate void
6100Sstevel@tonic-gate sctp_free_reass(sctp_instr_t *sip)
6110Sstevel@tonic-gate {
6120Sstevel@tonic-gate 	mblk_t *mp, *mpnext, *mctl;
6130Sstevel@tonic-gate 
6140Sstevel@tonic-gate 	for (mp = sip->istr_reass; mp != NULL; mp = mpnext) {
6150Sstevel@tonic-gate 		mpnext = mp->b_next;
6160Sstevel@tonic-gate 		mp->b_next = NULL;
6170Sstevel@tonic-gate 		mp->b_prev = NULL;
6180Sstevel@tonic-gate 		if (DB_TYPE(mp) == M_CTL) {
6190Sstevel@tonic-gate 			mctl = mp;
6200Sstevel@tonic-gate 			ASSERT(mp->b_cont != NULL);
6210Sstevel@tonic-gate 			mp = mp->b_cont;
6220Sstevel@tonic-gate 			mctl->b_cont = NULL;
6230Sstevel@tonic-gate 			freeb(mctl);
6240Sstevel@tonic-gate 		}
6250Sstevel@tonic-gate 		freemsg(mp);
6260Sstevel@tonic-gate 	}
6270Sstevel@tonic-gate }
6280Sstevel@tonic-gate 
6290Sstevel@tonic-gate /*
6300Sstevel@tonic-gate  * If the series of data fragments of which dmp is a part is successfully
6310Sstevel@tonic-gate  * reassembled, the first mblk in the series is returned. dc is adjusted
6320Sstevel@tonic-gate  * to point at the data chunk in the lead mblk, and b_rptr also points to
6330Sstevel@tonic-gate  * the data chunk; the following mblk's b_rptr's point at the actual payload.
6340Sstevel@tonic-gate  *
6350Sstevel@tonic-gate  * If the series is not yet reassembled, NULL is returned. dc is not changed.
6360Sstevel@tonic-gate  * XXX should probably move this up into the state machine.
6370Sstevel@tonic-gate  */
6380Sstevel@tonic-gate 
6390Sstevel@tonic-gate /* Fragment list for un-ordered messages. Partial delivery is not supported */
6400Sstevel@tonic-gate static mblk_t *
6410Sstevel@tonic-gate sctp_uodata_frag(sctp_t *sctp, mblk_t *dmp, sctp_data_hdr_t **dc)
6420Sstevel@tonic-gate {
6430Sstevel@tonic-gate 	mblk_t		*hmp;
6440Sstevel@tonic-gate 	mblk_t		*begin = NULL;
6450Sstevel@tonic-gate 	mblk_t		*end = NULL;
6460Sstevel@tonic-gate 	sctp_data_hdr_t	*qdc;
6470Sstevel@tonic-gate 	uint32_t	ntsn;
6480Sstevel@tonic-gate 	uint32_t	tsn = ntohl((*dc)->sdh_tsn);
6490Sstevel@tonic-gate #ifdef	DEBUG
6500Sstevel@tonic-gate 	mblk_t		*mp1;
6510Sstevel@tonic-gate #endif
6520Sstevel@tonic-gate 
6530Sstevel@tonic-gate 	/* First frag. */
6540Sstevel@tonic-gate 	if (sctp->sctp_uo_frags == NULL) {
6550Sstevel@tonic-gate 		sctp->sctp_uo_frags = dmp;
6560Sstevel@tonic-gate 		return (NULL);
6570Sstevel@tonic-gate 	}
6580Sstevel@tonic-gate 	hmp = sctp->sctp_uo_frags;
6590Sstevel@tonic-gate 	/*
6600Sstevel@tonic-gate 	 * Insert the segment according to the TSN, fragmented unordered
6610Sstevel@tonic-gate 	 * chunks are sequenced by TSN.
6620Sstevel@tonic-gate 	 */
6630Sstevel@tonic-gate 	while (hmp != NULL) {
6640Sstevel@tonic-gate 		qdc = (sctp_data_hdr_t *)hmp->b_rptr;
6650Sstevel@tonic-gate 		ntsn = ntohl(qdc->sdh_tsn);
6660Sstevel@tonic-gate 		if (SEQ_GT(ntsn, tsn)) {
6670Sstevel@tonic-gate 			if (hmp->b_prev == NULL) {
6680Sstevel@tonic-gate 				dmp->b_next = hmp;
6690Sstevel@tonic-gate 				hmp->b_prev = dmp;
6700Sstevel@tonic-gate 				sctp->sctp_uo_frags = dmp;
6710Sstevel@tonic-gate 			} else {
6720Sstevel@tonic-gate 				dmp->b_next = hmp;
6730Sstevel@tonic-gate 				dmp->b_prev = hmp->b_prev;
6740Sstevel@tonic-gate 				hmp->b_prev->b_next = dmp;
6750Sstevel@tonic-gate 				hmp->b_prev = dmp;
6760Sstevel@tonic-gate 			}
6770Sstevel@tonic-gate 			break;
6780Sstevel@tonic-gate 		}
6790Sstevel@tonic-gate 		if (hmp->b_next == NULL) {
6800Sstevel@tonic-gate 			hmp->b_next = dmp;
6810Sstevel@tonic-gate 			dmp->b_prev = hmp;
6820Sstevel@tonic-gate 			break;
6830Sstevel@tonic-gate 		}
6840Sstevel@tonic-gate 		hmp = hmp->b_next;
6850Sstevel@tonic-gate 	}
6860Sstevel@tonic-gate 	/* check if we completed a msg */
6870Sstevel@tonic-gate 	if (SCTP_DATA_GET_BBIT(*dc)) {
6880Sstevel@tonic-gate 		begin = dmp;
6890Sstevel@tonic-gate 	} else if (SCTP_DATA_GET_EBIT(*dc)) {
6900Sstevel@tonic-gate 		end = dmp;
6910Sstevel@tonic-gate 	}
6920Sstevel@tonic-gate 	/*
6930Sstevel@tonic-gate 	 * We walk consecutive TSNs backwards till we get a seg. with
6940Sstevel@tonic-gate 	 * the B bit
6950Sstevel@tonic-gate 	 */
6960Sstevel@tonic-gate 	if (begin == NULL) {
6970Sstevel@tonic-gate 		for (hmp = dmp->b_prev; hmp != NULL; hmp = hmp->b_prev) {
6980Sstevel@tonic-gate 			qdc = (sctp_data_hdr_t *)hmp->b_rptr;
6990Sstevel@tonic-gate 			ntsn = ntohl(qdc->sdh_tsn);
7000Sstevel@tonic-gate 			if ((int32_t)(tsn - ntsn) > 1) {
7010Sstevel@tonic-gate 				return (NULL);
7020Sstevel@tonic-gate 			}
7030Sstevel@tonic-gate 			if (SCTP_DATA_GET_BBIT(qdc)) {
7040Sstevel@tonic-gate 				begin = hmp;
7050Sstevel@tonic-gate 				break;
7060Sstevel@tonic-gate 			}
7070Sstevel@tonic-gate 			tsn = ntsn;
7080Sstevel@tonic-gate 		}
7090Sstevel@tonic-gate 	}
7100Sstevel@tonic-gate 	tsn = ntohl((*dc)->sdh_tsn);
7110Sstevel@tonic-gate 	/*
7120Sstevel@tonic-gate 	 * We walk consecutive TSNs till we get a seg. with the E bit
7130Sstevel@tonic-gate 	 */
7140Sstevel@tonic-gate 	if (end == NULL) {
7150Sstevel@tonic-gate 		for (hmp = dmp->b_next; hmp != NULL; hmp = hmp->b_next) {
7160Sstevel@tonic-gate 			qdc = (sctp_data_hdr_t *)hmp->b_rptr;
7170Sstevel@tonic-gate 			ntsn = ntohl(qdc->sdh_tsn);
7180Sstevel@tonic-gate 			if ((int32_t)(ntsn - tsn) > 1) {
7190Sstevel@tonic-gate 				return (NULL);
7200Sstevel@tonic-gate 			}
7210Sstevel@tonic-gate 			if (SCTP_DATA_GET_EBIT(qdc)) {
7220Sstevel@tonic-gate 				end = hmp;
7230Sstevel@tonic-gate 				break;
7240Sstevel@tonic-gate 			}
7250Sstevel@tonic-gate 			tsn = ntsn;
7260Sstevel@tonic-gate 		}
7270Sstevel@tonic-gate 	}
7280Sstevel@tonic-gate 	if (begin == NULL || end == NULL) {
7290Sstevel@tonic-gate 		return (NULL);
7300Sstevel@tonic-gate 	}
7310Sstevel@tonic-gate 	/* Got one!, Remove the msg from the list */
7320Sstevel@tonic-gate 	if (sctp->sctp_uo_frags == begin) {
7330Sstevel@tonic-gate 		ASSERT(begin->b_prev == NULL);
7340Sstevel@tonic-gate 		sctp->sctp_uo_frags = end->b_next;
7350Sstevel@tonic-gate 		if (end->b_next != NULL)
7360Sstevel@tonic-gate 			end->b_next->b_prev = NULL;
7370Sstevel@tonic-gate 	} else {
7380Sstevel@tonic-gate 		begin->b_prev->b_next = end->b_next;
7390Sstevel@tonic-gate 		if (end->b_next != NULL)
7400Sstevel@tonic-gate 			end->b_next->b_prev = begin->b_prev;
7410Sstevel@tonic-gate 	}
7420Sstevel@tonic-gate 	begin->b_prev = NULL;
7430Sstevel@tonic-gate 	end->b_next = NULL;
7440Sstevel@tonic-gate 
7450Sstevel@tonic-gate 	/*
7460Sstevel@tonic-gate 	 * Null out b_next and b_prev and chain using b_cont.
7470Sstevel@tonic-gate 	 */
7480Sstevel@tonic-gate 	dmp = end = begin;
7490Sstevel@tonic-gate 	hmp = begin->b_next;
7500Sstevel@tonic-gate 	*dc = (sctp_data_hdr_t *)begin->b_rptr;
7510Sstevel@tonic-gate 	begin->b_next = NULL;
7520Sstevel@tonic-gate 	while (hmp != NULL) {
7530Sstevel@tonic-gate 		qdc = (sctp_data_hdr_t *)hmp->b_rptr;
7540Sstevel@tonic-gate 		hmp->b_rptr = (uchar_t *)(qdc + 1);
7550Sstevel@tonic-gate 		end = hmp->b_next;
7560Sstevel@tonic-gate 		dmp->b_cont = hmp;
7570Sstevel@tonic-gate 		dmp = hmp;
7580Sstevel@tonic-gate 
7590Sstevel@tonic-gate 		if (end != NULL)
7600Sstevel@tonic-gate 			hmp->b_next = NULL;
7610Sstevel@tonic-gate 		hmp->b_prev = NULL;
7620Sstevel@tonic-gate 		hmp = end;
7630Sstevel@tonic-gate 	}
7640Sstevel@tonic-gate 	BUMP_LOCAL(sctp->sctp_reassmsgs);
7650Sstevel@tonic-gate #ifdef	DEBUG
7660Sstevel@tonic-gate 	mp1 = begin;
7670Sstevel@tonic-gate 	while (mp1 != NULL) {
7680Sstevel@tonic-gate 		ASSERT(mp1->b_next == NULL);
7690Sstevel@tonic-gate 		ASSERT(mp1->b_prev == NULL);
7700Sstevel@tonic-gate 		mp1 = mp1->b_cont;
7710Sstevel@tonic-gate 	}
7720Sstevel@tonic-gate #endif
7730Sstevel@tonic-gate 	return (begin);
7740Sstevel@tonic-gate }
7750Sstevel@tonic-gate /*
7760Sstevel@tonic-gate  * Fragment list for ordered messages.
7770Sstevel@tonic-gate  * If no error occures, error is set to 0. If we run out of memory, error
7780Sstevel@tonic-gate  * is set to 1. If the peer commits a fatal error (like using different
7790Sstevel@tonic-gate  * sequence numbers for the same data fragment series), the association is
7800Sstevel@tonic-gate  * aborted and error is set to 2.
7810Sstevel@tonic-gate  */
7820Sstevel@tonic-gate static mblk_t *
7830Sstevel@tonic-gate sctp_data_frag(sctp_t *sctp, mblk_t *dmp, sctp_data_hdr_t **dc, int *error,
7840Sstevel@tonic-gate     sctp_instr_t *sip, int trypartial, int *tpfinished)
7850Sstevel@tonic-gate {
7860Sstevel@tonic-gate 	mblk_t		*hmp;
7870Sstevel@tonic-gate 	mblk_t		*pmp;
7880Sstevel@tonic-gate 	mblk_t		*qmp;
7890Sstevel@tonic-gate 	mblk_t		*mp;
7900Sstevel@tonic-gate 	mblk_t		*prev;
7910Sstevel@tonic-gate 	mblk_t		*prevprev;
7920Sstevel@tonic-gate 	mblk_t		*first_mp;
7930Sstevel@tonic-gate 	sctp_reass_t	*srp;
7940Sstevel@tonic-gate 	sctp_data_hdr_t	*qdc;
7950Sstevel@tonic-gate 	sctp_data_hdr_t	*bdc;
7960Sstevel@tonic-gate 	sctp_data_hdr_t	*edc;
7970Sstevel@tonic-gate 	uint32_t	tsn;
7980Sstevel@tonic-gate 
7990Sstevel@tonic-gate 	/*
8000Sstevel@tonic-gate 	 * We can overwrite the Link Layer + IP header here, I suppose.
8010Sstevel@tonic-gate 	 * The M_CTL does not leave this function. We need to check
8020Sstevel@tonic-gate 	 * DB_REF(dmp) before using DB_BASE(dmp), since there could be
8030Sstevel@tonic-gate 	 * two fragments for different ssns in the same mblk.
8040Sstevel@tonic-gate 	 */
8050Sstevel@tonic-gate #define	SCTP_NEW_REASS(nmp, dmp, srp, seterror)				\
8060Sstevel@tonic-gate 	if ((DB_REF(dmp) == 2) && (MBLKHEAD(dmp) >= 			\
8070Sstevel@tonic-gate 	    (sizeof (*(srp)) + sizeof (sctp_hdr_t)))) {			\
8080Sstevel@tonic-gate 		(nmp) = (dmp);						\
8090Sstevel@tonic-gate 	} else {							\
8100Sstevel@tonic-gate 		(nmp) = allocb(sizeof (*(srp)), BPRI_MED); 		\
8110Sstevel@tonic-gate 		if ((nmp) == NULL) {					\
8120Sstevel@tonic-gate 			switch (seterror) {				\
8130Sstevel@tonic-gate 			case B_TRUE:					\
8140Sstevel@tonic-gate 				*error = 1;				\
8150Sstevel@tonic-gate 				break;					\
8160Sstevel@tonic-gate 			}						\
8170Sstevel@tonic-gate 			return (NULL);					\
8180Sstevel@tonic-gate 		}							\
8190Sstevel@tonic-gate 		DB_TYPE(nmp) = M_CTL;					\
8200Sstevel@tonic-gate 		(nmp)->b_cont = dmp;					\
8210Sstevel@tonic-gate 	}								\
8220Sstevel@tonic-gate 	(srp) = (sctp_reass_t *)DB_BASE(nmp);
8230Sstevel@tonic-gate 
8240Sstevel@tonic-gate 	*error = 0;
8250Sstevel@tonic-gate 
8260Sstevel@tonic-gate 	/* find the reassembly queue for this data chunk */
8270Sstevel@tonic-gate 	hmp = qmp = sip->istr_reass;
8280Sstevel@tonic-gate 	for (; hmp != NULL; hmp = hmp->b_next) {
8290Sstevel@tonic-gate 		srp = (sctp_reass_t *)DB_BASE(hmp);
8300Sstevel@tonic-gate 		if (ntohs((*dc)->sdh_ssn) == srp->ssn)
8310Sstevel@tonic-gate 			goto foundit;
8320Sstevel@tonic-gate 		else if (SSN_GT(srp->ssn, ntohs((*dc)->sdh_ssn)))
8330Sstevel@tonic-gate 			break;
8340Sstevel@tonic-gate 		qmp = hmp;
8350Sstevel@tonic-gate 	}
8360Sstevel@tonic-gate 
8370Sstevel@tonic-gate 	SCTP_NEW_REASS(pmp, dmp, srp, B_TRUE);
8380Sstevel@tonic-gate 	srp->ssn = ntohs((*dc)->sdh_ssn);
8390Sstevel@tonic-gate 	srp->needed = 0;
8400Sstevel@tonic-gate 	srp->got = 1;
8410Sstevel@tonic-gate 	srp->tail = dmp;
8420Sstevel@tonic-gate 	srp->partial_delivered = B_FALSE;
8430Sstevel@tonic-gate 
8440Sstevel@tonic-gate 	if (hmp != NULL) {
8450Sstevel@tonic-gate 		if (sip->istr_reass == hmp) {
8460Sstevel@tonic-gate 			sip->istr_reass = pmp;
8470Sstevel@tonic-gate 			pmp->b_next = hmp;
8480Sstevel@tonic-gate 			pmp->b_prev = NULL;
8490Sstevel@tonic-gate 			hmp->b_prev = pmp;
8500Sstevel@tonic-gate 		} else {
8510Sstevel@tonic-gate 			qmp->b_next = pmp;
8520Sstevel@tonic-gate 			pmp->b_prev = qmp;
8530Sstevel@tonic-gate 			pmp->b_next = hmp;
8540Sstevel@tonic-gate 			hmp->b_prev = pmp;
8550Sstevel@tonic-gate 		}
8560Sstevel@tonic-gate 	} else {
8570Sstevel@tonic-gate 		/* make a new reass head and stick it on the end */
8580Sstevel@tonic-gate 		if (sip->istr_reass == NULL) {
8590Sstevel@tonic-gate 			sip->istr_reass = pmp;
8600Sstevel@tonic-gate 			pmp->b_prev = NULL;
8610Sstevel@tonic-gate 		} else {
8620Sstevel@tonic-gate 			qmp->b_next = pmp;
8630Sstevel@tonic-gate 			pmp->b_prev = qmp;
8640Sstevel@tonic-gate 		}
8650Sstevel@tonic-gate 		pmp->b_next = NULL;
8660Sstevel@tonic-gate 	}
8670Sstevel@tonic-gate 	return (NULL);
8680Sstevel@tonic-gate foundit:
8690Sstevel@tonic-gate 	/*
8700Sstevel@tonic-gate 	 * else already have a reassembly queue. Insert the new data chunk
8710Sstevel@tonic-gate 	 * in the reassemble queue. Try the tail first, on the assumption
8720Sstevel@tonic-gate 	 * that the fragments are coming in in order.
8730Sstevel@tonic-gate 	 */
8740Sstevel@tonic-gate 
8750Sstevel@tonic-gate 	qmp = srp->tail;
8760Sstevel@tonic-gate 	qdc = (sctp_data_hdr_t *)qmp->b_rptr;
8770Sstevel@tonic-gate 	ASSERT(qmp->b_cont == NULL);
8780Sstevel@tonic-gate 
8790Sstevel@tonic-gate 	/* XXXIs it fine to do this just here? */
8800Sstevel@tonic-gate 	if ((*dc)->sdh_sid != qdc->sdh_sid) {
8810Sstevel@tonic-gate 		/* our peer is fatally confused; XXX abort the assc */
8820Sstevel@tonic-gate 		*error = 2;
8830Sstevel@tonic-gate 		return (NULL);
8840Sstevel@tonic-gate 	}
8850Sstevel@tonic-gate 	if (SEQ_GT(ntohl((*dc)->sdh_tsn), ntohl(qdc->sdh_tsn))) {
8860Sstevel@tonic-gate 		qmp->b_cont = dmp;
8870Sstevel@tonic-gate 		srp->tail = dmp;
8880Sstevel@tonic-gate 		dmp->b_cont = NULL;
8890Sstevel@tonic-gate 		goto inserted;
8900Sstevel@tonic-gate 	}
8910Sstevel@tonic-gate 
8920Sstevel@tonic-gate 	/* Next check for insertion at the beginning */
8930Sstevel@tonic-gate 	qmp = (DB_TYPE(hmp) == M_DATA) ? hmp : hmp->b_cont;
8940Sstevel@tonic-gate 	qdc = (sctp_data_hdr_t *)qmp->b_rptr;
8950Sstevel@tonic-gate 	if (SEQ_LT(ntohl((*dc)->sdh_tsn), ntohl(qdc->sdh_tsn))) {
8960Sstevel@tonic-gate 		if (DB_TYPE(hmp) == M_DATA) {
8970Sstevel@tonic-gate 			sctp_reass_t	*srp1 = srp;
8980Sstevel@tonic-gate 
8990Sstevel@tonic-gate 			SCTP_NEW_REASS(pmp, dmp, srp, B_TRUE);
9000Sstevel@tonic-gate 			ASSERT(pmp->b_prev == NULL && pmp->b_next == NULL);
9010Sstevel@tonic-gate 			if (sip->istr_reass == hmp) {
9020Sstevel@tonic-gate 				sip->istr_reass = pmp;
9030Sstevel@tonic-gate 				if (hmp->b_next != NULL) {
9040Sstevel@tonic-gate 					hmp->b_next->b_prev = pmp;
9050Sstevel@tonic-gate 					pmp->b_next = hmp->b_next;
9060Sstevel@tonic-gate 				}
9070Sstevel@tonic-gate 			} else {
9080Sstevel@tonic-gate 				hmp->b_prev->b_next = pmp;
9090Sstevel@tonic-gate 				pmp->b_prev = hmp->b_prev;
9100Sstevel@tonic-gate 				if (hmp->b_next != NULL) {
9110Sstevel@tonic-gate 					hmp->b_next->b_prev = pmp;
9120Sstevel@tonic-gate 					pmp->b_next = hmp->b_next;
9130Sstevel@tonic-gate 				}
9140Sstevel@tonic-gate 			}
9150Sstevel@tonic-gate 			srp->ssn = srp1->ssn;
9160Sstevel@tonic-gate 			srp->needed = srp1->needed;
9170Sstevel@tonic-gate 			srp->got = srp1->got;
9180Sstevel@tonic-gate 			srp->tail = srp1->tail;
9190Sstevel@tonic-gate 			srp->partial_delivered = srp1->partial_delivered;
9200Sstevel@tonic-gate 			hmp->b_next = hmp->b_prev = NULL;
9210Sstevel@tonic-gate 			dmp->b_cont = hmp;
9220Sstevel@tonic-gate 			hmp = pmp;
9230Sstevel@tonic-gate 		} else {
9240Sstevel@tonic-gate 			ASSERT(DB_TYPE(hmp) == M_CTL);
9250Sstevel@tonic-gate 			dmp->b_cont = qmp;
9260Sstevel@tonic-gate 			hmp->b_cont = dmp;
9270Sstevel@tonic-gate 		}
9280Sstevel@tonic-gate 		goto inserted;
9290Sstevel@tonic-gate 	}
9300Sstevel@tonic-gate 
9310Sstevel@tonic-gate 	/* Insert somewhere in the middle */
9320Sstevel@tonic-gate 	for (;;) {
9330Sstevel@tonic-gate 		/* Tail check above should have caught this */
9340Sstevel@tonic-gate 		ASSERT(qmp->b_cont != NULL);
9350Sstevel@tonic-gate 
9360Sstevel@tonic-gate 		qdc = (sctp_data_hdr_t *)qmp->b_cont->b_rptr;
9370Sstevel@tonic-gate 		if (SEQ_LT(ntohl((*dc)->sdh_tsn), ntohl(qdc->sdh_tsn))) {
9380Sstevel@tonic-gate 			/* insert here */
9390Sstevel@tonic-gate 			dmp->b_cont = qmp->b_cont;
9400Sstevel@tonic-gate 			qmp->b_cont = dmp;
9410Sstevel@tonic-gate 			break;
9420Sstevel@tonic-gate 		}
9430Sstevel@tonic-gate 		qmp = qmp->b_cont;
9440Sstevel@tonic-gate 	}
9450Sstevel@tonic-gate 
9460Sstevel@tonic-gate inserted:
9470Sstevel@tonic-gate 	(srp->got)++;
9480Sstevel@tonic-gate 	first_mp = (DB_TYPE(hmp) == M_DATA) ? hmp : hmp->b_cont;
9490Sstevel@tonic-gate 	if (srp->needed == 0) {
9500Sstevel@tonic-gate 		/* check if we have the first and last fragments */
9510Sstevel@tonic-gate 		bdc = (sctp_data_hdr_t *)first_mp->b_rptr;
9520Sstevel@tonic-gate 		edc = (sctp_data_hdr_t *)srp->tail->b_rptr;
9530Sstevel@tonic-gate 
9540Sstevel@tonic-gate 		/* calculate how many fragments are needed, if possible  */
9550Sstevel@tonic-gate 		if (SCTP_DATA_GET_BBIT(bdc) && SCTP_DATA_GET_EBIT(edc))
9560Sstevel@tonic-gate 			srp->needed = ntohl(edc->sdh_tsn) -
9570Sstevel@tonic-gate 			    ntohl(bdc->sdh_tsn) + 1;
9580Sstevel@tonic-gate 	}
9590Sstevel@tonic-gate 
9600Sstevel@tonic-gate 	if (srp->needed != srp->got) {
9610Sstevel@tonic-gate 		if (!trypartial)
9620Sstevel@tonic-gate 			return (NULL);
9630Sstevel@tonic-gate 		/*
9640Sstevel@tonic-gate 		 * Try partial delivery. We need a consecutive run of
9650Sstevel@tonic-gate 		 * at least two chunks, starting from the first chunk
9660Sstevel@tonic-gate 		 * (which may have been the last + 1 chunk from a
9670Sstevel@tonic-gate 		 * previous partial delivery).
9680Sstevel@tonic-gate 		 */
9690Sstevel@tonic-gate 		dprint(4, ("trypartial: got=%d, needed=%d\n",
9700Sstevel@tonic-gate 		    (int)(srp->got), (int)(srp->needed)));
9710Sstevel@tonic-gate 		mp = first_mp;
9720Sstevel@tonic-gate 		if (mp->b_cont == NULL) {
9730Sstevel@tonic-gate 			/* need at least two chunks */
9740Sstevel@tonic-gate 			dprint(4, ("trypartial: only 1 chunk\n"));
9750Sstevel@tonic-gate 			return (NULL);
9760Sstevel@tonic-gate 		}
9770Sstevel@tonic-gate 
9780Sstevel@tonic-gate 		qdc = (sctp_data_hdr_t *)mp->b_rptr;
9790Sstevel@tonic-gate 		if (!SCTP_DATA_GET_BBIT(qdc)) {
9800Sstevel@tonic-gate 			/* don't have first chunk; can't do it. */
9810Sstevel@tonic-gate 			dprint(4, ("trypartial: no beginning\n"));
9820Sstevel@tonic-gate 			return (NULL);
9830Sstevel@tonic-gate 		}
9840Sstevel@tonic-gate 
9850Sstevel@tonic-gate 		tsn = ntohl(qdc->sdh_tsn) + 1;
9860Sstevel@tonic-gate 
9870Sstevel@tonic-gate 		/*
9880Sstevel@tonic-gate 		 * This loop has two exit conditions: the
9890Sstevel@tonic-gate 		 * end of received chunks has been reached, or
9900Sstevel@tonic-gate 		 * there is a break in the sequence. We want
9910Sstevel@tonic-gate 		 * to chop the reassembly list as follows (the
9920Sstevel@tonic-gate 		 * numbers are TSNs):
9930Sstevel@tonic-gate 		 *   10 -> 11 -> | 12	(end of chunks)
9940Sstevel@tonic-gate 		 *   10 -> 11 -> | 12 -> 14 (break in sequence)
9950Sstevel@tonic-gate 		 */
9960Sstevel@tonic-gate 		prevprev = prev = mp;
9970Sstevel@tonic-gate 		mp = mp->b_cont;
9980Sstevel@tonic-gate 		while (mp != NULL) {
9990Sstevel@tonic-gate 			qdc = (sctp_data_hdr_t *)mp->b_rptr;
10000Sstevel@tonic-gate 			if (ntohl(qdc->sdh_tsn) != tsn) {
10010Sstevel@tonic-gate 				/*
10020Sstevel@tonic-gate 				 * break in sequence.
10030Sstevel@tonic-gate 				 * 1st and 2nd chunks are not sequntial.
10040Sstevel@tonic-gate 				 */
10050Sstevel@tonic-gate 				if (mp == first_mp->b_cont)
10060Sstevel@tonic-gate 					return (NULL);
10070Sstevel@tonic-gate 				/* Back up mp and prev */
10080Sstevel@tonic-gate 				mp = prev;
10090Sstevel@tonic-gate 				prev = prevprev;
10100Sstevel@tonic-gate 				break;
10110Sstevel@tonic-gate 			}
10120Sstevel@tonic-gate 
10130Sstevel@tonic-gate 			/* end of sequence */
10140Sstevel@tonic-gate 			if (mp->b_cont == NULL)
10150Sstevel@tonic-gate 				break;
10160Sstevel@tonic-gate 
10170Sstevel@tonic-gate 			prevprev = prev;
10180Sstevel@tonic-gate 			prev = mp;
10190Sstevel@tonic-gate 			mp = mp->b_cont;
10200Sstevel@tonic-gate 			tsn++;
10210Sstevel@tonic-gate 		}
10220Sstevel@tonic-gate 		if (DB_TYPE(hmp) == M_DATA) {
10230Sstevel@tonic-gate 			sctp_reass_t	*srp1 = srp;
10240Sstevel@tonic-gate 
10250Sstevel@tonic-gate 			SCTP_NEW_REASS(pmp, mp, srp, B_FALSE);
10260Sstevel@tonic-gate 			ASSERT(pmp->b_prev == NULL && pmp->b_next == NULL);
10270Sstevel@tonic-gate 			if (sip->istr_reass == hmp) {
10280Sstevel@tonic-gate 				sip->istr_reass = pmp;
10290Sstevel@tonic-gate 				if (hmp->b_next != NULL) {
10300Sstevel@tonic-gate 					hmp->b_next->b_prev = pmp;
10310Sstevel@tonic-gate 					pmp->b_next = hmp->b_next;
10320Sstevel@tonic-gate 				}
10330Sstevel@tonic-gate 			} else {
10340Sstevel@tonic-gate 				hmp->b_prev->b_next = pmp;
10350Sstevel@tonic-gate 				pmp->b_prev = hmp->b_prev;
10360Sstevel@tonic-gate 				if (hmp->b_next != NULL) {
10370Sstevel@tonic-gate 					hmp->b_next->b_prev = pmp;
10380Sstevel@tonic-gate 					pmp->b_next = hmp->b_next;
10390Sstevel@tonic-gate 				}
10400Sstevel@tonic-gate 			}
10410Sstevel@tonic-gate 			srp->ssn = srp1->ssn;
10420Sstevel@tonic-gate 			srp->needed = srp1->needed;
10430Sstevel@tonic-gate 			srp->got = srp1->got;
10440Sstevel@tonic-gate 			srp->tail = srp1->tail;
10450Sstevel@tonic-gate 			hmp->b_next = hmp->b_prev = NULL;
10460Sstevel@tonic-gate 			dmp = hmp;
10470Sstevel@tonic-gate 			hmp = pmp;
10480Sstevel@tonic-gate 		} else {
10490Sstevel@tonic-gate 			ASSERT(DB_TYPE(hmp) == M_CTL);
10500Sstevel@tonic-gate 			dmp = hmp->b_cont;
10510Sstevel@tonic-gate 			hmp->b_cont = mp;
10520Sstevel@tonic-gate 		}
10530Sstevel@tonic-gate 		/*
10540Sstevel@tonic-gate 		 * mp now points at the last chunk in the sequence,
10550Sstevel@tonic-gate 		 * and prev points to mp's previous in the list.
10560Sstevel@tonic-gate 		 * We chop the list at prev, and convert mp into the
10570Sstevel@tonic-gate 		 * new list head by setting the B bit. Subsequence
10580Sstevel@tonic-gate 		 * fragment deliveries will follow the normal reassembly
10590Sstevel@tonic-gate 		 * path.
10600Sstevel@tonic-gate 		 */
10610Sstevel@tonic-gate 		prev->b_cont = NULL;
10620Sstevel@tonic-gate 		bdc = (sctp_data_hdr_t *)mp->b_rptr;
10630Sstevel@tonic-gate 		SCTP_DATA_SET_BBIT(bdc);
10640Sstevel@tonic-gate 		*tpfinished = 0;
10650Sstevel@tonic-gate 		srp->partial_delivered = B_TRUE;
10660Sstevel@tonic-gate 
10670Sstevel@tonic-gate 		dprint(4, ("trypartial: got some, got=%d, needed=%d\n",
10680Sstevel@tonic-gate 		    (int)(srp->got), (int)(srp->needed)));
10690Sstevel@tonic-gate 		goto fixup;
10700Sstevel@tonic-gate 	}
10710Sstevel@tonic-gate 
10720Sstevel@tonic-gate 	/*
10730Sstevel@tonic-gate 	 * else reassembly done; prepare the data for delivery.
10740Sstevel@tonic-gate 	 * First unlink hmp from the ssn list.
10750Sstevel@tonic-gate 	 */
10760Sstevel@tonic-gate 	if (sip->istr_reass == hmp) {
10770Sstevel@tonic-gate 		sip->istr_reass = hmp->b_next;
10780Sstevel@tonic-gate 		if (hmp->b_next) {
10790Sstevel@tonic-gate 			hmp->b_next->b_prev = NULL;
10800Sstevel@tonic-gate 		}
10810Sstevel@tonic-gate 	} else {
10820Sstevel@tonic-gate 		ASSERT(hmp->b_prev != NULL);
10830Sstevel@tonic-gate 		hmp->b_prev->b_next = hmp->b_next;
10840Sstevel@tonic-gate 		if (hmp->b_next) {
10850Sstevel@tonic-gate 			hmp->b_next->b_prev = hmp->b_prev;
10860Sstevel@tonic-gate 		}
10870Sstevel@tonic-gate 	}
10880Sstevel@tonic-gate 
10890Sstevel@tonic-gate 	/*
10900Sstevel@tonic-gate 	 * Using b_prev and b_next was a little sinful, but OK since
10910Sstevel@tonic-gate 	 * this mblk is never put*'d. However, freeb() will still
10920Sstevel@tonic-gate 	 * ASSERT that they are unused, so we need to NULL them out now.
10930Sstevel@tonic-gate 	 */
10940Sstevel@tonic-gate 	hmp->b_next = NULL;
10950Sstevel@tonic-gate 	hmp->b_prev = NULL;
10960Sstevel@tonic-gate 	dmp = hmp;
10970Sstevel@tonic-gate 	if (DB_TYPE(hmp) == M_CTL) {
10980Sstevel@tonic-gate 		dmp = dmp->b_cont;
10990Sstevel@tonic-gate 		hmp->b_cont = NULL;
11000Sstevel@tonic-gate 		freeb(hmp);
11010Sstevel@tonic-gate 	}
11020Sstevel@tonic-gate 	*tpfinished = 1;
11030Sstevel@tonic-gate 
11040Sstevel@tonic-gate fixup:
11050Sstevel@tonic-gate 	/*
11060Sstevel@tonic-gate 	 * Adjust all mblk's except the lead so their rptr's point to the
11070Sstevel@tonic-gate 	 * payload. sctp_data_chunk() will need to process the lead's
11080Sstevel@tonic-gate 	 * data chunk section, so leave it's rptr pointing at the data chunk.
11090Sstevel@tonic-gate 	 */
11100Sstevel@tonic-gate 	*dc = (sctp_data_hdr_t *)dmp->b_rptr;
11110Sstevel@tonic-gate 	if (trypartial && !(*tpfinished)) {
11120Sstevel@tonic-gate 		(srp->got)--;
11130Sstevel@tonic-gate 		ASSERT(srp->got != 0);
11140Sstevel@tonic-gate 		if (srp->needed != 0) {
11150Sstevel@tonic-gate 			(srp->needed)--;
11160Sstevel@tonic-gate 			ASSERT(srp->needed != 0);
11170Sstevel@tonic-gate 		}
11180Sstevel@tonic-gate 	}
11190Sstevel@tonic-gate 	for (qmp = dmp->b_cont; qmp; qmp = qmp->b_cont) {
11200Sstevel@tonic-gate 		qdc = (sctp_data_hdr_t *)qmp->b_rptr;
11210Sstevel@tonic-gate 		qmp->b_rptr = (uchar_t *)(qdc + 1);
11220Sstevel@tonic-gate 
11230Sstevel@tonic-gate 		/*
11240Sstevel@tonic-gate 		 * If in partial delivery, deduct the balance from got
11250Sstevel@tonic-gate 		 * and needed here, now that we know we are actually
11260Sstevel@tonic-gate 		 * delivering these data.
11270Sstevel@tonic-gate 		 */
11280Sstevel@tonic-gate 		if (trypartial && !(*tpfinished)) {
11290Sstevel@tonic-gate 			(srp->got)--;
11300Sstevel@tonic-gate 			ASSERT(srp->got != 0);
11310Sstevel@tonic-gate 			if (srp->needed != 0) {
11320Sstevel@tonic-gate 				(srp->needed)--;
11330Sstevel@tonic-gate 				ASSERT(srp->needed != 0);
11340Sstevel@tonic-gate 			}
11350Sstevel@tonic-gate 		}
11360Sstevel@tonic-gate 	}
11370Sstevel@tonic-gate 	BUMP_LOCAL(sctp->sctp_reassmsgs);
11380Sstevel@tonic-gate 
11390Sstevel@tonic-gate 	return (dmp);
11400Sstevel@tonic-gate }
11410Sstevel@tonic-gate 
11420Sstevel@tonic-gate static void
11430Sstevel@tonic-gate sctp_add_dup(uint32_t tsn, mblk_t **dups)
11440Sstevel@tonic-gate {
11450Sstevel@tonic-gate 	mblk_t *mp;
11460Sstevel@tonic-gate 	size_t bsize = SCTP_DUP_MBLK_SZ * sizeof (tsn);
11470Sstevel@tonic-gate 
11480Sstevel@tonic-gate 	if (dups == NULL) {
11490Sstevel@tonic-gate 		return;
11500Sstevel@tonic-gate 	}
11510Sstevel@tonic-gate 
11520Sstevel@tonic-gate 	/* first time? */
11530Sstevel@tonic-gate 	if (*dups == NULL) {
11540Sstevel@tonic-gate 		*dups = allocb(bsize, BPRI_MED);
11550Sstevel@tonic-gate 		if (*dups == NULL) {
11560Sstevel@tonic-gate 			return;
11570Sstevel@tonic-gate 		}
11580Sstevel@tonic-gate 	}
11590Sstevel@tonic-gate 
11600Sstevel@tonic-gate 	mp = *dups;
11610Sstevel@tonic-gate 	if ((mp->b_wptr - mp->b_rptr) >= bsize) {
11620Sstevel@tonic-gate 		/* maximum reached */
11630Sstevel@tonic-gate 		return;
11640Sstevel@tonic-gate 	}
11650Sstevel@tonic-gate 
11660Sstevel@tonic-gate 	/* add the duplicate tsn */
11670Sstevel@tonic-gate 	bcopy(&tsn, mp->b_wptr, sizeof (tsn));
11680Sstevel@tonic-gate 	mp->b_wptr += sizeof (tsn);
11690Sstevel@tonic-gate 	ASSERT((mp->b_wptr - mp->b_rptr) <= bsize);
11700Sstevel@tonic-gate }
11710Sstevel@tonic-gate 
11720Sstevel@tonic-gate static void
11730Sstevel@tonic-gate sctp_data_chunk(sctp_t *sctp, sctp_chunk_hdr_t *ch, mblk_t *mp, mblk_t **dups,
11740Sstevel@tonic-gate     sctp_faddr_t *fp, ip6_pkt_t *ipp)
11750Sstevel@tonic-gate {
11760Sstevel@tonic-gate 	sctp_data_hdr_t *dc;
11770Sstevel@tonic-gate 	mblk_t *dmp, *pmp;
11780Sstevel@tonic-gate 	mblk_t *errmp;
11790Sstevel@tonic-gate 	sctp_instr_t *instr;
11800Sstevel@tonic-gate 	int ubit;
11810Sstevel@tonic-gate 	int isfrag;
11820Sstevel@tonic-gate 	uint16_t ssn;
11830Sstevel@tonic-gate 	uint32_t oftsn;
11840Sstevel@tonic-gate 	boolean_t can_deliver = B_TRUE;
11850Sstevel@tonic-gate 	uint32_t tsn;
11860Sstevel@tonic-gate 	int dlen;
11870Sstevel@tonic-gate 	int trypartial = 0;
11880Sstevel@tonic-gate 	int tpfinished = 1;
11890Sstevel@tonic-gate 	int32_t new_rwnd;
11900Sstevel@tonic-gate 
11910Sstevel@tonic-gate 	/* The following are used multiple times, so we inline them */
11920Sstevel@tonic-gate #define	SCTP_ACK_IT(sctp, tsn)						\
11930Sstevel@tonic-gate 	if (tsn == sctp->sctp_ftsn) {					\
11940Sstevel@tonic-gate 		dprint(2, ("data_chunk: acking next %x\n", tsn));	\
1195*1932Svi117747 		(sctp)->sctp_ftsn++;					\
1196*1932Svi117747 		if ((sctp)->sctp_sack_gaps > 0)				\
1197*1932Svi117747 			(sctp)->sctp_force_sack = 1;			\
11980Sstevel@tonic-gate 	} else if (SEQ_GT(tsn, sctp->sctp_ftsn)) {			\
11990Sstevel@tonic-gate 		/* Got a gap; record it */				\
12000Sstevel@tonic-gate 		dprint(2, ("data_chunk: acking gap %x\n", tsn));	\
1201*1932Svi117747 		sctp_ack_add(&sctp->sctp_sack_info, tsn,		\
1202*1932Svi117747 		    &sctp->sctp_sack_gaps);				\
12030Sstevel@tonic-gate 		sctp->sctp_force_sack = 1;				\
12040Sstevel@tonic-gate 	}
12050Sstevel@tonic-gate 
12060Sstevel@tonic-gate 	errmp = NULL;
12070Sstevel@tonic-gate 	dmp = NULL;
12080Sstevel@tonic-gate 
12090Sstevel@tonic-gate 	dc = (sctp_data_hdr_t *)ch;
12100Sstevel@tonic-gate 	tsn = ntohl(dc->sdh_tsn);
12110Sstevel@tonic-gate 
12121676Sjpk 	dprint(3, ("sctp_data_chunk: mp=%p tsn=%x\n", (void *)mp, tsn));
12130Sstevel@tonic-gate 
12140Sstevel@tonic-gate 	/* Check for duplicates */
12150Sstevel@tonic-gate 	if (SEQ_LT(tsn, sctp->sctp_ftsn)) {
12160Sstevel@tonic-gate 		dprint(4, ("sctp_data_chunk: dropping duplicate\n"));
12170Sstevel@tonic-gate 		sctp->sctp_force_sack = 1;
12180Sstevel@tonic-gate 		sctp_add_dup(dc->sdh_tsn, dups);
12190Sstevel@tonic-gate 		return;
12200Sstevel@tonic-gate 	}
12210Sstevel@tonic-gate 
12220Sstevel@tonic-gate 	if (sctp->sctp_sack_info != NULL) {
12230Sstevel@tonic-gate 		sctp_set_t *sp;
12240Sstevel@tonic-gate 
12250Sstevel@tonic-gate 		for (sp = sctp->sctp_sack_info; sp; sp = sp->next) {
12260Sstevel@tonic-gate 			if (SEQ_GEQ(tsn, sp->begin) && SEQ_LEQ(tsn, sp->end)) {
12270Sstevel@tonic-gate 				dprint(4,
12280Sstevel@tonic-gate 				("sctp_data_chunk: dropping dup > cumtsn\n"));
12290Sstevel@tonic-gate 				sctp->sctp_force_sack = 1;
12300Sstevel@tonic-gate 				sctp_add_dup(dc->sdh_tsn, dups);
12310Sstevel@tonic-gate 				return;
12320Sstevel@tonic-gate 			}
12330Sstevel@tonic-gate 		}
12340Sstevel@tonic-gate 	}
12350Sstevel@tonic-gate 
12360Sstevel@tonic-gate 	/* We cannot deliver anything up now but we still need to handle it. */
12370Sstevel@tonic-gate 	if (SCTP_IS_DETACHED(sctp)) {
12380Sstevel@tonic-gate 		BUMP_MIB(&sctp_mib, sctpInClosed);
12390Sstevel@tonic-gate 		can_deliver = B_FALSE;
12400Sstevel@tonic-gate 	}
12410Sstevel@tonic-gate 
12420Sstevel@tonic-gate 	dlen = ntohs(dc->sdh_len) - sizeof (*dc);
12430Sstevel@tonic-gate 
12440Sstevel@tonic-gate 	/* Check for buffer space */
12450Sstevel@tonic-gate 	if (sctp->sctp_rwnd - sctp->sctp_rxqueued < dlen) {
12460Sstevel@tonic-gate 		/* Drop and SACK, but don't advance the cumulative TSN. */
12470Sstevel@tonic-gate 		sctp->sctp_force_sack = 1;
12480Sstevel@tonic-gate 		dprint(0, ("sctp_data_chunk: exceed rwnd %d rxqueued %d "
12490Sstevel@tonic-gate 			"ssn %d tsn %x\n", sctp->sctp_rwnd,
12500Sstevel@tonic-gate 			sctp->sctp_rxqueued, dc->sdh_ssn, ntohl(dc->sdh_tsn)));
12510Sstevel@tonic-gate 		return;
12520Sstevel@tonic-gate 	}
12530Sstevel@tonic-gate 
12540Sstevel@tonic-gate 	if (ntohs(dc->sdh_sid) >= sctp->sctp_num_istr) {
12550Sstevel@tonic-gate 		uint16_t	inval_parm[2];
12560Sstevel@tonic-gate 
12570Sstevel@tonic-gate 		inval_parm[0] = dc->sdh_sid;
12580Sstevel@tonic-gate 		/* RESERVED to be ignored at the receiving end */
12590Sstevel@tonic-gate 		inval_parm[1] = 0;
12600Sstevel@tonic-gate 		/* ack and drop it */
12610Sstevel@tonic-gate 		errmp = sctp_make_err(sctp, SCTP_ERR_BAD_SID,
12620Sstevel@tonic-gate 		    (char *)inval_parm, sizeof (inval_parm));
12630Sstevel@tonic-gate 		SCTP_ACK_IT(sctp, tsn);
12640Sstevel@tonic-gate 		if (errmp != NULL)
12650Sstevel@tonic-gate 			sctp_send_err(sctp, errmp, NULL);
12660Sstevel@tonic-gate 		return;
12670Sstevel@tonic-gate 	}
12680Sstevel@tonic-gate 
12690Sstevel@tonic-gate 	ubit = SCTP_DATA_GET_UBIT(dc);
12700Sstevel@tonic-gate 	ASSERT(sctp->sctp_instr != NULL);
12710Sstevel@tonic-gate 	instr = &sctp->sctp_instr[ntohs(dc->sdh_sid)];
12720Sstevel@tonic-gate 	/* Initialize the stream, if not yet used */
12730Sstevel@tonic-gate 	if (instr->sctp == NULL)
12740Sstevel@tonic-gate 		instr->sctp = sctp;
12750Sstevel@tonic-gate 	/*
12760Sstevel@tonic-gate 	 * If we are getting low on buffers set trypartial to try
12770Sstevel@tonic-gate 	 * a partial delivery if we are reassembling a fragmented
12780Sstevel@tonic-gate 	 * message. Only do this if we can immediately deliver the
12790Sstevel@tonic-gate 	 * partially assembled message, and only partially deliver
12800Sstevel@tonic-gate 	 * one message at a time (i.e. messages cannot be intermixed
12810Sstevel@tonic-gate 	 * arriving at the upper layer). A simple way to enforce
12820Sstevel@tonic-gate 	 * this is to only try partial delivery if this TSN is
12830Sstevel@tonic-gate 	 * the next expected TSN. Partial Delivery not supported
12840Sstevel@tonic-gate 	 * for un-ordered message.
12850Sstevel@tonic-gate 	 */
12860Sstevel@tonic-gate 	isfrag = !(SCTP_DATA_GET_BBIT(dc) && SCTP_DATA_GET_EBIT(dc));
12870Sstevel@tonic-gate 	ssn = ntohs(dc->sdh_ssn);
12880Sstevel@tonic-gate 	if ((sctp->sctp_rwnd - sctp->sctp_rxqueued < SCTP_RECV_LOWATER) &&
12890Sstevel@tonic-gate 	    !ubit && isfrag && (tsn == sctp->sctp_ftsn)) {
12900Sstevel@tonic-gate 		trypartial = 1;
12910Sstevel@tonic-gate 	}
12920Sstevel@tonic-gate 
12930Sstevel@tonic-gate 	dmp = dupb(mp);
12940Sstevel@tonic-gate 	if (dmp == NULL) {
12950Sstevel@tonic-gate 		/* drop it and don't ack it, causing the peer to retransmit */
12960Sstevel@tonic-gate 		return;
12970Sstevel@tonic-gate 	}
12980Sstevel@tonic-gate 	dmp->b_wptr = (uchar_t *)ch + ntohs(ch->sch_len);
12990Sstevel@tonic-gate 
13000Sstevel@tonic-gate 	sctp->sctp_rxqueued += dlen;
13010Sstevel@tonic-gate 
13020Sstevel@tonic-gate 	oftsn = sctp->sctp_ftsn;
13030Sstevel@tonic-gate 
13040Sstevel@tonic-gate 	if (isfrag) {
13050Sstevel@tonic-gate 		int error = 0;
13060Sstevel@tonic-gate 
13070Sstevel@tonic-gate 		/* fragmented data chunk */
13080Sstevel@tonic-gate 		dmp->b_rptr = (uchar_t *)dc;
13090Sstevel@tonic-gate 		if (ubit) {
13100Sstevel@tonic-gate 			dmp = sctp_uodata_frag(sctp, dmp, &dc);
13110Sstevel@tonic-gate #if	DEBUG
13120Sstevel@tonic-gate 			if (dmp != NULL) {
13130Sstevel@tonic-gate 				ASSERT(instr ==
13140Sstevel@tonic-gate 				    &sctp->sctp_instr[ntohs(dc->sdh_sid)]);
13150Sstevel@tonic-gate 			}
13160Sstevel@tonic-gate #endif
13170Sstevel@tonic-gate 		} else {
13180Sstevel@tonic-gate 			dmp = sctp_data_frag(sctp, dmp, &dc, &error, instr,
13190Sstevel@tonic-gate 			    trypartial, &tpfinished);
13200Sstevel@tonic-gate 		}
13210Sstevel@tonic-gate 		if (error != 0) {
13220Sstevel@tonic-gate 			sctp->sctp_rxqueued -= dlen;
13230Sstevel@tonic-gate 			if (error == 1) {
13240Sstevel@tonic-gate 				/*
13250Sstevel@tonic-gate 				 * out of memory; don't ack it so
13260Sstevel@tonic-gate 				 * the peer retransmits
13270Sstevel@tonic-gate 				 */
13280Sstevel@tonic-gate 				return;
13290Sstevel@tonic-gate 			} else if (error == 2) {
13300Sstevel@tonic-gate 				/*
13310Sstevel@tonic-gate 				 * fatal error (i.e. peer used different
13320Sstevel@tonic-gate 				 * ssn's for same fragmented data) --
13330Sstevel@tonic-gate 				 * the association has been aborted.
13340Sstevel@tonic-gate 				 * XXX need to return errval so state
13350Sstevel@tonic-gate 				 * machine can also abort processing.
13360Sstevel@tonic-gate 				 */
13370Sstevel@tonic-gate 				dprint(0, ("error 2: must not happen!\n"));
13380Sstevel@tonic-gate 				return;
13390Sstevel@tonic-gate 			}
13400Sstevel@tonic-gate 		}
13410Sstevel@tonic-gate 
13420Sstevel@tonic-gate 		if (dmp == NULL) {
13430Sstevel@tonic-gate 			/*
13440Sstevel@tonic-gate 			 * Can't process this data now, but the cumulative
13450Sstevel@tonic-gate 			 * TSN may be advanced, so do the checks at done.
13460Sstevel@tonic-gate 			 */
13470Sstevel@tonic-gate 			SCTP_ACK_IT(sctp, tsn);
13480Sstevel@tonic-gate 			goto done;
13490Sstevel@tonic-gate 		}
13500Sstevel@tonic-gate 	}
13510Sstevel@tonic-gate 
13520Sstevel@tonic-gate 	if (!ubit && !trypartial && ssn != instr->nextseq) {
13530Sstevel@tonic-gate 		/* Adjust rptr to point at the data chunk for compares */
13540Sstevel@tonic-gate 		dmp->b_rptr = (uchar_t *)dc;
13550Sstevel@tonic-gate 
13560Sstevel@tonic-gate 		dprint(2,
13570Sstevel@tonic-gate 		    ("data_chunk: inserted %x in pq (ssn %d expected %d)\n",
13580Sstevel@tonic-gate 		    ntohl(dc->sdh_tsn), (int)(ssn), (int)(instr->nextseq)));
13590Sstevel@tonic-gate 
13600Sstevel@tonic-gate 		if (instr->istr_msgs == NULL) {
13610Sstevel@tonic-gate 			instr->istr_msgs = dmp;
13620Sstevel@tonic-gate 			ASSERT(dmp->b_prev == NULL && dmp->b_next == NULL);
13630Sstevel@tonic-gate 		} else {
13640Sstevel@tonic-gate 			mblk_t			*imblk = instr->istr_msgs;
13650Sstevel@tonic-gate 			sctp_data_hdr_t		*idc;
13660Sstevel@tonic-gate 
13670Sstevel@tonic-gate 			/*
13680Sstevel@tonic-gate 			 * XXXNeed to take sequence wraps into account,
13690Sstevel@tonic-gate 			 * ... and a more efficient insertion algo.
13700Sstevel@tonic-gate 			 */
13710Sstevel@tonic-gate 			for (;;) {
13720Sstevel@tonic-gate 				idc = (sctp_data_hdr_t *)imblk->b_rptr;
13730Sstevel@tonic-gate 				if (SSN_GT(ntohs(idc->sdh_ssn),
13740Sstevel@tonic-gate 					ntohs(dc->sdh_ssn))) {
13750Sstevel@tonic-gate 					if (instr->istr_msgs == imblk) {
13760Sstevel@tonic-gate 						instr->istr_msgs = dmp;
13770Sstevel@tonic-gate 						dmp->b_next = imblk;
13780Sstevel@tonic-gate 						imblk->b_prev = dmp;
13790Sstevel@tonic-gate 					} else {
13800Sstevel@tonic-gate 						ASSERT(imblk->b_prev != NULL);
13810Sstevel@tonic-gate 						imblk->b_prev->b_next = dmp;
13820Sstevel@tonic-gate 						dmp->b_prev = imblk->b_prev;
13830Sstevel@tonic-gate 						imblk->b_prev = dmp;
13840Sstevel@tonic-gate 						dmp->b_next = imblk;
13850Sstevel@tonic-gate 					}
13860Sstevel@tonic-gate 					break;
13870Sstevel@tonic-gate 				}
13880Sstevel@tonic-gate 				if (imblk->b_next == NULL) {
13890Sstevel@tonic-gate 					imblk->b_next = dmp;
13900Sstevel@tonic-gate 					dmp->b_prev = imblk;
13910Sstevel@tonic-gate 					break;
13920Sstevel@tonic-gate 				}
13930Sstevel@tonic-gate 				imblk = imblk->b_next;
13940Sstevel@tonic-gate 			}
13950Sstevel@tonic-gate 		}
13960Sstevel@tonic-gate 		(instr->istr_nmsgs)++;
13970Sstevel@tonic-gate 		(sctp->sctp_istr_nmsgs)++;
13980Sstevel@tonic-gate 		SCTP_ACK_IT(sctp, tsn);
13990Sstevel@tonic-gate 		return;
14000Sstevel@tonic-gate 	}
14010Sstevel@tonic-gate 
14020Sstevel@tonic-gate 	/*
14030Sstevel@tonic-gate 	 * Else we can deliver the data directly. Recalculate
14040Sstevel@tonic-gate 	 * dlen now since we may have reassembled data.
14050Sstevel@tonic-gate 	 */
14060Sstevel@tonic-gate 	dlen = dmp->b_wptr - (uchar_t *)dc - sizeof (*dc);
14070Sstevel@tonic-gate 	for (pmp = dmp->b_cont; pmp != NULL; pmp = pmp->b_cont)
14080Sstevel@tonic-gate 		dlen += pmp->b_wptr - pmp->b_rptr;
14090Sstevel@tonic-gate 	ASSERT(sctp->sctp_rxqueued >= dlen);
14100Sstevel@tonic-gate 	ASSERT(sctp->sctp_rwnd >= dlen);
14110Sstevel@tonic-gate 
14120Sstevel@tonic-gate 	/* Deliver the message. */
14130Sstevel@tonic-gate 	sctp->sctp_rxqueued -= dlen;
14140Sstevel@tonic-gate 
14150Sstevel@tonic-gate 	if (can_deliver) {
14160Sstevel@tonic-gate 		dmp->b_rptr = (uchar_t *)(dc + 1);
14170Sstevel@tonic-gate 		if (sctp_input_add_ancillary(sctp, &dmp, dc, fp, ipp) == 0) {
14180Sstevel@tonic-gate 			dprint(1, ("sctp_data_chunk: delivering %lu bytes\n",
14190Sstevel@tonic-gate 			    msgdsize(dmp)));
14200Sstevel@tonic-gate 			sctp->sctp_rwnd -= dlen;
14210Sstevel@tonic-gate 			new_rwnd = sctp->sctp_ulp_recv(sctp->sctp_ulpd, dmp,
14220Sstevel@tonic-gate 			    tpfinished ? 0 : SCTP_PARTIAL_DATA);
14230Sstevel@tonic-gate 			if (new_rwnd > sctp->sctp_rwnd) {
14240Sstevel@tonic-gate 				sctp->sctp_rwnd = new_rwnd;
14250Sstevel@tonic-gate 			}
14260Sstevel@tonic-gate 			SCTP_ACK_IT(sctp, tsn);
14270Sstevel@tonic-gate 		} else {
14280Sstevel@tonic-gate 			/* Just free the message if we don't have memory. */
14290Sstevel@tonic-gate 			freemsg(dmp);
14300Sstevel@tonic-gate 			return;
14310Sstevel@tonic-gate 		}
14320Sstevel@tonic-gate 	} else {
14330Sstevel@tonic-gate 		/* About to free the data */
14340Sstevel@tonic-gate 		freemsg(dmp);
14350Sstevel@tonic-gate 		SCTP_ACK_IT(sctp, tsn);
14360Sstevel@tonic-gate 	}
14370Sstevel@tonic-gate 
14380Sstevel@tonic-gate 	/*
14390Sstevel@tonic-gate 	 * data, now enqueued, may already have been processed and free'd
14400Sstevel@tonic-gate 	 * by the ULP (or we may have just freed it above, if we could not
14410Sstevel@tonic-gate 	 * deliver it), so we must not reference it (this is why we kept
14420Sstevel@tonic-gate 	 * the ssn and ubit above).
14430Sstevel@tonic-gate 	 */
14440Sstevel@tonic-gate 	if (ubit != 0) {
14450Sstevel@tonic-gate 		BUMP_LOCAL(sctp->sctp_iudchunks);
14460Sstevel@tonic-gate 		goto done;
14470Sstevel@tonic-gate 	}
14480Sstevel@tonic-gate 	BUMP_LOCAL(sctp->sctp_idchunks);
14490Sstevel@tonic-gate 
14500Sstevel@tonic-gate 	/*
14510Sstevel@tonic-gate 	 * If there was a partial delivery and it has not finished,
14520Sstevel@tonic-gate 	 * don't pull anything from the pqueues.
14530Sstevel@tonic-gate 	 */
14540Sstevel@tonic-gate 	if (!tpfinished) {
14550Sstevel@tonic-gate 		goto done;
14560Sstevel@tonic-gate 	}
14570Sstevel@tonic-gate 
14580Sstevel@tonic-gate 	instr->nextseq = ssn + 1;
14590Sstevel@tonic-gate 	/* Deliver any successive data chunks in the instr queue */
14600Sstevel@tonic-gate 	while (instr->istr_nmsgs > 0) {
14610Sstevel@tonic-gate 		dmp = (mblk_t *)instr->istr_msgs;
14620Sstevel@tonic-gate 		dc = (sctp_data_hdr_t *)dmp->b_rptr;
14630Sstevel@tonic-gate 		ssn = ntohs(dc->sdh_ssn);
14640Sstevel@tonic-gate 		/* Gap in the sequence */
14650Sstevel@tonic-gate 		if (ssn != instr->nextseq)
14660Sstevel@tonic-gate 			break;
14670Sstevel@tonic-gate 
14680Sstevel@tonic-gate 		/* Else deliver the data */
14690Sstevel@tonic-gate 		(instr->istr_nmsgs)--;
14700Sstevel@tonic-gate 		(instr->nextseq)++;
14710Sstevel@tonic-gate 		(sctp->sctp_istr_nmsgs)--;
14720Sstevel@tonic-gate 
14730Sstevel@tonic-gate 		instr->istr_msgs = instr->istr_msgs->b_next;
14740Sstevel@tonic-gate 		if (instr->istr_msgs != NULL)
14750Sstevel@tonic-gate 			instr->istr_msgs->b_prev = NULL;
14760Sstevel@tonic-gate 		dmp->b_next = dmp->b_prev = NULL;
14770Sstevel@tonic-gate 
14780Sstevel@tonic-gate 		dprint(2, ("data_chunk: pulling %x from pq (ssn %d)\n",
14790Sstevel@tonic-gate 		    ntohl(dc->sdh_tsn), (int)ssn));
14800Sstevel@tonic-gate 
14810Sstevel@tonic-gate 		/*
14820Sstevel@tonic-gate 		 * If this chunk was reassembled, each b_cont represents
14830Sstevel@tonic-gate 		 * another TSN; advance ftsn now.
14840Sstevel@tonic-gate 		 */
14850Sstevel@tonic-gate 		dlen = dmp->b_wptr - dmp->b_rptr - sizeof (*dc);
14860Sstevel@tonic-gate 		for (pmp = dmp->b_cont; pmp; pmp = pmp->b_cont)
14870Sstevel@tonic-gate 			dlen += pmp->b_wptr - pmp->b_rptr;
14880Sstevel@tonic-gate 
14890Sstevel@tonic-gate 		ASSERT(sctp->sctp_rxqueued >= dlen);
14900Sstevel@tonic-gate 		ASSERT(sctp->sctp_rwnd >= dlen);
14910Sstevel@tonic-gate 
14920Sstevel@tonic-gate 		sctp->sctp_rxqueued -= dlen;
14930Sstevel@tonic-gate 		if (can_deliver) {
14940Sstevel@tonic-gate 			dmp->b_rptr = (uchar_t *)(dc + 1);
14950Sstevel@tonic-gate 			if (sctp_input_add_ancillary(sctp, &dmp, dc, fp,
14960Sstevel@tonic-gate 			    ipp) == 0) {
14970Sstevel@tonic-gate 				dprint(1, ("sctp_data_chunk: delivering %lu "
14980Sstevel@tonic-gate 				    "bytes\n", msgdsize(dmp)));
14990Sstevel@tonic-gate 				sctp->sctp_rwnd -= dlen;
15000Sstevel@tonic-gate 				new_rwnd = sctp->sctp_ulp_recv(sctp->sctp_ulpd,
15010Sstevel@tonic-gate 				    dmp, tpfinished ? 0 : SCTP_PARTIAL_DATA);
15020Sstevel@tonic-gate 				if (new_rwnd > sctp->sctp_rwnd) {
15030Sstevel@tonic-gate 					sctp->sctp_rwnd = new_rwnd;
15040Sstevel@tonic-gate 				}
15050Sstevel@tonic-gate 				SCTP_ACK_IT(sctp, tsn);
15060Sstevel@tonic-gate 			} else {
15070Sstevel@tonic-gate 				freemsg(dmp);
15080Sstevel@tonic-gate 				return;
15090Sstevel@tonic-gate 			}
15100Sstevel@tonic-gate 		} else {
15110Sstevel@tonic-gate 			/* About to free the data */
15120Sstevel@tonic-gate 			freemsg(dmp);
15130Sstevel@tonic-gate 			SCTP_ACK_IT(sctp, tsn);
15140Sstevel@tonic-gate 		}
15150Sstevel@tonic-gate 	}
15160Sstevel@tonic-gate 
15170Sstevel@tonic-gate done:
15180Sstevel@tonic-gate 
15190Sstevel@tonic-gate 	/*
15200Sstevel@tonic-gate 	 * If there are gap reports pending, check if advancing
15210Sstevel@tonic-gate 	 * the ftsn here closes a gap. If so, we can advance
15220Sstevel@tonic-gate 	 * ftsn to the end of the set.
15230Sstevel@tonic-gate 	 */
15240Sstevel@tonic-gate 	if (sctp->sctp_sack_info != NULL &&
15250Sstevel@tonic-gate 	    sctp->sctp_ftsn == sctp->sctp_sack_info->begin) {
15260Sstevel@tonic-gate 		sctp->sctp_ftsn = sctp->sctp_sack_info->end + 1;
15270Sstevel@tonic-gate 	}
15280Sstevel@tonic-gate 	/*
15290Sstevel@tonic-gate 	 * If ftsn has moved forward, maybe we can remove gap reports.
15300Sstevel@tonic-gate 	 * NB: dmp may now be NULL, so don't dereference it here.
15310Sstevel@tonic-gate 	 */
15320Sstevel@tonic-gate 	if (oftsn != sctp->sctp_ftsn && sctp->sctp_sack_info != NULL) {
15330Sstevel@tonic-gate 		sctp_ack_rem(&sctp->sctp_sack_info, sctp->sctp_ftsn - 1,
15340Sstevel@tonic-gate 		    &sctp->sctp_sack_gaps);
15350Sstevel@tonic-gate 		dprint(2, ("data_chunk: removed acks before %x (num=%d)\n",
15360Sstevel@tonic-gate 		    sctp->sctp_ftsn - 1, sctp->sctp_sack_gaps));
15370Sstevel@tonic-gate 	}
15380Sstevel@tonic-gate 
15390Sstevel@tonic-gate #ifdef	DEBUG
15400Sstevel@tonic-gate 	if (sctp->sctp_sack_info != NULL) {
15410Sstevel@tonic-gate 		ASSERT(sctp->sctp_ftsn != sctp->sctp_sack_info->begin);
15420Sstevel@tonic-gate 	}
15430Sstevel@tonic-gate #endif
15440Sstevel@tonic-gate 
15450Sstevel@tonic-gate #undef	SCTP_ACK_IT
15460Sstevel@tonic-gate }
15470Sstevel@tonic-gate 
15480Sstevel@tonic-gate void
15490Sstevel@tonic-gate sctp_fill_sack(sctp_t *sctp, unsigned char *dst, int sacklen)
15500Sstevel@tonic-gate {
15510Sstevel@tonic-gate 	sctp_chunk_hdr_t *sch;
15520Sstevel@tonic-gate 	sctp_sack_chunk_t *sc;
15530Sstevel@tonic-gate 	sctp_sack_frag_t *sf;
15540Sstevel@tonic-gate 	uint16_t num_gaps = sctp->sctp_sack_gaps;
15550Sstevel@tonic-gate 	sctp_set_t *sp;
15560Sstevel@tonic-gate 
15570Sstevel@tonic-gate 	/* Chunk hdr */
15580Sstevel@tonic-gate 	sch = (sctp_chunk_hdr_t *)dst;
15590Sstevel@tonic-gate 	sch->sch_id = CHUNK_SACK;
15600Sstevel@tonic-gate 	sch->sch_flags = 0;
15610Sstevel@tonic-gate 	sch->sch_len = htons(sacklen);
15620Sstevel@tonic-gate 
15630Sstevel@tonic-gate 	/* SACK chunk */
15640Sstevel@tonic-gate 	sctp->sctp_lastacked = sctp->sctp_ftsn - 1;
15650Sstevel@tonic-gate 
15660Sstevel@tonic-gate 	sc = (sctp_sack_chunk_t *)(sch + 1);
15670Sstevel@tonic-gate 	sc->ssc_cumtsn = htonl(sctp->sctp_lastacked);
15680Sstevel@tonic-gate 	if (sctp->sctp_rxqueued < sctp->sctp_rwnd) {
15690Sstevel@tonic-gate 		sc->ssc_a_rwnd = htonl(sctp->sctp_rwnd - sctp->sctp_rxqueued);
15700Sstevel@tonic-gate 	} else {
15710Sstevel@tonic-gate 		sc->ssc_a_rwnd = 0;
15720Sstevel@tonic-gate 	}
15730Sstevel@tonic-gate 	sc->ssc_numfrags = htons(num_gaps);
15740Sstevel@tonic-gate 	sc->ssc_numdups = 0;
15750Sstevel@tonic-gate 
15760Sstevel@tonic-gate 	/* lay in gap reports */
15770Sstevel@tonic-gate 	sf = (sctp_sack_frag_t *)(sc + 1);
15780Sstevel@tonic-gate 	for (sp = sctp->sctp_sack_info; sp; sp = sp->next) {
15790Sstevel@tonic-gate 		uint16_t offset;
15800Sstevel@tonic-gate 
15810Sstevel@tonic-gate 		/* start */
15820Sstevel@tonic-gate 		if (sp->begin > sctp->sctp_lastacked) {
15830Sstevel@tonic-gate 			offset = (uint16_t)(sp->begin - sctp->sctp_lastacked);
15840Sstevel@tonic-gate 		} else {
15850Sstevel@tonic-gate 			/* sequence number wrap */
15860Sstevel@tonic-gate 			offset = (uint16_t)(UINT32_MAX - sctp->sctp_lastacked +
15870Sstevel@tonic-gate 			    sp->begin);
15880Sstevel@tonic-gate 		}
15890Sstevel@tonic-gate 		sf->ssf_start = htons(offset);
15900Sstevel@tonic-gate 
15910Sstevel@tonic-gate 		/* end */
15920Sstevel@tonic-gate 		if (sp->end >= sp->begin) {
15930Sstevel@tonic-gate 			offset += (uint16_t)(sp->end - sp->begin);
15940Sstevel@tonic-gate 		} else {
15950Sstevel@tonic-gate 			/* sequence number wrap */
15960Sstevel@tonic-gate 			offset += (uint16_t)(UINT32_MAX - sp->begin + sp->end);
15970Sstevel@tonic-gate 		}
15980Sstevel@tonic-gate 		sf->ssf_end = htons(offset);
15990Sstevel@tonic-gate 
16000Sstevel@tonic-gate 		sf++;
16010Sstevel@tonic-gate 		/* This is just for debugging (a la the following assertion) */
16020Sstevel@tonic-gate 		num_gaps--;
16030Sstevel@tonic-gate 	}
16040Sstevel@tonic-gate 
16050Sstevel@tonic-gate 	ASSERT(num_gaps == 0);
16060Sstevel@tonic-gate 
16070Sstevel@tonic-gate 	/* If the SACK timer is running, stop it */
16080Sstevel@tonic-gate 	if (sctp->sctp_ack_timer_running) {
16090Sstevel@tonic-gate 		sctp_timer_stop(sctp->sctp_ack_mp);
16100Sstevel@tonic-gate 		sctp->sctp_ack_timer_running = B_FALSE;
16110Sstevel@tonic-gate 	}
16120Sstevel@tonic-gate 
16130Sstevel@tonic-gate 	BUMP_LOCAL(sctp->sctp_obchunks);
16140Sstevel@tonic-gate }
16150Sstevel@tonic-gate 
16160Sstevel@tonic-gate mblk_t *
16170Sstevel@tonic-gate sctp_make_sack(sctp_t *sctp, sctp_faddr_t *sendto, mblk_t *dups)
16180Sstevel@tonic-gate {
16190Sstevel@tonic-gate 	mblk_t *smp;
16200Sstevel@tonic-gate 	size_t slen;
16210Sstevel@tonic-gate 	sctp_chunk_hdr_t *sch;
16220Sstevel@tonic-gate 	sctp_sack_chunk_t *sc;
16230Sstevel@tonic-gate 
16240Sstevel@tonic-gate 	if (sctp->sctp_force_sack) {
16250Sstevel@tonic-gate 		sctp->sctp_force_sack = 0;
16260Sstevel@tonic-gate 		goto checks_done;
16270Sstevel@tonic-gate 	}
16280Sstevel@tonic-gate 
16290Sstevel@tonic-gate 	if (sctp->sctp_state == SCTPS_ESTABLISHED) {
16300Sstevel@tonic-gate 		if (sctp->sctp_sack_toggle < 2) {
16310Sstevel@tonic-gate 			/* no need to SACK right now */
16320Sstevel@tonic-gate 			dprint(2, ("sctp_make_sack: %p no sack (toggle)\n",
16331676Sjpk 			    (void *)sctp));
16340Sstevel@tonic-gate 			return (NULL);
16350Sstevel@tonic-gate 		} else if (sctp->sctp_sack_toggle >= 2) {
16360Sstevel@tonic-gate 			sctp->sctp_sack_toggle = 0;
16370Sstevel@tonic-gate 		}
16380Sstevel@tonic-gate 	}
16390Sstevel@tonic-gate 
16400Sstevel@tonic-gate 	if (sctp->sctp_ftsn == sctp->sctp_lastacked + 1) {
16411676Sjpk 		dprint(2, ("sctp_make_sack: %p no sack (already)\n",
16421676Sjpk 		    (void *)sctp));
16430Sstevel@tonic-gate 		return (NULL);
16440Sstevel@tonic-gate 	}
16450Sstevel@tonic-gate 
16460Sstevel@tonic-gate checks_done:
16470Sstevel@tonic-gate 	dprint(2, ("sctp_make_sack: acking %x\n", sctp->sctp_ftsn - 1));
16480Sstevel@tonic-gate 
16490Sstevel@tonic-gate 	slen = sizeof (*sch) + sizeof (*sc) +
16500Sstevel@tonic-gate 	    (sizeof (sctp_sack_frag_t) * sctp->sctp_sack_gaps);
16510Sstevel@tonic-gate 	smp = sctp_make_mp(sctp, sendto, slen);
16520Sstevel@tonic-gate 	if (smp == NULL) {
16531735Skcpoon 		SCTP_KSTAT(sctp_send_sack_failed);
16540Sstevel@tonic-gate 		return (NULL);
16550Sstevel@tonic-gate 	}
16560Sstevel@tonic-gate 	sch = (sctp_chunk_hdr_t *)smp->b_wptr;
16570Sstevel@tonic-gate 
16580Sstevel@tonic-gate 	sctp_fill_sack(sctp, smp->b_wptr, slen);
16590Sstevel@tonic-gate 	smp->b_wptr += slen;
16600Sstevel@tonic-gate 	if (dups) {
16610Sstevel@tonic-gate 		sc = (sctp_sack_chunk_t *)(sch + 1);
16620Sstevel@tonic-gate 		sc->ssc_numdups = htons((dups->b_wptr - dups->b_rptr)
16630Sstevel@tonic-gate 		    / sizeof (uint32_t));
16640Sstevel@tonic-gate 		sch->sch_len = htons(slen + (dups->b_wptr - dups->b_rptr));
16650Sstevel@tonic-gate 		smp->b_cont = dups;
16660Sstevel@tonic-gate 	}
16670Sstevel@tonic-gate 
16680Sstevel@tonic-gate 	return (smp);
16690Sstevel@tonic-gate }
16700Sstevel@tonic-gate 
16710Sstevel@tonic-gate void
16720Sstevel@tonic-gate sctp_sack(sctp_t *sctp, mblk_t *dups)
16730Sstevel@tonic-gate {
16740Sstevel@tonic-gate 	mblk_t *smp;
16750Sstevel@tonic-gate 
16760Sstevel@tonic-gate 	/* If we are shutting down, let send_shutdown() bundle the SACK */
16770Sstevel@tonic-gate 	if (sctp->sctp_state == SCTPS_SHUTDOWN_SENT) {
16780Sstevel@tonic-gate 		sctp_send_shutdown(sctp, 0);
16790Sstevel@tonic-gate 	}
16800Sstevel@tonic-gate 
16810Sstevel@tonic-gate 	ASSERT(sctp->sctp_lastdata != NULL);
16820Sstevel@tonic-gate 
16830Sstevel@tonic-gate 	if ((smp = sctp_make_sack(sctp, sctp->sctp_lastdata, dups)) == NULL) {
16840Sstevel@tonic-gate 		/* The caller of sctp_sack() will not free the dups mblk. */
16850Sstevel@tonic-gate 		if (dups != NULL)
16860Sstevel@tonic-gate 			freeb(dups);
16870Sstevel@tonic-gate 		return;
16880Sstevel@tonic-gate 	}
16890Sstevel@tonic-gate 
16900Sstevel@tonic-gate 	sctp_set_iplen(sctp, smp);
16910Sstevel@tonic-gate 
16920Sstevel@tonic-gate 	dprint(2, ("sctp_sack: sending to %p %x:%x:%x:%x\n",
16931676Sjpk 	    (void *)sctp->sctp_lastdata,
16941676Sjpk 	    SCTP_PRINTADDR(sctp->sctp_lastdata->faddr)));
16950Sstevel@tonic-gate 
16960Sstevel@tonic-gate 	sctp->sctp_active = lbolt64;
16970Sstevel@tonic-gate 
16980Sstevel@tonic-gate 	BUMP_MIB(&sctp_mib, sctpOutAck);
16990Sstevel@tonic-gate 	sctp_add_sendq(sctp, smp);
17000Sstevel@tonic-gate }
17010Sstevel@tonic-gate 
/*
 * This is called if we have a message that was partially sent and is
 * abandoned. The cum TSN will be the last chunk sent for this message,
 * subsequent chunks will be marked ABANDONED. We send a Forward TSN
 * chunk in this case with the TSN of the last sent chunk so that the
 * peer can clean up its fragment list for this message. This message
 * will be removed from the transmit list when the peer sends a SACK
 * back.
 *
 * Returns 0 on success, ENOMEM on allocation failure (with sctp_adv_pap
 * rolled back), and -1 if the message's first TSN has not yet been
 * cumulatively acked (nothing to forward).
 */
int
sctp_check_abandoned_msg(sctp_t *sctp, mblk_t *meta)
{
	sctp_data_hdr_t	*dh;
	mblk_t		*nmp;
	mblk_t		*head;
	int32_t		unsent = 0;
	mblk_t		*mp1 = meta->b_cont;
	uint32_t	adv_pap = sctp->sctp_adv_pap;	/* saved for rollback */
	sctp_faddr_t	*fp = sctp->sctp_current;

	dh = (sctp_data_hdr_t *)mp1->b_rptr;
	if (SEQ_GEQ(sctp->sctp_lastack_rxd, ntohl(dh->sdh_tsn))) {
		sctp_ftsn_set_t	*sets = NULL;
		uint_t		nsets = 0;
		uint32_t	seglen = sizeof (uint32_t);
		boolean_t	ubit = SCTP_DATA_GET_UBIT(dh);

		/* Advance to the last already-sent chunk of this message. */
		while (mp1->b_next != NULL && SCTP_CHUNK_ISSENT(mp1->b_next))
			mp1 = mp1->b_next;
		dh = (sctp_data_hdr_t *)mp1->b_rptr;
		/* Advanced peer-ack point is the last sent chunk's TSN. */
		sctp->sctp_adv_pap = ntohl(dh->sdh_tsn);
		if (!ubit &&
		    !sctp_add_ftsn_set(&sets, fp, meta, &nsets, &seglen)) {
			/* Allocation failed; undo the adv_pap advance. */
			sctp->sctp_adv_pap = adv_pap;
			return (ENOMEM);
		}
		nmp = sctp_make_ftsn_chunk(sctp, fp, sets, nsets, seglen);
		sctp_free_ftsn_set(sets);
		if (nmp == NULL) {
			sctp->sctp_adv_pap = adv_pap;
			return (ENOMEM);
		}
		head = sctp_add_proto_hdr(sctp, fp, nmp, 0, NULL);
		if (head == NULL) {
			sctp->sctp_adv_pap = adv_pap;
			freemsg(nmp);
			SCTP_KSTAT(sctp_send_ftsn_failed);
			return (ENOMEM);
		}
		SCTP_MSG_SET_ABANDONED(meta);
		sctp_set_iplen(sctp, head);
		sctp_add_sendq(sctp, head);
		/* Make sure the Forward TSN is covered by a rexmit timer. */
		if (!fp->timer_running)
			SCTP_FADDR_TIMER_RESTART(sctp, fp, fp->rto);
		/*
		 * Mark all remaining (never sent) chunks of the message
		 * abandoned and total up their payload bytes.
		 */
		mp1 = mp1->b_next;
		while (mp1 != NULL) {
			ASSERT(!SCTP_CHUNK_ISSENT(mp1));
			ASSERT(!SCTP_CHUNK_ABANDONED(mp1));
			SCTP_ABANDON_CHUNK(mp1);
			dh = (sctp_data_hdr_t *)mp1->b_rptr;
			unsent += ntohs(dh->sdh_len) - sizeof (*dh);
			mp1 = mp1->b_next;
		}
		ASSERT(sctp->sctp_unsent >= unsent);
		sctp->sctp_unsent -= unsent;
		/*
		 * Update ULP the amount of queued data, which is
		 * sent-unack'ed + unsent.
		 */
		if (!SCTP_IS_DETACHED(sctp)) {
			sctp->sctp_ulp_xmitted(sctp->sctp_ulpd,
			    sctp->sctp_unacked + sctp->sctp_unsent);
		}
		return (0);
	}
	return (-1);
}
17790Sstevel@tonic-gate 
/*
 * Process the cumulative TSN ack "tsn" from a SACK: walk the transmit
 * list, mark every sent chunk with TSN <= tsn acked, free messages
 * that are completely acked (or raise a send-failure event for
 * abandoned ones), and update per-peer flight size, RTT and timers.
 * *first_unacked is set to the first chunk not covered by the
 * cumulative ack (NULL when the walk stopped on an abandoned chunk or
 * ran off the list).  Returns the number of newly acked bytes counted
 * as full chunk lengths (data-chunk header included); 0 means a
 * duplicate ack.
 */
uint32_t
sctp_cumack(sctp_t *sctp, uint32_t tsn, mblk_t **first_unacked)
{
	mblk_t *ump, *nump, *mp = NULL;
	uint16_t chunklen;
	uint32_t xtsn;
	sctp_faddr_t *fp;
	sctp_data_hdr_t *sdc;
	uint32_t cumack_forward = 0;
	sctp_msg_hdr_t	*mhdr;

	ump = sctp->sctp_xmit_head;

	/*
	 * Free messages only when they're completely acked.
	 */
	while (ump != NULL) {
		mhdr = (sctp_msg_hdr_t *)ump->b_rptr;
		/* Walk this message's chunks (b_cont head, b_next chain). */
		for (mp = ump->b_cont; mp != NULL; mp = mp->b_next) {
			if (SCTP_CHUNK_ABANDONED(mp)) {
				ASSERT(SCTP_IS_MSG_ABANDONED(ump));
				/* Abandoned tail; nothing more to ack here. */
				mp = NULL;
				break;
			}
			/*
			 * We check for abandoned message if we are PR-SCTP
			 * aware, if this is not the first chunk in the
			 * message (b_cont) and if the message is marked
			 * abandoned.
			 */
			if (!SCTP_CHUNK_ISSENT(mp)) {
				if (sctp->sctp_prsctp_aware &&
				    mp != ump->b_cont &&
				    (SCTP_IS_MSG_ABANDONED(ump) ||
				    SCTP_MSG_TO_BE_ABANDONED(ump, mhdr,
				    sctp))) {
					(void) sctp_check_abandoned_msg(sctp,
					    ump);
				}
				/* First unsent chunk ends the cum-ack walk. */
				goto cum_ack_done;
			}
			sdc = (sctp_data_hdr_t *)mp->b_rptr;
			xtsn = ntohl(sdc->sdh_tsn);
			/* Skip chunks already covered by an earlier cum ack. */
			if (SEQ_GEQ(sctp->sctp_lastack_rxd, xtsn))
				continue;
			if (SEQ_GEQ(tsn, xtsn)) {
				fp = SCTP_CHUNK_DEST(mp);
				chunklen = ntohs(sdc->sdh_len);

				if (sctp->sctp_out_time != 0 &&
				    xtsn == sctp->sctp_rtt_tsn) {
					/* Got a new RTT measurement */
					sctp_update_rtt(sctp, fp,
					    lbolt64 - sctp->sctp_out_time);
					sctp->sctp_out_time = 0;
				}
				/* Already gap-acked; don't count it twice. */
				if (SCTP_CHUNK_ISACKED(mp))
					continue;
				SCTP_CHUNK_SET_SACKCNT(mp, 0);
				SCTP_CHUNK_ACKED(mp);
				ASSERT(fp->suna >= chunklen);
				fp->suna -= chunklen;
				fp->acked += chunklen;
				cumack_forward += chunklen;
				/* sctp_unacked counts payload only. */
				ASSERT(sctp->sctp_unacked >=
				    (chunklen - sizeof (*sdc)));
				sctp->sctp_unacked -=
				    (chunklen - sizeof (*sdc));
				if (fp->suna == 0) {
					/* all outstanding data acked */
					fp->pba = 0;
					SCTP_FADDR_TIMER_STOP(fp);
				} else {
					SCTP_FADDR_TIMER_RESTART(sctp, fp,
					    fp->rto);
				}
			} else {
				/* Chunk beyond the cum ack; stop here. */
				goto cum_ack_done;
			}
		}
		/* Whole message acked: unlink it from the transmit list. */
		nump = ump->b_next;
		if (nump != NULL)
			nump->b_prev = NULL;
		if (ump == sctp->sctp_xmit_tail)
			sctp->sctp_xmit_tail = nump;
		if (SCTP_IS_MSG_ABANDONED(ump)) {
			BUMP_LOCAL(sctp->sctp_prsctpdrop);
			ump->b_next = NULL;
			/* Tell the ULP the abandoned message won't arrive. */
			sctp_sendfail_event(sctp, ump, 0, B_TRUE);
		} else {
			sctp_free_msg(ump);
		}
		sctp->sctp_xmit_head = ump = nump;
	}
cum_ack_done:
	*first_unacked = mp;
	if (cumack_forward > 0) {
		BUMP_MIB(&sctp_mib, sctpInAck);
		if (SEQ_GT(sctp->sctp_lastack_rxd, sctp->sctp_recovery_tsn)) {
			/* Cum ack moved past the recovery point. */
			sctp->sctp_recovery_tsn = sctp->sctp_lastack_rxd;
		}

		/*
		 * Update ULP the amount of queued data, which is
		 * sent-unack'ed + unsent.
		 */
		if (!SCTP_IS_DETACHED(sctp)) {
			sctp->sctp_ulp_xmitted(sctp->sctp_ulpd,
			    sctp->sctp_unacked + sctp->sctp_unsent);
		}

		/* Time to send a shutdown? */
		if (sctp->sctp_state == SCTPS_SHUTDOWN_PENDING) {
			sctp_send_shutdown(sctp, 0);
		}
		sctp->sctp_xmit_unacked = mp;
	} else {
		/* dup ack */
		BUMP_MIB(&sctp_mib, sctpInDupAck);
	}
	sctp->sctp_lastack_rxd = tsn;
	/* Advanced peer-ack point never lags the cumulative ack. */
	if (SEQ_LT(sctp->sctp_adv_pap, sctp->sctp_lastack_rxd))
		sctp->sctp_adv_pap = sctp->sctp_lastack_rxd;
	ASSERT(sctp->sctp_xmit_head || sctp->sctp_unacked == 0);

	return (cumack_forward);
}
19070Sstevel@tonic-gate 
19080Sstevel@tonic-gate static int
19090Sstevel@tonic-gate sctp_set_frwnd(sctp_t *sctp, uint32_t frwnd)
19100Sstevel@tonic-gate {
19110Sstevel@tonic-gate 	uint32_t orwnd;
19120Sstevel@tonic-gate 
19130Sstevel@tonic-gate 	if (sctp->sctp_unacked > frwnd) {
19140Sstevel@tonic-gate 		sctp->sctp_frwnd = 0;
19150Sstevel@tonic-gate 		return (0);
19160Sstevel@tonic-gate 	}
19170Sstevel@tonic-gate 	orwnd = sctp->sctp_frwnd;
19180Sstevel@tonic-gate 	sctp->sctp_frwnd = frwnd - sctp->sctp_unacked;
19190Sstevel@tonic-gate 	if (orwnd < sctp->sctp_frwnd) {
19200Sstevel@tonic-gate 		return (1);
19210Sstevel@tonic-gate 	} else {
19220Sstevel@tonic-gate 		return (0);
19230Sstevel@tonic-gate 	}
19240Sstevel@tonic-gate }
19250Sstevel@tonic-gate 
19260Sstevel@tonic-gate /*
19270Sstevel@tonic-gate  * For un-ordered messages.
19280Sstevel@tonic-gate  * Walk the sctp->sctp_uo_frag list and remove any fragments with TSN
19290Sstevel@tonic-gate  * less than/equal to ftsn. Fragments for un-ordered messages are
19300Sstevel@tonic-gate  * strictly in sequence (w.r.t TSN).
19310Sstevel@tonic-gate  */
19320Sstevel@tonic-gate static int
19330Sstevel@tonic-gate sctp_ftsn_check_uo_frag(sctp_t *sctp, uint32_t ftsn)
19340Sstevel@tonic-gate {
19350Sstevel@tonic-gate 	mblk_t		*hmp;
19360Sstevel@tonic-gate 	mblk_t		*hmp_next;
19370Sstevel@tonic-gate 	sctp_data_hdr_t	*dc;
19380Sstevel@tonic-gate 	int		dlen = 0;
19390Sstevel@tonic-gate 
19400Sstevel@tonic-gate 	hmp = sctp->sctp_uo_frags;
19410Sstevel@tonic-gate 	while (hmp != NULL) {
19420Sstevel@tonic-gate 		hmp_next = hmp->b_next;
19430Sstevel@tonic-gate 		dc = (sctp_data_hdr_t *)hmp->b_rptr;
19440Sstevel@tonic-gate 		if (SEQ_GT(ntohl(dc->sdh_tsn), ftsn))
19450Sstevel@tonic-gate 			return (dlen);
19460Sstevel@tonic-gate 		sctp->sctp_uo_frags = hmp_next;
19470Sstevel@tonic-gate 		if (hmp_next != NULL)
19480Sstevel@tonic-gate 			hmp_next->b_prev = NULL;
19490Sstevel@tonic-gate 		hmp->b_next = NULL;
19500Sstevel@tonic-gate 		dlen += ntohs(dc->sdh_len) - sizeof (*dc);
19510Sstevel@tonic-gate 		freeb(hmp);
19520Sstevel@tonic-gate 		hmp = hmp_next;
19530Sstevel@tonic-gate 	}
19540Sstevel@tonic-gate 	return (dlen);
19550Sstevel@tonic-gate }
19560Sstevel@tonic-gate 
19570Sstevel@tonic-gate /*
19580Sstevel@tonic-gate  * For ordered messages.
19590Sstevel@tonic-gate  * Check for existing fragments for an sid-ssn pair reported as abandoned,
19600Sstevel@tonic-gate  * hence will not receive, in the Forward TSN. If there are fragments, then
19610Sstevel@tonic-gate  * we just nuke them. If and when Partial Delivery API is supported, we
19620Sstevel@tonic-gate  * would need to send a notification to the upper layer about this.
19630Sstevel@tonic-gate  */
19640Sstevel@tonic-gate static int
19650Sstevel@tonic-gate sctp_ftsn_check_frag(sctp_t *sctp, uint16_t ssn, sctp_instr_t *sip)
19660Sstevel@tonic-gate {
19670Sstevel@tonic-gate 	sctp_reass_t	*srp;
19680Sstevel@tonic-gate 	mblk_t		*hmp;
19690Sstevel@tonic-gate 	mblk_t		*dmp;
19700Sstevel@tonic-gate 	mblk_t		*hmp_next;
19710Sstevel@tonic-gate 	sctp_data_hdr_t	*dc;
19720Sstevel@tonic-gate 	int		dlen = 0;
19730Sstevel@tonic-gate 
19740Sstevel@tonic-gate 	hmp = sip->istr_reass;
19750Sstevel@tonic-gate 	while (hmp != NULL) {
19760Sstevel@tonic-gate 		hmp_next = hmp->b_next;
19770Sstevel@tonic-gate 		srp = (sctp_reass_t *)DB_BASE(hmp);
19780Sstevel@tonic-gate 		if (SSN_GT(srp->ssn, ssn))
19790Sstevel@tonic-gate 			return (dlen);
19800Sstevel@tonic-gate 		/*
19810Sstevel@tonic-gate 		 * If we had sent part of this message up, send a partial
19820Sstevel@tonic-gate 		 * delivery event. Since this is ordered delivery, we should
19830Sstevel@tonic-gate 		 * have sent partial message only for the next in sequence,
19840Sstevel@tonic-gate 		 * hence the ASSERT. See comments in sctp_data_chunk() for
19850Sstevel@tonic-gate 		 * trypartial.
19860Sstevel@tonic-gate 		 */
19870Sstevel@tonic-gate 		if (srp->partial_delivered) {
19880Sstevel@tonic-gate 			ASSERT(sip->nextseq == srp->ssn);
19890Sstevel@tonic-gate 			sctp_partial_delivery_event(sctp);
19900Sstevel@tonic-gate 		}
19910Sstevel@tonic-gate 		/* Take it out of the reass queue */
19920Sstevel@tonic-gate 		sip->istr_reass = hmp_next;
19930Sstevel@tonic-gate 		if (hmp_next != NULL)
19940Sstevel@tonic-gate 			hmp_next->b_prev = NULL;
19950Sstevel@tonic-gate 		hmp->b_next = NULL;
19960Sstevel@tonic-gate 		ASSERT(hmp->b_prev == NULL);
19970Sstevel@tonic-gate 		dmp = hmp;
19980Sstevel@tonic-gate 		if (DB_TYPE(hmp) == M_CTL) {
19990Sstevel@tonic-gate 			dmp = hmp->b_cont;
20000Sstevel@tonic-gate 			hmp->b_cont = NULL;
20010Sstevel@tonic-gate 			freeb(hmp);
20020Sstevel@tonic-gate 			hmp = dmp;
20030Sstevel@tonic-gate 		}
20040Sstevel@tonic-gate 		while (dmp != NULL) {
20050Sstevel@tonic-gate 			dc = (sctp_data_hdr_t *)dmp->b_rptr;
20060Sstevel@tonic-gate 			dlen += ntohs(dc->sdh_len) - sizeof (*dc);
20070Sstevel@tonic-gate 			dmp = dmp->b_cont;
20080Sstevel@tonic-gate 		}
20090Sstevel@tonic-gate 		freemsg(hmp);
20100Sstevel@tonic-gate 		hmp = hmp_next;
20110Sstevel@tonic-gate 	}
20120Sstevel@tonic-gate 	return (dlen);
20130Sstevel@tonic-gate }
20140Sstevel@tonic-gate 
20150Sstevel@tonic-gate /*
20160Sstevel@tonic-gate  * Update sctp_ftsn to the cumulative TSN from the Forward TSN chunk. Remove
20170Sstevel@tonic-gate  * any SACK gaps less than the newly updated sctp_ftsn. Walk through the
20180Sstevel@tonic-gate  * sid-ssn pair in the Forward TSN and for each, clean the fragment list
20190Sstevel@tonic-gate  * for this pair, if needed, and check if we can deliver subsequent
20200Sstevel@tonic-gate  * messages, if any, from the instream queue (that were waiting for this
20210Sstevel@tonic-gate  * sid-ssn message to show up). Once we are done try to update the SACK
20220Sstevel@tonic-gate  * info. We could get a duplicate Forward TSN, in which case just send
20230Sstevel@tonic-gate  * a SACK. If any of the sid values in the the Forward TSN is invalid,
20240Sstevel@tonic-gate  * send back an "Invalid Stream Identifier" error and continue processing
20250Sstevel@tonic-gate  * the rest.
20260Sstevel@tonic-gate  */
20270Sstevel@tonic-gate static void
20280Sstevel@tonic-gate sctp_process_forward_tsn(sctp_t *sctp, sctp_chunk_hdr_t *ch, sctp_faddr_t *fp,
20290Sstevel@tonic-gate     ip6_pkt_t *ipp)
20300Sstevel@tonic-gate {
20310Sstevel@tonic-gate 	uint32_t	*ftsn = (uint32_t *)(ch + 1);
20320Sstevel@tonic-gate 	ftsn_entry_t	*ftsn_entry;
20330Sstevel@tonic-gate 	sctp_instr_t	*instr;
20340Sstevel@tonic-gate 	boolean_t	can_deliver = B_TRUE;
20350Sstevel@tonic-gate 	size_t		dlen;
20360Sstevel@tonic-gate 	int		flen;
20370Sstevel@tonic-gate 	mblk_t		*dmp;
20380Sstevel@tonic-gate 	mblk_t		*pmp;
20390Sstevel@tonic-gate 	sctp_data_hdr_t	*dc;
20400Sstevel@tonic-gate 	ssize_t		remaining;
20410Sstevel@tonic-gate 
20420Sstevel@tonic-gate 	*ftsn = ntohl(*ftsn);
20430Sstevel@tonic-gate 	remaining =  ntohs(ch->sch_len) - sizeof (*ch) - sizeof (*ftsn);
20440Sstevel@tonic-gate 
20450Sstevel@tonic-gate 	if (SCTP_IS_DETACHED(sctp)) {
20460Sstevel@tonic-gate 		BUMP_MIB(&sctp_mib, sctpInClosed);
20470Sstevel@tonic-gate 		can_deliver = B_FALSE;
20480Sstevel@tonic-gate 	}
20490Sstevel@tonic-gate 	/*
20500Sstevel@tonic-gate 	 * un-ordered messages don't have SID-SSN pair entries, we check
20510Sstevel@tonic-gate 	 * for any fragments (for un-ordered message) to be discarded using
20520Sstevel@tonic-gate 	 * the cumulative FTSN.
20530Sstevel@tonic-gate 	 */
20540Sstevel@tonic-gate 	flen = sctp_ftsn_check_uo_frag(sctp, *ftsn);
20550Sstevel@tonic-gate 	if (flen > 0) {
20560Sstevel@tonic-gate 		ASSERT(sctp->sctp_rxqueued >= flen);
20570Sstevel@tonic-gate 		sctp->sctp_rxqueued -= flen;
20580Sstevel@tonic-gate 	}
20590Sstevel@tonic-gate 	ftsn_entry = (ftsn_entry_t *)(ftsn + 1);
20600Sstevel@tonic-gate 	while (remaining >= sizeof (*ftsn_entry)) {
20610Sstevel@tonic-gate 		ftsn_entry->ftsn_sid = ntohs(ftsn_entry->ftsn_sid);
20620Sstevel@tonic-gate 		ftsn_entry->ftsn_ssn = ntohs(ftsn_entry->ftsn_ssn);
20630Sstevel@tonic-gate 		if (ftsn_entry->ftsn_sid >= sctp->sctp_num_istr) {
20640Sstevel@tonic-gate 			uint16_t	inval_parm[2];
20650Sstevel@tonic-gate 			mblk_t		*errmp;
20660Sstevel@tonic-gate 
20670Sstevel@tonic-gate 			inval_parm[0] = htons(ftsn_entry->ftsn_sid);
20680Sstevel@tonic-gate 			/* RESERVED to be ignored at the receiving end */
20690Sstevel@tonic-gate 			inval_parm[1] = 0;
20700Sstevel@tonic-gate 			errmp = sctp_make_err(sctp, SCTP_ERR_BAD_SID,
20710Sstevel@tonic-gate 			    (char *)inval_parm, sizeof (inval_parm));
20720Sstevel@tonic-gate 			if (errmp != NULL)
20730Sstevel@tonic-gate 				sctp_send_err(sctp, errmp, NULL);
20740Sstevel@tonic-gate 			ftsn_entry++;
20750Sstevel@tonic-gate 			remaining -= sizeof (*ftsn_entry);
20760Sstevel@tonic-gate 			continue;
20770Sstevel@tonic-gate 		}
20780Sstevel@tonic-gate 		instr = &sctp->sctp_instr[ftsn_entry->ftsn_sid];
20790Sstevel@tonic-gate 		flen = sctp_ftsn_check_frag(sctp, ftsn_entry->ftsn_ssn, instr);
20800Sstevel@tonic-gate 		/* Indicates frags were nuked, update rxqueued */
20810Sstevel@tonic-gate 		if (flen > 0) {
20820Sstevel@tonic-gate 			ASSERT(sctp->sctp_rxqueued >= flen);
20830Sstevel@tonic-gate 			sctp->sctp_rxqueued -= flen;
20840Sstevel@tonic-gate 		}
20850Sstevel@tonic-gate 		/*
20860Sstevel@tonic-gate 		 * It is possible to receive an FTSN chunk with SSN smaller
20870Sstevel@tonic-gate 		 * than then nextseq if this chunk is a retransmission because
20880Sstevel@tonic-gate 		 * of incomplete processing when it was first processed.
20890Sstevel@tonic-gate 		 */
20900Sstevel@tonic-gate 		if (SSN_GE(ftsn_entry->ftsn_ssn, instr->nextseq))
20910Sstevel@tonic-gate 			instr->nextseq = ftsn_entry->ftsn_ssn + 1;
20920Sstevel@tonic-gate 		while (instr->istr_nmsgs > 0) {
20930Sstevel@tonic-gate 			mblk_t	*next;
20940Sstevel@tonic-gate 
20950Sstevel@tonic-gate 			dmp = (mblk_t *)instr->istr_msgs;
20960Sstevel@tonic-gate 			dc = (sctp_data_hdr_t *)dmp->b_rptr;
20970Sstevel@tonic-gate 			if (ntohs(dc->sdh_ssn) != instr->nextseq)
20980Sstevel@tonic-gate 				break;
20990Sstevel@tonic-gate 
21000Sstevel@tonic-gate 			next = dmp->b_next;
21010Sstevel@tonic-gate 			dlen = dmp->b_wptr - dmp->b_rptr - sizeof (*dc);
21020Sstevel@tonic-gate 			for (pmp = dmp->b_cont; pmp != NULL;
21030Sstevel@tonic-gate 			    pmp = pmp->b_cont) {
21040Sstevel@tonic-gate 				dlen += pmp->b_wptr - pmp->b_rptr;
21050Sstevel@tonic-gate 			}
21060Sstevel@tonic-gate 			if (can_deliver) {
21070Sstevel@tonic-gate 				int32_t	nrwnd;
21080Sstevel@tonic-gate 
21090Sstevel@tonic-gate 				dmp->b_rptr = (uchar_t *)(dc + 1);
21100Sstevel@tonic-gate 				dmp->b_next = NULL;
21110Sstevel@tonic-gate 				ASSERT(dmp->b_prev == NULL);
21120Sstevel@tonic-gate 				if (sctp_input_add_ancillary(sctp,
21130Sstevel@tonic-gate 				    &dmp, dc, fp, ipp) == 0) {
21140Sstevel@tonic-gate 					sctp->sctp_rxqueued -= dlen;
21150Sstevel@tonic-gate 					sctp->sctp_rwnd -= dlen;
21160Sstevel@tonic-gate 					nrwnd = sctp->sctp_ulp_recv(
21170Sstevel@tonic-gate 					    sctp->sctp_ulpd, dmp, 0);
21180Sstevel@tonic-gate 					if (nrwnd > sctp->sctp_rwnd)
21190Sstevel@tonic-gate 						sctp->sctp_rwnd = nrwnd;
21200Sstevel@tonic-gate 				} else {
21210Sstevel@tonic-gate 					/*
21220Sstevel@tonic-gate 					 * We will resume processing when
21230Sstevel@tonic-gate 					 * the FTSN chunk is re-xmitted.
21240Sstevel@tonic-gate 					 */
21250Sstevel@tonic-gate 					dmp->b_rptr = (uchar_t *)dc;
21260Sstevel@tonic-gate 					dmp->b_next = next;
21270Sstevel@tonic-gate 					dprint(0,
21280Sstevel@tonic-gate 					    ("FTSN dequeuing %u failed\n",
21290Sstevel@tonic-gate 					    ntohs(dc->sdh_ssn)));
21300Sstevel@tonic-gate 					return;
21310Sstevel@tonic-gate 				}
21320Sstevel@tonic-gate 			} else {
21330Sstevel@tonic-gate 				sctp->sctp_rxqueued -= dlen;
21340Sstevel@tonic-gate 				ASSERT(dmp->b_prev == NULL);
21350Sstevel@tonic-gate 				dmp->b_next = NULL;
21360Sstevel@tonic-gate 				freemsg(dmp);
21370Sstevel@tonic-gate 			}
21380Sstevel@tonic-gate 			instr->istr_nmsgs--;
21390Sstevel@tonic-gate 			instr->nextseq++;
21400Sstevel@tonic-gate 			sctp->sctp_istr_nmsgs--;
21410Sstevel@tonic-gate 			if (next != NULL)
21420Sstevel@tonic-gate 				next->b_prev = NULL;
21430Sstevel@tonic-gate 			instr->istr_msgs = next;
21440Sstevel@tonic-gate 		}
21450Sstevel@tonic-gate 		ftsn_entry++;
21460Sstevel@tonic-gate 		remaining -= sizeof (*ftsn_entry);
21470Sstevel@tonic-gate 	}
21480Sstevel@tonic-gate 	/* Duplicate FTSN */
21490Sstevel@tonic-gate 	if (*ftsn <= (sctp->sctp_ftsn - 1)) {
21500Sstevel@tonic-gate 		sctp->sctp_force_sack = 1;
21510Sstevel@tonic-gate 		return;
21520Sstevel@tonic-gate 	}
21530Sstevel@tonic-gate 	/* Advance cum TSN to that reported in the Forward TSN chunk */
21540Sstevel@tonic-gate 	sctp->sctp_ftsn = *ftsn + 1;
21550Sstevel@tonic-gate 
21560Sstevel@tonic-gate 	/* Remove all the SACK gaps before the new cum TSN */
21570Sstevel@tonic-gate 	if (sctp->sctp_sack_info != NULL) {
21580Sstevel@tonic-gate 		sctp_ack_rem(&sctp->sctp_sack_info, sctp->sctp_ftsn - 1,
21590Sstevel@tonic-gate 		    &sctp->sctp_sack_gaps);
21600Sstevel@tonic-gate 	}
21610Sstevel@tonic-gate 	/*
21620Sstevel@tonic-gate 	 * If there are gap reports pending, check if advancing
21630Sstevel@tonic-gate 	 * the ftsn here closes a gap. If so, we can advance
21640Sstevel@tonic-gate 	 * ftsn to the end of the set.
21650Sstevel@tonic-gate 	 * If ftsn has moved forward, maybe we can remove gap reports.
21660Sstevel@tonic-gate 	 */
21670Sstevel@tonic-gate 	if (sctp->sctp_sack_info != NULL &&
21680Sstevel@tonic-gate 	    sctp->sctp_ftsn == sctp->sctp_sack_info->begin) {
21690Sstevel@tonic-gate 		sctp->sctp_ftsn = sctp->sctp_sack_info->end + 1;
21700Sstevel@tonic-gate 		sctp_ack_rem(&sctp->sctp_sack_info, sctp->sctp_ftsn - 1,
21710Sstevel@tonic-gate 		    &sctp->sctp_sack_gaps);
21720Sstevel@tonic-gate 	}
21730Sstevel@tonic-gate }
21740Sstevel@tonic-gate 
21750Sstevel@tonic-gate /*
21760Sstevel@tonic-gate  * When we have processed a SACK we check to see if we can advance the
21770Sstevel@tonic-gate  * cumulative TSN if there are abandoned chunks immediately following
21780Sstevel@tonic-gate  * the updated cumulative TSN. If there are, we attempt to send a
21790Sstevel@tonic-gate  * Forward TSN chunk.
21800Sstevel@tonic-gate  */
21810Sstevel@tonic-gate static void
21820Sstevel@tonic-gate sctp_check_abandoned_data(sctp_t *sctp, sctp_faddr_t *fp)
21830Sstevel@tonic-gate {
21840Sstevel@tonic-gate 	mblk_t		*meta = sctp->sctp_xmit_head;
21850Sstevel@tonic-gate 	mblk_t		*mp;
21860Sstevel@tonic-gate 	mblk_t		*nmp;
21870Sstevel@tonic-gate 	uint32_t	seglen;
21880Sstevel@tonic-gate 	uint32_t	adv_pap = sctp->sctp_adv_pap;
21890Sstevel@tonic-gate 
21900Sstevel@tonic-gate 	/*
21910Sstevel@tonic-gate 	 * We only check in the first meta since otherwise we can't
21920Sstevel@tonic-gate 	 * advance the cumulative ack point. We just look for chunks
21930Sstevel@tonic-gate 	 * marked for retransmission, else we might prematurely
21940Sstevel@tonic-gate 	 * send an FTSN for a sent, but unacked, chunk.
21950Sstevel@tonic-gate 	 */
21960Sstevel@tonic-gate 	for (mp = meta->b_cont; mp != NULL; mp = mp->b_next) {
21970Sstevel@tonic-gate 		if (!SCTP_CHUNK_ISSENT(mp))
21980Sstevel@tonic-gate 			return;
21990Sstevel@tonic-gate 		if (SCTP_CHUNK_WANT_REXMIT(mp))
22000Sstevel@tonic-gate 			break;
22010Sstevel@tonic-gate 	}
22020Sstevel@tonic-gate 	if (mp == NULL)
22030Sstevel@tonic-gate 		return;
22040Sstevel@tonic-gate 	sctp_check_adv_ack_pt(sctp, meta, mp);
22050Sstevel@tonic-gate 	if (SEQ_GT(sctp->sctp_adv_pap, adv_pap)) {
22060Sstevel@tonic-gate 		sctp_make_ftsns(sctp, meta, mp, &nmp, fp, &seglen);
22070Sstevel@tonic-gate 		if (nmp == NULL) {
22080Sstevel@tonic-gate 			sctp->sctp_adv_pap = adv_pap;
22090Sstevel@tonic-gate 			if (!fp->timer_running)
22100Sstevel@tonic-gate 				SCTP_FADDR_TIMER_RESTART(sctp, fp, fp->rto);
22110Sstevel@tonic-gate 			return;
22120Sstevel@tonic-gate 		}
22130Sstevel@tonic-gate 		sctp_set_iplen(sctp, nmp);
22140Sstevel@tonic-gate 		sctp_add_sendq(sctp, nmp);
22150Sstevel@tonic-gate 		if (!fp->timer_running)
22160Sstevel@tonic-gate 			SCTP_FADDR_TIMER_RESTART(sctp, fp, fp->rto);
22170Sstevel@tonic-gate 	}
22180Sstevel@tonic-gate }
22190Sstevel@tonic-gate 
2220852Svi117747 /*
2221852Svi117747  * The processing here follows the same logic in sctp_got_sack(), the reason
2222852Svi117747  * we do this separately is because, usually, gap blocks are ordered and
2223852Svi117747  * we can process it in sctp_got_sack(). However if they aren't we would
2224852Svi117747  * need to do some additional non-optimal stuff when we start processing the
2225852Svi117747  * unordered gaps. To that effect sctp_got_sack() does the processing in the
2226852Svi117747  * simple case and this does the same in the more involved case.
2227852Svi117747  */
2228852Svi117747 static uint32_t
2229852Svi117747 sctp_process_uo_gaps(sctp_t *sctp, uint32_t ctsn, sctp_sack_frag_t *ssf,
2230852Svi117747     int num_gaps, mblk_t *umphead, mblk_t *mphead, int *trysend,
2231852Svi117747     boolean_t *fast_recovery, uint32_t fr_xtsn)
2232852Svi117747 {
2233852Svi117747 	uint32_t		xtsn;
2234852Svi117747 	uint32_t		gapstart = 0;
2235852Svi117747 	uint32_t		gapend = 0;
2236852Svi117747 	int			gapcnt;
2237852Svi117747 	uint16_t		chunklen;
2238852Svi117747 	sctp_data_hdr_t		*sdc;
2239852Svi117747 	int			gstart;
2240852Svi117747 	mblk_t			*ump = umphead;
2241852Svi117747 	mblk_t			*mp = mphead;
2242852Svi117747 	sctp_faddr_t		*fp;
2243852Svi117747 	uint32_t		acked = 0;
2244852Svi117747 
2245852Svi117747 	/*
2246852Svi117747 	 * gstart tracks the last (in the order of TSN) gapstart that
2247852Svi117747 	 * we process in this SACK gaps walk.
2248852Svi117747 	 */
2249852Svi117747 	gstart = ctsn;
2250852Svi117747 
2251852Svi117747 	sdc = (sctp_data_hdr_t *)mp->b_rptr;
2252852Svi117747 	xtsn = ntohl(sdc->sdh_tsn);
2253852Svi117747 	for (gapcnt = 0; gapcnt < num_gaps; gapcnt++, ssf++) {
2254852Svi117747 		if (gapstart != 0) {
2255852Svi117747 			/*
2256852Svi117747 			 * If we have reached the end of the transmit list or
2257852Svi117747 			 * hit an unsent chunk or encountered an unordered gap
2258852Svi117747 			 * block start from the ctsn again.
2259852Svi117747 			 */
2260852Svi117747 			if (ump == NULL || !SCTP_CHUNK_ISSENT(mp) ||
2261852Svi117747 			    SEQ_LT(ctsn + ntohs(ssf->ssf_start), xtsn)) {
2262852Svi117747 				ump = umphead;
2263852Svi117747 				mp = mphead;
2264852Svi117747 				sdc = (sctp_data_hdr_t *)mp->b_rptr;
2265852Svi117747 				xtsn = ntohl(sdc->sdh_tsn);
2266852Svi117747 			}
2267852Svi117747 		}
2268852Svi117747 
2269852Svi117747 		gapstart = ctsn + ntohs(ssf->ssf_start);
2270852Svi117747 		gapend = ctsn + ntohs(ssf->ssf_end);
2271852Svi117747 
2272852Svi117747 		/* SACK for TSN we have not sent - ABORT */
2273852Svi117747 		if (SEQ_GT(gapstart, sctp->sctp_ltsn - 1) ||
2274852Svi117747 		    SEQ_GT(gapend, sctp->sctp_ltsn - 1)) {
2275852Svi117747 			BUMP_MIB(&sctp_mib, sctpInAckUnsent);
2276852Svi117747 			*trysend = -1;
2277852Svi117747 			return (acked);
2278852Svi117747 		} else if (SEQ_LT(gapend, gapstart)) {
2279852Svi117747 			break;
2280852Svi117747 		}
2281852Svi117747 		/*
2282852Svi117747 		 * The xtsn can be the TSN processed for the last gap
2283852Svi117747 		 * (gapend) or it could be the cumulative TSN. We continue
2284852Svi117747 		 * with the last xtsn as long as the gaps are ordered, when
2285852Svi117747 		 * we hit an unordered gap, we re-start from the cumulative
2286852Svi117747 		 * TSN. For the first gap it is always the cumulative TSN.
2287852Svi117747 		 */
2288852Svi117747 		while (xtsn != gapstart) {
2289852Svi117747 			/*
2290852Svi117747 			 * We can't reliably check for reneged chunks
2291852Svi117747 			 * when walking the unordered list, so we don't.
2292852Svi117747 			 * In case the peer reneges then we will end up
2293852Svi117747 			 * sending the reneged chunk via timeout.
2294852Svi117747 			 */
2295852Svi117747 			mp = mp->b_next;
2296852Svi117747 			if (mp == NULL) {
2297852Svi117747 				ump = ump->b_next;
2298852Svi117747 				/*
2299852Svi117747 				 * ump can't be NULL because of the sanity
2300852Svi117747 				 * check above.
2301852Svi117747 				 */
2302852Svi117747 				ASSERT(ump != NULL);
2303852Svi117747 				mp = ump->b_cont;
2304852Svi117747 			}
2305852Svi117747 			/*
2306852Svi117747 			 * mp can't be unsent because of the sanity check
2307852Svi117747 			 * above.
2308852Svi117747 			 */
2309852Svi117747 			ASSERT(SCTP_CHUNK_ISSENT(mp));
2310852Svi117747 			sdc = (sctp_data_hdr_t *)mp->b_rptr;
2311852Svi117747 			xtsn = ntohl(sdc->sdh_tsn);
2312852Svi117747 		}
2313852Svi117747 		/*
2314852Svi117747 		 * Now that we have found the chunk with TSN == 'gapstart',
2315852Svi117747 		 * let's walk till we hit the chunk with TSN == 'gapend'.
2316852Svi117747 		 * All intermediate chunks will be marked ACKED, if they
2317852Svi117747 		 * haven't already been.
2318852Svi117747 		 */
2319852Svi117747 		while (SEQ_LEQ(xtsn, gapend)) {
2320852Svi117747 			/*
2321852Svi117747 			 * SACKed
2322852Svi117747 			 */
2323852Svi117747 			SCTP_CHUNK_SET_SACKCNT(mp, 0);
2324852Svi117747 			if (!SCTP_CHUNK_ISACKED(mp)) {
2325852Svi117747 				SCTP_CHUNK_ACKED(mp);
2326852Svi117747 
2327852Svi117747 				fp = SCTP_CHUNK_DEST(mp);
2328852Svi117747 				chunklen = ntohs(sdc->sdh_len);
2329852Svi117747 				ASSERT(fp->suna >= chunklen);
2330852Svi117747 				fp->suna -= chunklen;
2331852Svi117747 				if (fp->suna == 0) {
2332852Svi117747 					/* All outstanding data acked. */
2333852Svi117747 					fp->pba = 0;
2334852Svi117747 					SCTP_FADDR_TIMER_STOP(fp);
2335852Svi117747 				}
2336852Svi117747 				fp->acked += chunklen;
2337852Svi117747 				acked += chunklen;
2338852Svi117747 				sctp->sctp_unacked -= chunklen - sizeof (*sdc);
2339852Svi117747 				ASSERT(sctp->sctp_unacked >= 0);
2340852Svi117747 			}
2341852Svi117747 			/*
2342852Svi117747 			 * Move to the next message in the transmit list
2343852Svi117747 			 * if we are done with all the chunks from the current
2344852Svi117747 			 * message. Note, it is possible to hit the end of the
2345852Svi117747 			 * transmit list here, i.e. if we have already completed
2346852Svi117747 			 * processing the gap block.
2347852Svi117747 			 */
2348852Svi117747 			mp = mp->b_next;
2349852Svi117747 			if (mp == NULL) {
2350852Svi117747 				ump = ump->b_next;
2351852Svi117747 				if (ump == NULL) {
2352852Svi117747 					ASSERT(xtsn == gapend);
2353852Svi117747 					break;
2354852Svi117747 				}
2355852Svi117747 				mp = ump->b_cont;
2356852Svi117747 			}
2357852Svi117747 			/*
2358852Svi117747 			 * Likewise, we can hit an unsent chunk once we have
2359852Svi117747 			 * completed processing the gap block.
2360852Svi117747 			 */
2361852Svi117747 			if (!SCTP_CHUNK_ISSENT(mp)) {
2362852Svi117747 				ASSERT(xtsn == gapend);
2363852Svi117747 				break;
2364852Svi117747 			}
2365852Svi117747 			sdc = (sctp_data_hdr_t *)mp->b_rptr;
2366852Svi117747 			xtsn = ntohl(sdc->sdh_tsn);
2367852Svi117747 		}
2368852Svi117747 		/*
2369852Svi117747 		 * We keep track of the last gap we successfully processed
2370852Svi117747 		 * so that we can terminate the walk below for incrementing
2371852Svi117747 		 * the SACK count.
2372852Svi117747 		 */
2373852Svi117747 		if (SEQ_LT(gstart, gapstart))
2374852Svi117747 			gstart = gapstart;
2375852Svi117747 	}
2376852Svi117747 	/*
2377852Svi117747 	 * Check if have incremented the SACK count for all unacked TSNs in
2378852Svi117747 	 * sctp_got_sack(), if so we are done.
2379852Svi117747 	 */
2380852Svi117747 	if (SEQ_LEQ(gstart, fr_xtsn))
2381852Svi117747 		return (acked);
2382852Svi117747 
2383852Svi117747 	ump = umphead;
2384852Svi117747 	mp = mphead;
2385852Svi117747 	sdc = (sctp_data_hdr_t *)mp->b_rptr;
2386852Svi117747 	xtsn = ntohl(sdc->sdh_tsn);
2387852Svi117747 	while (SEQ_LT(xtsn, gstart)) {
2388852Svi117747 		/*
2389852Svi117747 		 * We have incremented SACK count for TSNs less than fr_tsn
2390852Svi117747 		 * in sctp_got_sack(), so don't increment them again here.
2391852Svi117747 		 */
2392852Svi117747 		if (SEQ_GT(xtsn, fr_xtsn) && !SCTP_CHUNK_ISACKED(mp)) {
2393852Svi117747 			SCTP_CHUNK_SET_SACKCNT(mp, SCTP_CHUNK_SACKCNT(mp) + 1);
2394852Svi117747 			if (SCTP_CHUNK_SACKCNT(mp) == sctp_fast_rxt_thresh) {
2395852Svi117747 				SCTP_CHUNK_REXMIT(mp);
2396852Svi117747 				sctp->sctp_chk_fast_rexmit = B_TRUE;
2397852Svi117747 				*trysend = 1;
2398852Svi117747 				if (!*fast_recovery) {
2399852Svi117747 					/*
2400852Svi117747 					 * Entering fast recovery.
2401852Svi117747 					 */
2402852Svi117747 					fp = SCTP_CHUNK_DEST(mp);
2403852Svi117747 					fp->ssthresh = fp->cwnd / 2;
2404852Svi117747 					if (fp->ssthresh < 2 * fp->sfa_pmss) {
2405852Svi117747 						fp->ssthresh =
2406852Svi117747 						    2 * fp->sfa_pmss;
2407852Svi117747 					}
2408852Svi117747 					fp->cwnd = fp->ssthresh;
2409852Svi117747 					fp->pba = 0;
2410852Svi117747 					sctp->sctp_recovery_tsn =
2411852Svi117747 					    sctp->sctp_ltsn - 1;
2412852Svi117747 					*fast_recovery = B_TRUE;
2413852Svi117747 				}
2414852Svi117747 			}
2415852Svi117747 		}
2416852Svi117747 		mp = mp->b_next;
2417852Svi117747 		if (mp == NULL) {
2418852Svi117747 			ump = ump->b_next;
2419852Svi117747 			/* We can't get to the end of the transmit list here */
2420852Svi117747 			ASSERT(ump != NULL);
2421852Svi117747 			mp = ump->b_cont;
2422852Svi117747 		}
2423852Svi117747 		/* We can't hit an unsent chunk here */
2424852Svi117747 		ASSERT(SCTP_CHUNK_ISSENT(mp));
2425852Svi117747 		sdc = (sctp_data_hdr_t *)mp->b_rptr;
2426852Svi117747 		xtsn = ntohl(sdc->sdh_tsn);
2427852Svi117747 	}
2428852Svi117747 	return (acked);
2429852Svi117747 }
2430852Svi117747 
24310Sstevel@tonic-gate static int
24320Sstevel@tonic-gate sctp_got_sack(sctp_t *sctp, sctp_chunk_hdr_t *sch)
24330Sstevel@tonic-gate {
24340Sstevel@tonic-gate 	sctp_sack_chunk_t	*sc;
24350Sstevel@tonic-gate 	sctp_data_hdr_t		*sdc;
24360Sstevel@tonic-gate 	sctp_sack_frag_t	*ssf;
24370Sstevel@tonic-gate 	mblk_t			*ump;
24380Sstevel@tonic-gate 	mblk_t			*mp;
2439852Svi117747 	mblk_t			*mp1;
2440852Svi117747 	uint32_t		cumtsn;
24410Sstevel@tonic-gate 	uint32_t		xtsn;
2442852Svi117747 	uint32_t		gapstart = 0;
2443852Svi117747 	uint32_t		gapend = 0;
24440Sstevel@tonic-gate 	uint32_t		acked = 0;
24450Sstevel@tonic-gate 	uint16_t		chunklen;
24460Sstevel@tonic-gate 	sctp_faddr_t		*fp;
24470Sstevel@tonic-gate 	int			num_gaps;
24480Sstevel@tonic-gate 	int			trysend = 0;
24490Sstevel@tonic-gate 	int			i;
24500Sstevel@tonic-gate 	boolean_t		fast_recovery = B_FALSE;
24510Sstevel@tonic-gate 	boolean_t		cumack_forward = B_FALSE;
24520Sstevel@tonic-gate 	boolean_t		fwd_tsn = B_FALSE;
24530Sstevel@tonic-gate 
24540Sstevel@tonic-gate 	BUMP_LOCAL(sctp->sctp_ibchunks);
24550Sstevel@tonic-gate 	chunklen = ntohs(sch->sch_len);
24560Sstevel@tonic-gate 	if (chunklen < (sizeof (*sch) + sizeof (*sc)))
24570Sstevel@tonic-gate 		return (0);
24580Sstevel@tonic-gate 
24590Sstevel@tonic-gate 	sc = (sctp_sack_chunk_t *)(sch + 1);
2460852Svi117747 	cumtsn = ntohl(sc->ssc_cumtsn);
2461852Svi117747 
2462852Svi117747 	dprint(2, ("got sack cumtsn %x -> %x\n", sctp->sctp_lastack_rxd,
2463852Svi117747 	    cumtsn));
24640Sstevel@tonic-gate 
24650Sstevel@tonic-gate 	/* out of order */
2466852Svi117747 	if (SEQ_LT(cumtsn, sctp->sctp_lastack_rxd))
24670Sstevel@tonic-gate 		return (0);
24680Sstevel@tonic-gate 
2469852Svi117747 	if (SEQ_GT(cumtsn, sctp->sctp_ltsn - 1)) {
24700Sstevel@tonic-gate 		BUMP_MIB(&sctp_mib, sctpInAckUnsent);
2471852Svi117747 		/* Send an ABORT */
2472852Svi117747 		return (-1);
24730Sstevel@tonic-gate 	}
24740Sstevel@tonic-gate 
24750Sstevel@tonic-gate 	/*
24760Sstevel@tonic-gate 	 * Cwnd only done when not in fast recovery mode.
24770Sstevel@tonic-gate 	 */
24780Sstevel@tonic-gate 	if (SEQ_LT(sctp->sctp_lastack_rxd, sctp->sctp_recovery_tsn))
24790Sstevel@tonic-gate 		fast_recovery = B_TRUE;
24800Sstevel@tonic-gate 
24810Sstevel@tonic-gate 	/*
24820Sstevel@tonic-gate 	 * .. and if the cum TSN is not moving ahead on account Forward TSN
24830Sstevel@tonic-gate 	 */
24840Sstevel@tonic-gate 	if (SEQ_LT(sctp->sctp_lastack_rxd, sctp->sctp_adv_pap))
24850Sstevel@tonic-gate 		fwd_tsn = B_TRUE;
24860Sstevel@tonic-gate 
2487852Svi117747 	if (cumtsn == sctp->sctp_lastack_rxd &&
24880Sstevel@tonic-gate 	    (sctp->sctp_xmit_unacked == NULL ||
24890Sstevel@tonic-gate 	    !SCTP_CHUNK_ABANDONED(sctp->sctp_xmit_unacked))) {
24900Sstevel@tonic-gate 		if (sctp->sctp_xmit_unacked != NULL)
24910Sstevel@tonic-gate 			mp = sctp->sctp_xmit_unacked;
24920Sstevel@tonic-gate 		else if (sctp->sctp_xmit_head != NULL)
24930Sstevel@tonic-gate 			mp = sctp->sctp_xmit_head->b_cont;
24940Sstevel@tonic-gate 		else
24950Sstevel@tonic-gate 			mp = NULL;
24960Sstevel@tonic-gate 		BUMP_MIB(&sctp_mib, sctpInDupAck);
2497*1932Svi117747 		/*
2498*1932Svi117747 		 * If we were doing a zero win probe and the win
2499*1932Svi117747 		 * has now opened to at least MSS, re-transmit the
2500*1932Svi117747 		 * zero win probe via sctp_rexmit_packet().
2501*1932Svi117747 		 */
2502*1932Svi117747 		if (mp != NULL && sctp->sctp_zero_win_probe &&
2503*1932Svi117747 		    ntohl(sc->ssc_a_rwnd) >= sctp->sctp_current->sfa_pmss) {
2504*1932Svi117747 			mblk_t	*pkt;
2505*1932Svi117747 			uint_t	pkt_len;
2506*1932Svi117747 			mblk_t	*mp1 = mp;
2507*1932Svi117747 			mblk_t	*meta = sctp->sctp_xmit_head;
2508*1932Svi117747 
2509*1932Svi117747 			/*
2510*1932Svi117747 			 * Reset the RTO since we have been backing-off
2511*1932Svi117747 			 * to send the ZWP.
2512*1932Svi117747 			 */
2513*1932Svi117747 			fp = sctp->sctp_current;
2514*1932Svi117747 			fp->rto = fp->srtt + 4 * fp->rttvar;
2515*1932Svi117747 			/* Resend the ZWP */
2516*1932Svi117747 			pkt = sctp_rexmit_packet(sctp, &meta, &mp1, fp,
2517*1932Svi117747 			    &pkt_len);
2518*1932Svi117747 			if (pkt == NULL) {
2519*1932Svi117747 				SCTP_KSTAT(sctp_ss_rexmit_failed);
2520*1932Svi117747 				return (0);
2521*1932Svi117747 			}
2522*1932Svi117747 			ASSERT(pkt_len <= fp->sfa_pmss);
2523*1932Svi117747 			sctp->sctp_zero_win_probe = B_FALSE;
2524*1932Svi117747 			sctp->sctp_rxt_nxttsn = sctp->sctp_ltsn;
2525*1932Svi117747 			sctp->sctp_rxt_maxtsn = sctp->sctp_ltsn;
2526*1932Svi117747 			sctp_set_iplen(sctp, pkt);
2527*1932Svi117747 			sctp_add_sendq(sctp, pkt);
2528*1932Svi117747 		}
25290Sstevel@tonic-gate 	} else {
2530*1932Svi117747 		if (sctp->sctp_zero_win_probe) {
2531*1932Svi117747 			/*
2532*1932Svi117747 			 * Reset the RTO since we have been backing-off
2533*1932Svi117747 			 * to send the ZWP.
2534*1932Svi117747 			 */
2535*1932Svi117747 			fp = sctp->sctp_current;
2536*1932Svi117747 			fp->rto = fp->srtt + 4 * fp->rttvar;
2537*1932Svi117747 			sctp->sctp_zero_win_probe = B_FALSE;
2538*1932Svi117747 			/* This is probably not required */
2539*1932Svi117747 			if (!sctp->sctp_rexmitting) {
2540*1932Svi117747 				sctp->sctp_rxt_nxttsn = sctp->sctp_ltsn;
2541*1932Svi117747 				sctp->sctp_rxt_maxtsn = sctp->sctp_ltsn;
2542*1932Svi117747 			}
2543*1932Svi117747 		}
2544852Svi117747 		acked = sctp_cumack(sctp, cumtsn, &mp);
25450Sstevel@tonic-gate 		sctp->sctp_xmit_unacked = mp;
25460Sstevel@tonic-gate 		if (acked > 0) {
25470Sstevel@tonic-gate 			trysend = 1;
25480Sstevel@tonic-gate 			cumack_forward = B_TRUE;
25490Sstevel@tonic-gate 			if (fwd_tsn && SEQ_GEQ(sctp->sctp_lastack_rxd,
25500Sstevel@tonic-gate 			    sctp->sctp_adv_pap)) {
25510Sstevel@tonic-gate 				cumack_forward = B_FALSE;
25520Sstevel@tonic-gate 			}
25530Sstevel@tonic-gate 		}
25540Sstevel@tonic-gate 	}
25550Sstevel@tonic-gate 	num_gaps = ntohs(sc->ssc_numfrags);
25560Sstevel@tonic-gate 	if (num_gaps == 0 || mp == NULL || !SCTP_CHUNK_ISSENT(mp) ||
25570Sstevel@tonic-gate 	    chunklen < (sizeof (*sch) + sizeof (*sc) +
25580Sstevel@tonic-gate 	    num_gaps * sizeof (*ssf))) {
25590Sstevel@tonic-gate 		goto ret;
25600Sstevel@tonic-gate 	}
2561852Svi117747 #ifdef	DEBUG
2562852Svi117747 	/*
2563852Svi117747 	 * Since we delete any message that has been acked completely,
2564852Svi117747 	 * the unacked chunk must belong to sctp_xmit_head (as
2565852Svi117747 	 * we don't have a back pointer from the mp to the meta data
2566852Svi117747 	 * we do this).
2567852Svi117747 	 */
2568852Svi117747 	{
2569852Svi117747 		mblk_t	*mp2 = sctp->sctp_xmit_head->b_cont;
2570852Svi117747 
2571852Svi117747 		while (mp2 != NULL) {
2572852Svi117747 			if (mp2 == mp)
2573852Svi117747 				break;
2574852Svi117747 			mp2 = mp2->b_next;
2575852Svi117747 		}
2576852Svi117747 		ASSERT(mp2 != NULL);
2577852Svi117747 	}
2578852Svi117747 #endif
25790Sstevel@tonic-gate 	ump = sctp->sctp_xmit_head;
25800Sstevel@tonic-gate 
25810Sstevel@tonic-gate 	/*
2582852Svi117747 	 * Just remember where we started from, in case we need to call
2583852Svi117747 	 * sctp_process_uo_gaps() if the gap blocks are unordered.
2584852Svi117747 	 */
2585852Svi117747 	mp1 = mp;
2586852Svi117747 
2587852Svi117747 	sdc = (sctp_data_hdr_t *)mp->b_rptr;
2588852Svi117747 	xtsn = ntohl(sdc->sdh_tsn);
2589852Svi117747 	ASSERT(xtsn == cumtsn + 1);
2590852Svi117747 
2591852Svi117747 	/*
25920Sstevel@tonic-gate 	 * Go through SACK gaps. They are ordered based on start TSN.
25930Sstevel@tonic-gate 	 */
25940Sstevel@tonic-gate 	ssf = (sctp_sack_frag_t *)(sc + 1);
2595852Svi117747 	for (i = 0; i < num_gaps; i++, ssf++) {
2596852Svi117747 		if (gapstart != 0) {
2597852Svi117747 			/* check for unordered gap */
2598852Svi117747 			if (SEQ_LEQ(cumtsn + ntohs(ssf->ssf_start), gapstart)) {
2599852Svi117747 				acked += sctp_process_uo_gaps(sctp,
2600852Svi117747 				    cumtsn, ssf, num_gaps - i,
2601852Svi117747 				    sctp->sctp_xmit_head, mp1,
2602852Svi117747 				    &trysend, &fast_recovery, gapstart);
2603852Svi117747 				if (trysend < 0) {
2604852Svi117747 					BUMP_MIB(&sctp_mib, sctpInAckUnsent);
2605852Svi117747 					return (-1);
2606852Svi117747 				}
2607852Svi117747 				break;
2608852Svi117747 			}
2609852Svi117747 		}
2610852Svi117747 		gapstart = cumtsn + ntohs(ssf->ssf_start);
2611852Svi117747 		gapend = cumtsn + ntohs(ssf->ssf_end);
2612852Svi117747 
2613852Svi117747 		/* SACK for TSN we have not sent - ABORT */
2614852Svi117747 		if (SEQ_GT(gapstart, sctp->sctp_ltsn - 1) ||
2615852Svi117747 		    SEQ_GT(gapend, sctp->sctp_ltsn - 1)) {
2616852Svi117747 			BUMP_MIB(&sctp_mib, sctpInAckUnsent);
2617852Svi117747 			return (-1);
2618852Svi117747 		} else if (SEQ_LT(gapend, gapstart)) {
2619852Svi117747 			break;
2620852Svi117747 		}
2621852Svi117747 		/*
2622852Svi117747 		 * Let's start at the current TSN (for the 1st gap we start
2623852Svi117747 		 * from the cumulative TSN, for subsequent ones we start from
2624852Svi117747 		 * where the previous gapend was found - second while loop
2625852Svi117747 		 * below) and walk the transmit list till we find the TSN
2626852Svi117747 		 * corresponding to gapstart. All the unacked chunks till we
2627852Svi117747 		 * get to the chunk with TSN == gapstart will have their
2628852Svi117747 		 * SACKCNT incremented by 1. Note since the gap blocks are
2629852Svi117747 		 * ordered, we won't be incrementing the SACKCNT for an
2630852Svi117747 		 * unacked chunk by more than one while processing the gap
2631852Svi117747 		 * blocks. If the SACKCNT for any unacked chunk exceeds
2632852Svi117747 		 * the fast retransmit threshold, we will fast retransmit
2633852Svi117747 		 * after processing all the gap blocks.
2634852Svi117747 		 */
2635852Svi117747 		ASSERT(SEQ_LT(xtsn, gapstart));
26360Sstevel@tonic-gate 		while (xtsn != gapstart) {
26370Sstevel@tonic-gate 			SCTP_CHUNK_SET_SACKCNT(mp, SCTP_CHUNK_SACKCNT(mp) + 1);
26380Sstevel@tonic-gate 			if (SCTP_CHUNK_SACKCNT(mp) == sctp_fast_rxt_thresh) {
26390Sstevel@tonic-gate 				SCTP_CHUNK_REXMIT(mp);
26400Sstevel@tonic-gate 				sctp->sctp_chk_fast_rexmit = B_TRUE;
26410Sstevel@tonic-gate 				trysend = 1;
26420Sstevel@tonic-gate 				if (!fast_recovery) {
26430Sstevel@tonic-gate 					/*
26440Sstevel@tonic-gate 					 * Entering fast recovery.
26450Sstevel@tonic-gate 					 */
26460Sstevel@tonic-gate 					fp = SCTP_CHUNK_DEST(mp);
26470Sstevel@tonic-gate 					fp->ssthresh = fp->cwnd / 2;
26480Sstevel@tonic-gate 					if (fp->ssthresh < 2 * fp->sfa_pmss) {
26490Sstevel@tonic-gate 						fp->ssthresh =
26500Sstevel@tonic-gate 						    2 * fp->sfa_pmss;
26510Sstevel@tonic-gate 					}
26520Sstevel@tonic-gate 					fp->cwnd = fp->ssthresh;
26530Sstevel@tonic-gate 					fp->pba = 0;
26540Sstevel@tonic-gate 					sctp->sctp_recovery_tsn =
26550Sstevel@tonic-gate 					    sctp->sctp_ltsn - 1;
26560Sstevel@tonic-gate 					fast_recovery = B_TRUE;
26570Sstevel@tonic-gate 				}
26580Sstevel@tonic-gate 			}
26590Sstevel@tonic-gate 
26600Sstevel@tonic-gate 			/*
26610Sstevel@tonic-gate 			 * Peer may have reneged on this chunk, so un-sack
26620Sstevel@tonic-gate 			 * it now. If the peer did renege, we need to
26630Sstevel@tonic-gate 			 * readjust unacked.
26640Sstevel@tonic-gate 			 */
26650Sstevel@tonic-gate 			if (SCTP_CHUNK_ISACKED(mp)) {
26660Sstevel@tonic-gate 				chunklen = ntohs(sdc->sdh_len);
26670Sstevel@tonic-gate 				fp = SCTP_CHUNK_DEST(mp);
26680Sstevel@tonic-gate 				fp->suna += chunklen;
26690Sstevel@tonic-gate 				sctp->sctp_unacked += chunklen - sizeof (*sdc);
26700Sstevel@tonic-gate 				SCTP_CHUNK_CLEAR_ACKED(mp);
26710Sstevel@tonic-gate 				if (!fp->timer_running) {
26720Sstevel@tonic-gate 					SCTP_FADDR_TIMER_RESTART(sctp, fp,
26730Sstevel@tonic-gate 					    fp->rto);
26740Sstevel@tonic-gate 				}
26750Sstevel@tonic-gate 			}
26760Sstevel@tonic-gate 
26770Sstevel@tonic-gate 			mp = mp->b_next;
26780Sstevel@tonic-gate 			if (mp == NULL) {
26790Sstevel@tonic-gate 				ump = ump->b_next;
2680852Svi117747 				/*
2681852Svi117747 				 * ump can't be NULL given the sanity check
2682852Svi117747 				 * above.
2683852Svi117747 				 */
2684852Svi117747 				ASSERT(ump != NULL);
26850Sstevel@tonic-gate 				mp = ump->b_cont;
26860Sstevel@tonic-gate 			}
2687852Svi117747 			/*
2688852Svi117747 			 * mp can't be unsent given the sanity check above.
2689852Svi117747 			 */
2690852Svi117747 			ASSERT(SCTP_CHUNK_ISSENT(mp));
26910Sstevel@tonic-gate 			sdc = (sctp_data_hdr_t *)mp->b_rptr;
26920Sstevel@tonic-gate 			xtsn = ntohl(sdc->sdh_tsn);
26930Sstevel@tonic-gate 		}
2694852Svi117747 		/*
2695852Svi117747 		 * Now that we have found the chunk with TSN == 'gapstart',
2696852Svi117747 		 * let's walk till we hit the chunk with TSN == 'gapend'.
2697852Svi117747 		 * All intermediate chunks will be marked ACKED, if they
2698852Svi117747 		 * haven't already been.
2699852Svi117747 		 */
27000Sstevel@tonic-gate 		while (SEQ_LEQ(xtsn, gapend)) {
27010Sstevel@tonic-gate 			/*
27020Sstevel@tonic-gate 			 * SACKed
27030Sstevel@tonic-gate 			 */
27040Sstevel@tonic-gate 			SCTP_CHUNK_SET_SACKCNT(mp, 0);
27050Sstevel@tonic-gate 			if (!SCTP_CHUNK_ISACKED(mp)) {
27060Sstevel@tonic-gate 				SCTP_CHUNK_ACKED(mp);
27070Sstevel@tonic-gate 
27080Sstevel@tonic-gate 				fp = SCTP_CHUNK_DEST(mp);
27090Sstevel@tonic-gate 				chunklen = ntohs(sdc->sdh_len);
27100Sstevel@tonic-gate 				ASSERT(fp->suna >= chunklen);
27110Sstevel@tonic-gate 				fp->suna -= chunklen;
27120Sstevel@tonic-gate 				if (fp->suna == 0) {
27130Sstevel@tonic-gate 					/* All outstanding data acked. */
27140Sstevel@tonic-gate 					fp->pba = 0;
27150Sstevel@tonic-gate 					SCTP_FADDR_TIMER_STOP(fp);
27160Sstevel@tonic-gate 				}
27170Sstevel@tonic-gate 				fp->acked += chunklen;
27180Sstevel@tonic-gate 				acked += chunklen;
27190Sstevel@tonic-gate 				sctp->sctp_unacked -= chunklen - sizeof (*sdc);
27200Sstevel@tonic-gate 				ASSERT(sctp->sctp_unacked >= 0);
27210Sstevel@tonic-gate 			}
2722852Svi117747 			/* Go to the next chunk of the current message */
27230Sstevel@tonic-gate 			mp = mp->b_next;
2724852Svi117747 			/*
2725852Svi117747 			 * Move to the next message in the transmit list
2726852Svi117747 			 * if we are done with all the chunks from the current
2727852Svi117747 			 * message. Note, it is possible to hit the end of the
2728852Svi117747 			 * transmit list here, i.e. if we have already completed
2729852Svi117747 			 * processing the gap block.
2730852Svi117747 			 * Also, note that we break here, which means we
2731852Svi117747 			 * continue processing gap blocks, if any. In case of
2732852Svi117747 			 * ordered gap blocks there can't be any following
2733852Svi117747 			 * this (if there is it will fail the sanity check
2734852Svi117747 			 * above). In case of un-ordered gap blocks we will
2735852Svi117747 			 * switch to sctp_process_uo_gaps().  In either case
2736852Svi117747 			 * it should be fine to continue with NULL ump/mp,
2737852Svi117747 			 * but we just reset it to xmit_head.
2738852Svi117747 			 */
27390Sstevel@tonic-gate 			if (mp == NULL) {
27400Sstevel@tonic-gate 				ump = ump->b_next;
27410Sstevel@tonic-gate 				if (ump == NULL) {
2742852Svi117747 					ASSERT(xtsn == gapend);
2743852Svi117747 					ump = sctp->sctp_xmit_head;
2744852Svi117747 					mp = mp1;
2745852Svi117747 					sdc = (sctp_data_hdr_t *)mp->b_rptr;
2746852Svi117747 					xtsn = ntohl(sdc->sdh_tsn);
2747852Svi117747 					break;
27480Sstevel@tonic-gate 				}
27490Sstevel@tonic-gate 				mp = ump->b_cont;
27500Sstevel@tonic-gate 			}
2751852Svi117747 			/*
2752852Svi117747 			 * Likewise, we could hit an unsent chunk once we have
2753852Svi117747 			 * completed processing the gap block. Again, it is
2754852Svi117747 			 * fine to continue processing gap blocks with mp
2755852Svi117747 			 * pointing to the unsent chunk, because if there
2756852Svi117747 			 * are more ordered gap blocks, they will fail the
2757852Svi117747 			 * sanity check, and if there are un-ordered gap blocks,
2758852Svi117747 			 * we will continue processing in sctp_process_uo_gaps()
2759852Svi117747 			 * We just reset the mp to the one we started with.
2760852Svi117747 			 */
27610Sstevel@tonic-gate 			if (!SCTP_CHUNK_ISSENT(mp)) {
2762852Svi117747 				ASSERT(xtsn == gapend);
2763852Svi117747 				ump = sctp->sctp_xmit_head;
2764852Svi117747 				mp = mp1;
2765852Svi117747 				sdc = (sctp_data_hdr_t *)mp->b_rptr;
2766852Svi117747 				xtsn = ntohl(sdc->sdh_tsn);
2767852Svi117747 				break;
27680Sstevel@tonic-gate 			}
27690Sstevel@tonic-gate 			sdc = (sctp_data_hdr_t *)mp->b_rptr;
27700Sstevel@tonic-gate 			xtsn = ntohl(sdc->sdh_tsn);
27710Sstevel@tonic-gate 		}
27720Sstevel@tonic-gate 	}
27730Sstevel@tonic-gate 	if (sctp->sctp_prsctp_aware)
27740Sstevel@tonic-gate 		sctp_check_abandoned_data(sctp, sctp->sctp_current);
27750Sstevel@tonic-gate 	if (sctp->sctp_chk_fast_rexmit)
27760Sstevel@tonic-gate 		sctp_fast_rexmit(sctp);
27770Sstevel@tonic-gate ret:
27780Sstevel@tonic-gate 	trysend += sctp_set_frwnd(sctp, ntohl(sc->ssc_a_rwnd));
27790Sstevel@tonic-gate 
27800Sstevel@tonic-gate 	/*
27810Sstevel@tonic-gate 	 * If receive window is closed while there is unsent data,
27820Sstevel@tonic-gate 	 * set a timer for doing zero window probes.
27830Sstevel@tonic-gate 	 */
27840Sstevel@tonic-gate 	if (sctp->sctp_frwnd == 0 && sctp->sctp_unacked == 0 &&
27850Sstevel@tonic-gate 	    sctp->sctp_unsent != 0) {
27860Sstevel@tonic-gate 		SCTP_FADDR_TIMER_RESTART(sctp, sctp->sctp_current,
27870Sstevel@tonic-gate 		    sctp->sctp_current->rto);
27880Sstevel@tonic-gate 	}
27890Sstevel@tonic-gate 
27900Sstevel@tonic-gate 	/*
27910Sstevel@tonic-gate 	 * Set cwnd for all destinations.
27920Sstevel@tonic-gate 	 * Congestion window gets increased only when cumulative
27930Sstevel@tonic-gate 	 * TSN moves forward, we're not in fast recovery, and
27940Sstevel@tonic-gate 	 * cwnd has been fully utilized (almost fully, need to allow
27950Sstevel@tonic-gate 	 * some leeway due to non-MSS sized messages).
27960Sstevel@tonic-gate 	 */
27970Sstevel@tonic-gate 	if (sctp->sctp_current->acked == acked) {
27980Sstevel@tonic-gate 		/*
27990Sstevel@tonic-gate 		 * Fast-path, only data sent to sctp_current got acked.
28000Sstevel@tonic-gate 		 */
28010Sstevel@tonic-gate 		fp = sctp->sctp_current;
28020Sstevel@tonic-gate 		if (cumack_forward && !fast_recovery &&
28030Sstevel@tonic-gate 		    (fp->acked + fp->suna > fp->cwnd - fp->sfa_pmss)) {
28040Sstevel@tonic-gate 			if (fp->cwnd < fp->ssthresh) {
28050Sstevel@tonic-gate 				/*
28060Sstevel@tonic-gate 				 * Slow start
28070Sstevel@tonic-gate 				 */
28080Sstevel@tonic-gate 				if (fp->acked > fp->sfa_pmss) {
28090Sstevel@tonic-gate 					fp->cwnd += fp->sfa_pmss;
28100Sstevel@tonic-gate 				} else {
28110Sstevel@tonic-gate 					fp->cwnd += fp->acked;
28120Sstevel@tonic-gate 				}
28130Sstevel@tonic-gate 				fp->cwnd = MIN(fp->cwnd, sctp->sctp_cwnd_max);
28140Sstevel@tonic-gate 			} else {
28150Sstevel@tonic-gate 				/*
28160Sstevel@tonic-gate 				 * Congestion avoidance
28170Sstevel@tonic-gate 				 */
28180Sstevel@tonic-gate 				fp->pba += fp->acked;
28190Sstevel@tonic-gate 				if (fp->pba >= fp->cwnd) {
28200Sstevel@tonic-gate 					fp->pba -= fp->cwnd;
28210Sstevel@tonic-gate 					fp->cwnd += fp->sfa_pmss;
28220Sstevel@tonic-gate 					fp->cwnd = MIN(fp->cwnd,
28230Sstevel@tonic-gate 					    sctp->sctp_cwnd_max);
28240Sstevel@tonic-gate 				}
28250Sstevel@tonic-gate 			}
28260Sstevel@tonic-gate 		}
28270Sstevel@tonic-gate 		/*
28280Sstevel@tonic-gate 		 * Limit the burst of transmitted data segments.
28290Sstevel@tonic-gate 		 */
28300Sstevel@tonic-gate 		if (fp->suna + sctp_maxburst * fp->sfa_pmss < fp->cwnd) {
28310Sstevel@tonic-gate 			fp->cwnd = fp->suna + sctp_maxburst * fp->sfa_pmss;
28320Sstevel@tonic-gate 		}
28330Sstevel@tonic-gate 		fp->acked = 0;
28341735Skcpoon 		goto check_ss_rxmit;
28350Sstevel@tonic-gate 	}
2836*1932Svi117747 	for (fp = sctp->sctp_faddrs; fp != NULL; fp = fp->next) {
28370Sstevel@tonic-gate 		if (cumack_forward && fp->acked && !fast_recovery &&
28380Sstevel@tonic-gate 		    (fp->acked + fp->suna > fp->cwnd - fp->sfa_pmss)) {
28390Sstevel@tonic-gate 			if (fp->cwnd < fp->ssthresh) {
28400Sstevel@tonic-gate 				if (fp->acked > fp->sfa_pmss) {
28410Sstevel@tonic-gate 					fp->cwnd += fp->sfa_pmss;
28420Sstevel@tonic-gate 				} else {
28430Sstevel@tonic-gate 					fp->cwnd += fp->acked;
28440Sstevel@tonic-gate 				}
28450Sstevel@tonic-gate 				fp->cwnd = MIN(fp->cwnd, sctp->sctp_cwnd_max);
28460Sstevel@tonic-gate 			} else {
28470Sstevel@tonic-gate 				fp->pba += fp->acked;
28480Sstevel@tonic-gate 				if (fp->pba >= fp->cwnd) {
28490Sstevel@tonic-gate 					fp->pba -= fp->cwnd;
28500Sstevel@tonic-gate 					fp->cwnd += fp->sfa_pmss;
28510Sstevel@tonic-gate 					fp->cwnd = MIN(fp->cwnd,
28520Sstevel@tonic-gate 					    sctp->sctp_cwnd_max);
28530Sstevel@tonic-gate 				}
28540Sstevel@tonic-gate 			}
28550Sstevel@tonic-gate 		}
28560Sstevel@tonic-gate 		if (fp->suna + sctp_maxburst * fp->sfa_pmss < fp->cwnd) {
28570Sstevel@tonic-gate 			fp->cwnd = fp->suna + sctp_maxburst * fp->sfa_pmss;
28580Sstevel@tonic-gate 		}
28590Sstevel@tonic-gate 		fp->acked = 0;
28600Sstevel@tonic-gate 	}
28611735Skcpoon check_ss_rxmit:
28621735Skcpoon 	/*
28631735Skcpoon 	 * If this is a SACK following a timeout, check if there are
28641735Skcpoon 	 * still unacked chunks (sent before the timeout) that we can
28651735Skcpoon 	 * send.
28661735Skcpoon 	 */
28671735Skcpoon 	if (sctp->sctp_rexmitting) {
28681735Skcpoon 		if (SEQ_LT(sctp->sctp_lastack_rxd, sctp->sctp_rxt_maxtsn)) {
28691735Skcpoon 			/*
28701735Skcpoon 			 * As we are in retransmission phase, we may get a
28711735Skcpoon 			 * SACK which indicates some new chunks are received
28721735Skcpoon 			 * but cum_tsn does not advance.  During this
28731735Skcpoon 			 * phase, the other side advances cum_tsn only because
28741735Skcpoon 			 * it receives our retransmitted chunks.  Only
28751735Skcpoon 			 * this signals that some chunks are still
28761735Skcpoon 			 * missing.
28771735Skcpoon 			 */
28781735Skcpoon 			if (cumack_forward)
28791735Skcpoon 				sctp_ss_rexmit(sctp);
28801735Skcpoon 		} else {
28811735Skcpoon 			sctp->sctp_rexmitting = B_FALSE;
28821735Skcpoon 			sctp->sctp_rxt_nxttsn = sctp->sctp_ltsn;
28831735Skcpoon 			sctp->sctp_rxt_maxtsn = sctp->sctp_ltsn;
28841735Skcpoon 		}
28851735Skcpoon 	}
28860Sstevel@tonic-gate 	return (trysend);
28870Sstevel@tonic-gate }
28880Sstevel@tonic-gate 
28890Sstevel@tonic-gate /*
28900Sstevel@tonic-gate  * Returns 0 if the caller should stop processing any more chunks,
28910Sstevel@tonic-gate  * 1 if the caller should skip this chunk and continue processing.
28920Sstevel@tonic-gate  */
28930Sstevel@tonic-gate static int
28940Sstevel@tonic-gate sctp_strange_chunk(sctp_t *sctp, sctp_chunk_hdr_t *ch, sctp_faddr_t *fp)
28950Sstevel@tonic-gate {
28960Sstevel@tonic-gate 	mblk_t *errmp;
28970Sstevel@tonic-gate 	size_t len;
28980Sstevel@tonic-gate 
28990Sstevel@tonic-gate 	BUMP_LOCAL(sctp->sctp_ibchunks);
29000Sstevel@tonic-gate 	/* check top two bits for action required */
29010Sstevel@tonic-gate 	if (ch->sch_id & 0x40) {	/* also matches 0xc0 */
29020Sstevel@tonic-gate 		len = ntohs(ch->sch_len);
29030Sstevel@tonic-gate 		errmp = sctp_make_err(sctp, SCTP_ERR_UNREC_CHUNK, ch, len);
29040Sstevel@tonic-gate 		if (errmp != NULL)
29050Sstevel@tonic-gate 			sctp_send_err(sctp, errmp, fp);
29060Sstevel@tonic-gate 		if ((ch->sch_id & 0xc0) == 0xc0) {
29070Sstevel@tonic-gate 			/* skip and continue */
29080Sstevel@tonic-gate 			return (1);
29090Sstevel@tonic-gate 		} else {
29100Sstevel@tonic-gate 			/* stop processing */
29110Sstevel@tonic-gate 			return (0);
29120Sstevel@tonic-gate 		}
29130Sstevel@tonic-gate 	}
29140Sstevel@tonic-gate 	if (ch->sch_id & 0x80) {
29150Sstevel@tonic-gate 		/* skip and continue, no error */
29160Sstevel@tonic-gate 		return (1);
29170Sstevel@tonic-gate 	}
29180Sstevel@tonic-gate 	/* top two bits are clear; stop processing and no error */
29190Sstevel@tonic-gate 	return (0);
29200Sstevel@tonic-gate }
29210Sstevel@tonic-gate 
29220Sstevel@tonic-gate /*
29230Sstevel@tonic-gate  * Basic sanity checks on all input chunks and parameters: they must
29240Sstevel@tonic-gate  * be of legitimate size for their purported type, and must follow
29250Sstevel@tonic-gate  * ordering conventions as defined in rfc2960.
29260Sstevel@tonic-gate  *
29270Sstevel@tonic-gate  * Returns 1 if the chunk and all encloded params are legitimate,
29280Sstevel@tonic-gate  * 0 otherwise.
29290Sstevel@tonic-gate  */
29300Sstevel@tonic-gate /*ARGSUSED*/
29310Sstevel@tonic-gate static int
29320Sstevel@tonic-gate sctp_check_input(sctp_t *sctp, sctp_chunk_hdr_t *ch, ssize_t len, int first)
29330Sstevel@tonic-gate {
29340Sstevel@tonic-gate 	sctp_parm_hdr_t	*ph;
29350Sstevel@tonic-gate 	void		*p = NULL;
29360Sstevel@tonic-gate 	ssize_t		clen;
29370Sstevel@tonic-gate 	uint16_t	ch_len;
29380Sstevel@tonic-gate 
29390Sstevel@tonic-gate 	ch_len = ntohs(ch->sch_len);
29400Sstevel@tonic-gate 	if (ch_len > len) {
29410Sstevel@tonic-gate 		return (0);
29420Sstevel@tonic-gate 	}
29430Sstevel@tonic-gate 
29440Sstevel@tonic-gate 	switch (ch->sch_id) {
29450Sstevel@tonic-gate 	case CHUNK_DATA:
29460Sstevel@tonic-gate 		if (ch_len < sizeof (sctp_data_hdr_t)) {
29470Sstevel@tonic-gate 			return (0);
29480Sstevel@tonic-gate 		}
29490Sstevel@tonic-gate 		return (1);
29500Sstevel@tonic-gate 	case CHUNK_INIT:
29510Sstevel@tonic-gate 	case CHUNK_INIT_ACK:
29520Sstevel@tonic-gate 		{
29530Sstevel@tonic-gate 			ssize_t	remlen = len;
29540Sstevel@tonic-gate 
29550Sstevel@tonic-gate 			/*
29560Sstevel@tonic-gate 			 * INIT and INIT-ACK chunks must not be bundled with
29570Sstevel@tonic-gate 			 * any other.
29580Sstevel@tonic-gate 			 */
29590Sstevel@tonic-gate 			if (!first || sctp_next_chunk(ch, &remlen) != NULL ||
29600Sstevel@tonic-gate 			    (ch_len < (sizeof (*ch) +
29610Sstevel@tonic-gate 			    sizeof (sctp_init_chunk_t)))) {
29620Sstevel@tonic-gate 				return (0);
29630Sstevel@tonic-gate 			}
29640Sstevel@tonic-gate 			/* may have params that need checking */
29650Sstevel@tonic-gate 			p = (char *)(ch + 1) + sizeof (sctp_init_chunk_t);
29660Sstevel@tonic-gate 			clen = ch_len - (sizeof (*ch) +
29670Sstevel@tonic-gate 			    sizeof (sctp_init_chunk_t));
29680Sstevel@tonic-gate 		}
29690Sstevel@tonic-gate 		break;
29700Sstevel@tonic-gate 	case CHUNK_SACK:
29710Sstevel@tonic-gate 		if (ch_len < (sizeof (*ch) + sizeof (sctp_sack_chunk_t))) {
29720Sstevel@tonic-gate 			return (0);
29730Sstevel@tonic-gate 		}
29740Sstevel@tonic-gate 		/* dup and gap reports checked by got_sack() */
29750Sstevel@tonic-gate 		return (1);
29760Sstevel@tonic-gate 	case CHUNK_SHUTDOWN:
29770Sstevel@tonic-gate 		if (ch_len < (sizeof (*ch) + sizeof (uint32_t))) {
29780Sstevel@tonic-gate 			return (0);
29790Sstevel@tonic-gate 		}
29800Sstevel@tonic-gate 		return (1);
29810Sstevel@tonic-gate 	case CHUNK_ABORT:
29820Sstevel@tonic-gate 	case CHUNK_ERROR:
29830Sstevel@tonic-gate 		if (ch_len < sizeof (*ch)) {
29840Sstevel@tonic-gate 			return (0);
29850Sstevel@tonic-gate 		}
29860Sstevel@tonic-gate 		/* may have params that need checking */
29870Sstevel@tonic-gate 		p = ch + 1;
29880Sstevel@tonic-gate 		clen = ch_len - sizeof (*ch);
29890Sstevel@tonic-gate 		break;
29900Sstevel@tonic-gate 	case CHUNK_ECNE:
29910Sstevel@tonic-gate 	case CHUNK_CWR:
29920Sstevel@tonic-gate 	case CHUNK_HEARTBEAT:
29930Sstevel@tonic-gate 	case CHUNK_HEARTBEAT_ACK:
29940Sstevel@tonic-gate 	/* Full ASCONF chunk and parameter checks are in asconf.c */
29950Sstevel@tonic-gate 	case CHUNK_ASCONF:
29960Sstevel@tonic-gate 	case CHUNK_ASCONF_ACK:
29970Sstevel@tonic-gate 		if (ch_len < sizeof (*ch)) {
29980Sstevel@tonic-gate 			return (0);
29990Sstevel@tonic-gate 		}
30000Sstevel@tonic-gate 		/* heartbeat data checked by process_heartbeat() */
30010Sstevel@tonic-gate 		return (1);
30020Sstevel@tonic-gate 	case CHUNK_SHUTDOWN_COMPLETE:
30030Sstevel@tonic-gate 		{
30040Sstevel@tonic-gate 			ssize_t remlen = len;
30050Sstevel@tonic-gate 
30060Sstevel@tonic-gate 			/*
30070Sstevel@tonic-gate 			 * SHUTDOWN-COMPLETE chunk must not be bundled with any
30080Sstevel@tonic-gate 			 * other
30090Sstevel@tonic-gate 			 */
30100Sstevel@tonic-gate 			if (!first || sctp_next_chunk(ch, &remlen) != NULL ||
30110Sstevel@tonic-gate 			    ch_len < sizeof (*ch)) {
30120Sstevel@tonic-gate 				return (0);
30130Sstevel@tonic-gate 			}
30140Sstevel@tonic-gate 		}
30150Sstevel@tonic-gate 		return (1);
30160Sstevel@tonic-gate 	case CHUNK_COOKIE:
30170Sstevel@tonic-gate 	case CHUNK_COOKIE_ACK:
30180Sstevel@tonic-gate 	case CHUNK_SHUTDOWN_ACK:
30190Sstevel@tonic-gate 		if (ch_len < sizeof (*ch) || !first) {
30200Sstevel@tonic-gate 			return (0);
30210Sstevel@tonic-gate 		}
30220Sstevel@tonic-gate 		return (1);
30230Sstevel@tonic-gate 	case CHUNK_FORWARD_TSN:
30240Sstevel@tonic-gate 		if (ch_len < (sizeof (*ch) + sizeof (uint32_t)))
30250Sstevel@tonic-gate 			return (0);
30260Sstevel@tonic-gate 		return (1);
30270Sstevel@tonic-gate 	default:
30280Sstevel@tonic-gate 		return (1);	/* handled by strange_chunk() */
30290Sstevel@tonic-gate 	}
30300Sstevel@tonic-gate 
30310Sstevel@tonic-gate 	/* check and byteorder parameters */
30320Sstevel@tonic-gate 	if (clen <= 0) {
30330Sstevel@tonic-gate 		return (1);
30340Sstevel@tonic-gate 	}
30350Sstevel@tonic-gate 	ASSERT(p != NULL);
30360Sstevel@tonic-gate 
30370Sstevel@tonic-gate 	ph = p;
30380Sstevel@tonic-gate 	while (ph != NULL && clen > 0) {
30390Sstevel@tonic-gate 		ch_len = ntohs(ph->sph_len);
30400Sstevel@tonic-gate 		if (ch_len > len || ch_len < sizeof (*ph)) {
30410Sstevel@tonic-gate 			return (0);
30420Sstevel@tonic-gate 		}
30430Sstevel@tonic-gate 		ph = sctp_next_parm(ph, &clen);
30440Sstevel@tonic-gate 	}
30450Sstevel@tonic-gate 
30460Sstevel@tonic-gate 	/* All OK */
30470Sstevel@tonic-gate 	return (1);
30480Sstevel@tonic-gate }
30490Sstevel@tonic-gate 
/* ARGSUSED */
/*
 * Parse the IP (v4 or v6) header at the front of mp and locate the SCTP
 * common header that follows it.
 *
 * On return:
 *   *src, *dst   - packet source/destination (IPv4 addrs are V4-mapped)
 *   *ip_hdr_len  - length of the IP header plus any v6 extension headers
 *   ipp          - hop limit, and the receiving ifindex when available
 *
 * The ifindex parameter is unused (ARGSUSED); ifindex information is
 * instead reported through ipp->ipp_ifindex.
 */
static sctp_hdr_t *
find_sctp_hdrs(mblk_t *mp, in6_addr_t *src, in6_addr_t *dst,
    uint_t *ifindex, uint_t *ip_hdr_len, ip6_pkt_t *ipp, in_pktinfo_t *pinfo)
{
	uchar_t	*rptr;
	ipha_t	*ip4h;
	ip6_t	*ip6h;
	mblk_t	*mp1;

	rptr = mp->b_rptr;
	if (IPH_HDR_VERSION(rptr) == IPV4_VERSION) {
		/* IPv4: fixed-format header; map addresses into v6 form. */
		*ip_hdr_len = IPH_HDR_LENGTH(rptr);
		ip4h = (ipha_t *)rptr;
		IN6_IPADDR_TO_V4MAPPED(ip4h->ipha_src, src);
		IN6_IPADDR_TO_V4MAPPED(ip4h->ipha_dst, dst);

		ipp->ipp_fields |= IPPF_HOPLIMIT;
		ipp->ipp_hoplimit = ((ipha_t *)rptr)->ipha_ttl;
		/* Receiving interface, if the caller supplied pktinfo. */
		if (pinfo != NULL && (pinfo->in_pkt_flags & IPF_RECVIF)) {
			ipp->ipp_fields |= IPPF_IFINDEX;
			ipp->ipp_ifindex = pinfo->in_pkt_ifindex;
		}
	} else {
		ASSERT(IPH_HDR_VERSION(rptr) == IPV6_VERSION);
		ip6h = (ip6_t *)rptr;
		ipp->ipp_fields = IPPF_HOPLIMIT;
		ipp->ipp_hoplimit = ip6h->ip6_hops;

		if (ip6h->ip6_nxt != IPPROTO_SCTP) {
			/* Look for ifindex information */
			if (ip6h->ip6_nxt == IPPROTO_RAW) {
				/*
				 * An ip6i_t "info" header precedes the real
				 * IPv6 header; pick up the ifindex from it
				 * and strip it off.
				 */
				ip6i_t *ip6i = (ip6i_t *)ip6h;

				if (ip6i->ip6i_flags & IP6I_IFINDEX) {
					ASSERT(ip6i->ip6i_ifindex != 0);
					ipp->ipp_fields |= IPPF_IFINDEX;
					ipp->ipp_ifindex = ip6i->ip6i_ifindex;
				}
				rptr = (uchar_t *)&ip6i[1];
				mp->b_rptr = rptr;
				if (rptr == mp->b_wptr) {
					/*
					 * The ip6i_t occupied this whole
					 * mblk; free it and continue in the
					 * next one.
					 * NOTE(review): only the local mp is
					 * advanced here -- the caller's mblk
					 * pointer still references the freed
					 * block.  Presumably callers on this
					 * path do not touch their mp again;
					 * verify before reusing this helper.
					 */
					mp1 = mp->b_cont;
					freeb(mp);
					mp = mp1;
					rptr = mp->b_rptr;
				}
				ASSERT(mp->b_wptr - rptr >=
				    IPV6_HDR_LEN + sizeof (sctp_hdr_t));
				ip6h = (ip6_t *)rptr;
			}
			/*
			 * Find any potentially interesting extension headers
			 * as well as the length of the IPv6 + extension
			 * headers.
			 */
			*ip_hdr_len = ip_find_hdr_v6(mp, ip6h, ipp, NULL);
		} else {
			/* No extension headers: fixed 40-byte IPv6 header. */
			*ip_hdr_len = IPV6_HDR_LEN;
		}
		*src = ip6h->ip6_src;
		*dst = ip6h->ip6_dst;
	}
	ASSERT((uintptr_t)(mp->b_wptr - rptr) <= (uintptr_t)INT_MAX);
	/* The SCTP common header starts right after the IP headers. */
	return ((sctp_hdr_t *)&rptr[*ip_hdr_len]);
#undef IPVER	/* NOTE(review): no matching #define visible; apparent leftover */
}
31170Sstevel@tonic-gate 
31180Sstevel@tonic-gate static mblk_t *
31190Sstevel@tonic-gate sctp_check_in_policy(mblk_t *mp, mblk_t *ipsec_mp)
31200Sstevel@tonic-gate {
31210Sstevel@tonic-gate 	ipsec_in_t *ii;
31220Sstevel@tonic-gate 	boolean_t check = B_TRUE;
31230Sstevel@tonic-gate 	boolean_t policy_present;
31240Sstevel@tonic-gate 	ipha_t *ipha;
31250Sstevel@tonic-gate 	ip6_t *ip6h;
31260Sstevel@tonic-gate 
31270Sstevel@tonic-gate 	ii = (ipsec_in_t *)ipsec_mp->b_rptr;
31280Sstevel@tonic-gate 	ASSERT(ii->ipsec_in_type == IPSEC_IN);
31290Sstevel@tonic-gate 	if (ii->ipsec_in_dont_check) {
31300Sstevel@tonic-gate 		check = B_FALSE;
31310Sstevel@tonic-gate 		if (!ii->ipsec_in_secure) {
31320Sstevel@tonic-gate 			freeb(ipsec_mp);
31330Sstevel@tonic-gate 			ipsec_mp = NULL;
31340Sstevel@tonic-gate 		}
31350Sstevel@tonic-gate 	}
31360Sstevel@tonic-gate 	if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION) {
31370Sstevel@tonic-gate 		policy_present = ipsec_inbound_v4_policy_present;
31380Sstevel@tonic-gate 		ipha = (ipha_t *)mp->b_rptr;
31390Sstevel@tonic-gate 		ip6h = NULL;
31400Sstevel@tonic-gate 	} else {
31410Sstevel@tonic-gate 		policy_present = ipsec_inbound_v6_policy_present;
31420Sstevel@tonic-gate 		ipha = NULL;
31430Sstevel@tonic-gate 		ip6h = (ip6_t *)mp->b_rptr;
31440Sstevel@tonic-gate 	}
31450Sstevel@tonic-gate 
31460Sstevel@tonic-gate 	if (check && policy_present) {
31470Sstevel@tonic-gate 		/*
31480Sstevel@tonic-gate 		 * The conn_t parameter is NULL because we already know
31490Sstevel@tonic-gate 		 * nobody's home.
31500Sstevel@tonic-gate 		 */
31510Sstevel@tonic-gate 		ipsec_mp = ipsec_check_global_policy(ipsec_mp, (conn_t *)NULL,
31520Sstevel@tonic-gate 		    ipha, ip6h, B_TRUE);
31530Sstevel@tonic-gate 		if (ipsec_mp == NULL)
31540Sstevel@tonic-gate 			return (NULL);
31550Sstevel@tonic-gate 	}
31560Sstevel@tonic-gate 	if (ipsec_mp != NULL)
31570Sstevel@tonic-gate 		freeb(ipsec_mp);
31580Sstevel@tonic-gate 	return (mp);
31590Sstevel@tonic-gate }
31600Sstevel@tonic-gate 
/* Handle out-of-the-blue packets */
/*
 * An "out of the blue" packet is one for which no association lookup
 * succeeded.  Depending on the type of the first chunk, this sends an
 * ABORT or SHUTDOWN-COMPLETE, re-routes an INIT ACK whose peer changed
 * source address, or silently drops the packet.
 *
 * When mctl_present is B_TRUE, mp is preceded by an M_CTL (IPSEC_IN)
 * block and the pair is referenced via first_mp; otherwise
 * first_mp == mp.
 */
void
sctp_ootb_input(mblk_t *mp, ill_t *recv_ill, uint_t ipif_seqid,
    zoneid_t zoneid, boolean_t mctl_present)
{
	sctp_t			*sctp;
	sctp_chunk_hdr_t	*ch;
	sctp_hdr_t		*sctph;
	in6_addr_t		src, dst;
	uint_t			ip_hdr_len;
	uint_t			ifindex;
	ip6_pkt_t		ipp;
	ssize_t			mlen;
	in_pktinfo_t		*pinfo = NULL;
	mblk_t			*first_mp;

	BUMP_MIB(&sctp_mib, sctpOutOfBlue);
	BUMP_MIB(&sctp_mib, sctpInSCTPPkts);

	first_mp = mp;
	if (mctl_present)
		mp = mp->b_cont;	/* skip past the IPSEC_IN block */

	/* Initiate IPPF processing, if needed. */
	if (IPP_ENABLED(IPP_LOCAL_IN)) {
		ip_process(IPP_LOCAL_IN, &mp,
		    recv_ill->ill_phyint->phyint_ifindex);
		if (mp == NULL) {
			/* ip_process() consumed the packet. */
			if (mctl_present)
				freeb(first_mp);
			return;
		}
	}

	if (mp->b_cont != NULL) {
		/*
		 * All subsequent code is vastly simplified if it can
		 * assume a single contiguous chunk of data.
		 */
		if (pullupmsg(mp, -1) == 0) {
			BUMP_MIB(&ip_mib, ipInDiscards);
			freemsg(first_mp);
			return;
		}
	}

	/*
	 * We don't really need to call this function...  Need to
	 * optimize later.
	 */
	sctph = find_sctp_hdrs(mp, &src, &dst, &ifindex, &ip_hdr_len,
	    &ipp, pinfo);
	/* mlen is the SCTP payload length past the common header. */
	mlen = mp->b_wptr - (uchar_t *)(sctph + 1);
	if ((ch = sctp_first_chunk((uchar_t *)(sctph + 1), mlen)) == NULL) {
		dprint(3, ("sctp_ootb_input: invalid packet\n"));
		BUMP_MIB(&ip_mib, ipInDiscards);
		freemsg(first_mp);
		return;
	}

	/* Dispatch on the type of the first chunk in the packet. */
	switch (ch->sch_id) {
	case CHUNK_INIT:
		/* no listener; send abort  */
		/*
		 * A NULL return from sctp_check_in_policy() means the
		 * policy check consumed the packet; nothing more to do.
		 */
		if (mctl_present && sctp_check_in_policy(mp, first_mp) == NULL)
			return;
		sctp_send_abort(gsctp, sctp_init2vtag(ch), 0,
		    NULL, 0, mp, 0, B_TRUE);
		break;
	case CHUNK_INIT_ACK:
		/* check for changed src addr */
		sctp = sctp_addrlist2sctp(mp, sctph, ch, ipif_seqid, zoneid);
		if (sctp != NULL) {
			/* success; proceed to normal path */
			/*
			 * NOTE(review): when mctl_present, the M_CTL block
			 * (first_mp) does not appear to be freed anywhere on
			 * this path -- verify ownership against
			 * sctp_add_recvq()/sctp_input_data().
			 */
			mutex_enter(&sctp->sctp_lock);
			if (sctp->sctp_running) {
				/* Busy; queue for later processing. */
				if (!sctp_add_recvq(sctp, mp, B_FALSE)) {
					BUMP_MIB(&ip_mib, ipInDiscards);
					freemsg(mp);
				}
				mutex_exit(&sctp->sctp_lock);
			} else {
				/*
				 * If the source address is changed, we
				 * don't need to worry too much about
				 * out of order processing.  So we don't
				 * check if the recvq is empty or not here.
				 */
				sctp->sctp_running = B_TRUE;
				mutex_exit(&sctp->sctp_lock);
				sctp_input_data(sctp, mp, NULL);
				WAKE_SCTP(sctp);
				sctp_process_sendq(sctp);
			}
			SCTP_REFRELE(sctp);
			return;
		}
		if (mctl_present)
			freeb(first_mp);
		/* else bogus init ack; drop it */
		break;
	case CHUNK_SHUTDOWN_ACK:
		if (mctl_present && sctp_check_in_policy(mp, first_mp) == NULL)
			return;
		/* Reply with SHUTDOWN COMPLETE; mp is consumed here. */
		sctp_ootb_shutdown_ack(gsctp, mp, ip_hdr_len);
		sctp_process_sendq(gsctp);
		return;
	case CHUNK_ERROR:
	case CHUNK_ABORT:
	case CHUNK_COOKIE_ACK:
	case CHUNK_SHUTDOWN_COMPLETE:
		/* Silently discard; free only the M_CTL here. */
		if (mctl_present)
			freeb(first_mp);
		break;
	default:
		if (mctl_present && sctp_check_in_policy(mp, first_mp) == NULL)
			return;
		/* Unknown leading chunk from a stranger: abort it. */
		sctp_send_abort(gsctp, sctph->sh_verf, 0, NULL, 0, mp, 0,
		    B_TRUE);
		break;
	}
	/* Common exit for the break cases: flush sendq and drop mp. */
	sctp_process_sendq(gsctp);
	freemsg(mp);
}
32840Sstevel@tonic-gate 
32850Sstevel@tonic-gate void
32860Sstevel@tonic-gate sctp_input(conn_t *connp, ipha_t *ipha, mblk_t *mp, mblk_t *first_mp,
32870Sstevel@tonic-gate     ill_t *recv_ill, boolean_t isv4, boolean_t mctl_present)
32880Sstevel@tonic-gate {
32890Sstevel@tonic-gate 	sctp_t *sctp = CONN2SCTP(connp);
32900Sstevel@tonic-gate 
32910Sstevel@tonic-gate 	/*
32920Sstevel@tonic-gate 	 * We check some fields in conn_t without holding a lock.
32930Sstevel@tonic-gate 	 * This should be fine.
32940Sstevel@tonic-gate 	 */
32950Sstevel@tonic-gate 	if (CONN_INBOUND_POLICY_PRESENT(connp) || mctl_present) {
32960Sstevel@tonic-gate 		first_mp = ipsec_check_inbound_policy(first_mp, connp,
32970Sstevel@tonic-gate 		    ipha, NULL, mctl_present);
32980Sstevel@tonic-gate 		if (first_mp == NULL) {
32990Sstevel@tonic-gate 			SCTP_REFRELE(sctp);
33000Sstevel@tonic-gate 			return;
33010Sstevel@tonic-gate 		}
33020Sstevel@tonic-gate 	}
33030Sstevel@tonic-gate 
33040Sstevel@tonic-gate 	/* Initiate IPPF processing for fastpath */
33050Sstevel@tonic-gate 	if (IPP_ENABLED(IPP_LOCAL_IN)) {
33060Sstevel@tonic-gate 		ip_process(IPP_LOCAL_IN, &mp,
33070Sstevel@tonic-gate 		    recv_ill->ill_phyint->phyint_ifindex);
33080Sstevel@tonic-gate 		if (mp == NULL) {
33090Sstevel@tonic-gate 			SCTP_REFRELE(sctp);
33100Sstevel@tonic-gate 			if (mctl_present)
33110Sstevel@tonic-gate 				freeb(first_mp);
33120Sstevel@tonic-gate 			return;
33130Sstevel@tonic-gate 		} else if (mctl_present) {
33140Sstevel@tonic-gate 			/*
33150Sstevel@tonic-gate 			 * ip_process might return a new mp.
33160Sstevel@tonic-gate 			 */
33170Sstevel@tonic-gate 			ASSERT(first_mp != mp);
33180Sstevel@tonic-gate 			first_mp->b_cont = mp;
33190Sstevel@tonic-gate 		} else {
33200Sstevel@tonic-gate 			first_mp = mp;
33210Sstevel@tonic-gate 		}
33220Sstevel@tonic-gate 	}
33230Sstevel@tonic-gate 
33240Sstevel@tonic-gate 	if (connp->conn_recvif || connp->conn_recvslla ||
33250Sstevel@tonic-gate 	    connp->conn_ipv6_recvpktinfo) {
33260Sstevel@tonic-gate 		int in_flags = 0;
33270Sstevel@tonic-gate 
33280Sstevel@tonic-gate 		if (connp->conn_recvif || connp->conn_ipv6_recvpktinfo) {
33290Sstevel@tonic-gate 			in_flags = IPF_RECVIF;
33300Sstevel@tonic-gate 		}
33310Sstevel@tonic-gate 		if (connp->conn_recvslla) {
33320Sstevel@tonic-gate 			in_flags |= IPF_RECVSLLA;
33330Sstevel@tonic-gate 		}
33340Sstevel@tonic-gate 		if (isv4) {
33350Sstevel@tonic-gate 			mp = ip_add_info(mp, recv_ill, in_flags);
33360Sstevel@tonic-gate 		} else {
33370Sstevel@tonic-gate 			mp = ip_add_info_v6(mp, recv_ill,
33380Sstevel@tonic-gate 			    &(((ip6_t *)ipha)->ip6_dst));
33390Sstevel@tonic-gate 		}
33400Sstevel@tonic-gate 		if (mp == NULL) {
33410Sstevel@tonic-gate 			SCTP_REFRELE(sctp);
33420Sstevel@tonic-gate 			if (mctl_present)
33430Sstevel@tonic-gate 				freeb(first_mp);
33440Sstevel@tonic-gate 			return;
33450Sstevel@tonic-gate 		} else if (mctl_present) {
33460Sstevel@tonic-gate 			/*
33470Sstevel@tonic-gate 			 * ip_add_info might return a new mp.
33480Sstevel@tonic-gate 			 */
33490Sstevel@tonic-gate 			ASSERT(first_mp != mp);
33500Sstevel@tonic-gate 			first_mp->b_cont = mp;
33510Sstevel@tonic-gate 		} else {
33520Sstevel@tonic-gate 			first_mp = mp;
33530Sstevel@tonic-gate 		}
33540Sstevel@tonic-gate 	}
33550Sstevel@tonic-gate 
33560Sstevel@tonic-gate 	mutex_enter(&sctp->sctp_lock);
33570Sstevel@tonic-gate 	if (sctp->sctp_running) {
33580Sstevel@tonic-gate 		if (mctl_present)
33590Sstevel@tonic-gate 			mp->b_prev = first_mp;
33600Sstevel@tonic-gate 		if (!sctp_add_recvq(sctp, mp, B_FALSE)) {
33610Sstevel@tonic-gate 			BUMP_MIB(&ip_mib, ipInDiscards);
33620Sstevel@tonic-gate 			freemsg(first_mp);
33630Sstevel@tonic-gate 		}
33640Sstevel@tonic-gate 		mutex_exit(&sctp->sctp_lock);
33650Sstevel@tonic-gate 		SCTP_REFRELE(sctp);
33660Sstevel@tonic-gate 		return;
33670Sstevel@tonic-gate 	} else {
33680Sstevel@tonic-gate 		sctp->sctp_running = B_TRUE;
33690Sstevel@tonic-gate 		mutex_exit(&sctp->sctp_lock);
33700Sstevel@tonic-gate 
33710Sstevel@tonic-gate 		mutex_enter(&sctp->sctp_recvq_lock);
33720Sstevel@tonic-gate 		if (sctp->sctp_recvq != NULL) {
33730Sstevel@tonic-gate 			if (mctl_present)
33740Sstevel@tonic-gate 				mp->b_prev = first_mp;
33750Sstevel@tonic-gate 			if (!sctp_add_recvq(sctp, mp, B_TRUE)) {
33760Sstevel@tonic-gate 				BUMP_MIB(&ip_mib, ipInDiscards);
33770Sstevel@tonic-gate 				freemsg(first_mp);
33780Sstevel@tonic-gate 			}
33790Sstevel@tonic-gate 			mutex_exit(&sctp->sctp_recvq_lock);
33800Sstevel@tonic-gate 			WAKE_SCTP(sctp);
33810Sstevel@tonic-gate 			SCTP_REFRELE(sctp);
33820Sstevel@tonic-gate 			return;
33830Sstevel@tonic-gate 		}
33840Sstevel@tonic-gate 	}
33850Sstevel@tonic-gate 	mutex_exit(&sctp->sctp_recvq_lock);
33860Sstevel@tonic-gate 	sctp_input_data(sctp, mp, (mctl_present ? first_mp : NULL));
33870Sstevel@tonic-gate 	WAKE_SCTP(sctp);
33880Sstevel@tonic-gate 	sctp_process_sendq(sctp);
33890Sstevel@tonic-gate 	SCTP_REFRELE(sctp);
33900Sstevel@tonic-gate }
33910Sstevel@tonic-gate 
33920Sstevel@tonic-gate static void
33930Sstevel@tonic-gate sctp_process_abort(sctp_t *sctp, sctp_chunk_hdr_t *ch, int err)
33940Sstevel@tonic-gate {
33950Sstevel@tonic-gate 	BUMP_MIB(&sctp_mib, sctpAborted);
33960Sstevel@tonic-gate 	BUMP_LOCAL(sctp->sctp_ibchunks);
33970Sstevel@tonic-gate 
33980Sstevel@tonic-gate 	sctp_assoc_event(sctp, SCTP_COMM_LOST,
33990Sstevel@tonic-gate 	    ntohs(((sctp_parm_hdr_t *)(ch + 1))->sph_type), ch);
34000Sstevel@tonic-gate 	sctp_clean_death(sctp, err);
34010Sstevel@tonic-gate }
34020Sstevel@tonic-gate 
34030Sstevel@tonic-gate void
34040Sstevel@tonic-gate sctp_input_data(sctp_t *sctp, mblk_t *mp, mblk_t *ipsec_mp)
34050Sstevel@tonic-gate {
34060Sstevel@tonic-gate 	sctp_chunk_hdr_t	*ch;
34070Sstevel@tonic-gate 	ssize_t			mlen;
34080Sstevel@tonic-gate 	int			gotdata;
34090Sstevel@tonic-gate 	int			trysend;
34100Sstevel@tonic-gate 	sctp_faddr_t		*fp;
34110Sstevel@tonic-gate 	sctp_init_chunk_t	*iack;
34120Sstevel@tonic-gate 	uint32_t		tsn;
34130Sstevel@tonic-gate 	sctp_data_hdr_t		*sdc;
34140Sstevel@tonic-gate 	ip6_pkt_t		ipp;
34150Sstevel@tonic-gate 	in6_addr_t		src;
34160Sstevel@tonic-gate 	in6_addr_t		dst;
34170Sstevel@tonic-gate 	uint_t			ifindex;
34180Sstevel@tonic-gate 	sctp_hdr_t		*sctph;
34190Sstevel@tonic-gate 	uint_t			ip_hdr_len;
34200Sstevel@tonic-gate 	mblk_t			*dups = NULL;
34210Sstevel@tonic-gate 	int			recv_adaption;
34220Sstevel@tonic-gate 	boolean_t		wake_eager = B_FALSE;
34230Sstevel@tonic-gate 	mblk_t			*pinfo_mp;
34240Sstevel@tonic-gate 	in_pktinfo_t		*pinfo = NULL;
34250Sstevel@tonic-gate 	in6_addr_t		peer_src;
34260Sstevel@tonic-gate 	int64_t			now;
34270Sstevel@tonic-gate 
34280Sstevel@tonic-gate 	if (DB_TYPE(mp) != M_DATA) {
34290Sstevel@tonic-gate 		ASSERT(DB_TYPE(mp) == M_CTL);
34300Sstevel@tonic-gate 		if (MBLKL(mp) == sizeof (in_pktinfo_t) &&
34310Sstevel@tonic-gate 		    ((in_pktinfo_t *)mp->b_rptr)->in_pkt_ulp_type ==
34320Sstevel@tonic-gate 		    IN_PKTINFO) {
34330Sstevel@tonic-gate 			pinfo = (in_pktinfo_t *)mp->b_rptr;
34340Sstevel@tonic-gate 			pinfo_mp = mp;
34350Sstevel@tonic-gate 			mp = mp->b_cont;
34360Sstevel@tonic-gate 		} else {
34370Sstevel@tonic-gate 			if (ipsec_mp != NULL)
34380Sstevel@tonic-gate 				freeb(ipsec_mp);
34390Sstevel@tonic-gate 			sctp_icmp_error(sctp, mp);
34400Sstevel@tonic-gate 			return;
34410Sstevel@tonic-gate 		}
34420Sstevel@tonic-gate 	}
34430Sstevel@tonic-gate 	ASSERT(DB_TYPE(mp) == M_DATA);
34440Sstevel@tonic-gate 
34450Sstevel@tonic-gate 	if (mp->b_cont != NULL) {
34460Sstevel@tonic-gate 		/*
34470Sstevel@tonic-gate 		 * All subsequent code is vastly simplified if it can
34480Sstevel@tonic-gate 		 * assume a single contiguous chunk of data.
34490Sstevel@tonic-gate 		 */
34500Sstevel@tonic-gate 		if (pullupmsg(mp, -1) == 0) {
34510Sstevel@tonic-gate 			BUMP_MIB(&ip_mib, ipInDiscards);
34520Sstevel@tonic-gate 			if (ipsec_mp != NULL)
34530Sstevel@tonic-gate 				freeb(ipsec_mp);
34540Sstevel@tonic-gate 			if (pinfo != NULL)
34550Sstevel@tonic-gate 				freeb(pinfo_mp);
34560Sstevel@tonic-gate 			freemsg(mp);
34570Sstevel@tonic-gate 			return;
34580Sstevel@tonic-gate 		}
34590Sstevel@tonic-gate 	}
34600Sstevel@tonic-gate 
34610Sstevel@tonic-gate 	BUMP_LOCAL(sctp->sctp_ipkts);
34620Sstevel@tonic-gate 	sctph = find_sctp_hdrs(mp, &src, &dst, &ifindex, &ip_hdr_len,
34630Sstevel@tonic-gate 	    &ipp, pinfo);
34640Sstevel@tonic-gate 	if (pinfo != NULL)
34650Sstevel@tonic-gate 		freeb(pinfo_mp);
34660Sstevel@tonic-gate 	mlen = mp->b_wptr - (uchar_t *)(sctph + 1);
34670Sstevel@tonic-gate 	ch = sctp_first_chunk((uchar_t *)(sctph + 1), mlen);
34680Sstevel@tonic-gate 	if (ch == NULL) {
34690Sstevel@tonic-gate 		BUMP_MIB(&ip_mib, ipInDiscards);
34700Sstevel@tonic-gate 		if (ipsec_mp != NULL)
34710Sstevel@tonic-gate 			freeb(ipsec_mp);
34720Sstevel@tonic-gate 		freemsg(mp);
34730Sstevel@tonic-gate 		return;
34740Sstevel@tonic-gate 	}
34750Sstevel@tonic-gate 
34760Sstevel@tonic-gate 	if (!sctp_check_input(sctp, ch, mlen, 1)) {
34770Sstevel@tonic-gate 		BUMP_MIB(&ip_mib, ipInDiscards);
34780Sstevel@tonic-gate 		goto done;
34790Sstevel@tonic-gate 	}
34800Sstevel@tonic-gate 	/*
34810Sstevel@tonic-gate 	 * Check verfication tag (special handling for INIT,
34820Sstevel@tonic-gate 	 * COOKIE, SHUTDOWN_COMPLETE and SHUTDOWN_ACK chunks).
34830Sstevel@tonic-gate 	 * ABORTs are handled in the chunk processing loop, since
34840Sstevel@tonic-gate 	 * may not appear first. All other checked chunks must
34850Sstevel@tonic-gate 	 * appear first, or will have been dropped by check_input().
34860Sstevel@tonic-gate 	 */
34870Sstevel@tonic-gate 	switch (ch->sch_id) {
34880Sstevel@tonic-gate 	case CHUNK_INIT:
34890Sstevel@tonic-gate 		if (sctph->sh_verf != 0) {
34900Sstevel@tonic-gate 			/* drop it */
34910Sstevel@tonic-gate 			goto done;
34920Sstevel@tonic-gate 		}
34930Sstevel@tonic-gate 		break;
34940Sstevel@tonic-gate 	case CHUNK_SHUTDOWN_COMPLETE:
34950Sstevel@tonic-gate 		if (sctph->sh_verf == sctp->sctp_lvtag)
34960Sstevel@tonic-gate 			break;
34970Sstevel@tonic-gate 		if (sctph->sh_verf == sctp->sctp_fvtag &&
34980Sstevel@tonic-gate 		    SCTP_GET_TBIT(ch)) {
34990Sstevel@tonic-gate 			break;
35000Sstevel@tonic-gate 		}
35010Sstevel@tonic-gate 		/* else drop it */
35020Sstevel@tonic-gate 		goto done;
35030Sstevel@tonic-gate 	case CHUNK_ABORT:
35040Sstevel@tonic-gate 	case CHUNK_COOKIE:
35050Sstevel@tonic-gate 		/* handled below */
35060Sstevel@tonic-gate 		break;
35070Sstevel@tonic-gate 	case CHUNK_SHUTDOWN_ACK:
35080Sstevel@tonic-gate 		if (sctp->sctp_state > SCTPS_BOUND &&
35090Sstevel@tonic-gate 		    sctp->sctp_state < SCTPS_ESTABLISHED) {
35100Sstevel@tonic-gate 			/* treat as OOTB */
35110Sstevel@tonic-gate 			sctp_ootb_shutdown_ack(sctp, mp, ip_hdr_len);
35120Sstevel@tonic-gate 			if (ipsec_mp != NULL)
35130Sstevel@tonic-gate 				freeb(ipsec_mp);
35140Sstevel@tonic-gate 			return;
35150Sstevel@tonic-gate 		}
35160Sstevel@tonic-gate 		/* else fallthru */
35170Sstevel@tonic-gate 	default:
35180Sstevel@tonic-gate 		/*
35190Sstevel@tonic-gate 		 * All other packets must have a valid
35200Sstevel@tonic-gate 		 * verification tag, however if this is a
35210Sstevel@tonic-gate 		 * listener, we use a refined version of
35220Sstevel@tonic-gate 		 * out-of-the-blue logic.
35230Sstevel@tonic-gate 		 */
35240Sstevel@tonic-gate 		if (sctph->sh_verf != sctp->sctp_lvtag &&
35250Sstevel@tonic-gate 		    sctp->sctp_state != SCTPS_LISTEN) {
35260Sstevel@tonic-gate 			/* drop it */
35270Sstevel@tonic-gate 			goto done;
35280Sstevel@tonic-gate 		}
35290Sstevel@tonic-gate 		break;
35300Sstevel@tonic-gate 	}
35310Sstevel@tonic-gate 
35320Sstevel@tonic-gate 	/* Have a valid sctp for this packet */
35330Sstevel@tonic-gate 	fp = sctp_lookup_faddr(sctp, &src);
35341676Sjpk 	dprint(2, ("sctp_dispatch_rput: mp=%p fp=%p sctp=%p\n", (void *)mp,
35351676Sjpk 	    (void *)fp, (void *)sctp));
35360Sstevel@tonic-gate 
35370Sstevel@tonic-gate 	gotdata = 0;
35380Sstevel@tonic-gate 	trysend = 0;
35390Sstevel@tonic-gate 
35400Sstevel@tonic-gate 	now = lbolt64;
35410Sstevel@tonic-gate 	/* Process the chunks */
35420Sstevel@tonic-gate 	do {
35430Sstevel@tonic-gate 		dprint(3, ("sctp_dispatch_rput: state=%d, chunk id=%d\n",
35440Sstevel@tonic-gate 		    sctp->sctp_state, (int)(ch->sch_id)));
35450Sstevel@tonic-gate 
35460Sstevel@tonic-gate 		if (ch->sch_id == CHUNK_ABORT) {
35470Sstevel@tonic-gate 			if (sctph->sh_verf != sctp->sctp_lvtag &&
35480Sstevel@tonic-gate 			    sctph->sh_verf != sctp->sctp_fvtag) {
35490Sstevel@tonic-gate 				/* drop it */
35500Sstevel@tonic-gate 				goto done;
35510Sstevel@tonic-gate 			}
35520Sstevel@tonic-gate 		}
35530Sstevel@tonic-gate 
35540Sstevel@tonic-gate 		switch (sctp->sctp_state) {
35550Sstevel@tonic-gate 
35560Sstevel@tonic-gate 		case SCTPS_ESTABLISHED:
35570Sstevel@tonic-gate 		case SCTPS_SHUTDOWN_PENDING:
35580Sstevel@tonic-gate 		case SCTPS_SHUTDOWN_SENT:
35590Sstevel@tonic-gate 			switch (ch->sch_id) {
35600Sstevel@tonic-gate 			case CHUNK_DATA:
35610Sstevel@tonic-gate 				/* 0-length data chunks are not allowed */
35620Sstevel@tonic-gate 				if (ntohs(ch->sch_len) == sizeof (*sdc)) {
35630Sstevel@tonic-gate 					sdc = (sctp_data_hdr_t *)ch;
35640Sstevel@tonic-gate 					tsn = sdc->sdh_tsn;
35650Sstevel@tonic-gate 					sctp_send_abort(sctp, sctp->sctp_fvtag,
35660Sstevel@tonic-gate 					    SCTP_ERR_NO_USR_DATA, (char *)&tsn,
35670Sstevel@tonic-gate 					    sizeof (tsn), mp, 0, B_FALSE);
35680Sstevel@tonic-gate 					sctp_assoc_event(sctp, SCTP_COMM_LOST,
35690Sstevel@tonic-gate 					    0, NULL);
35700Sstevel@tonic-gate 					sctp_clean_death(sctp, ECONNABORTED);
35710Sstevel@tonic-gate 					goto done;
35720Sstevel@tonic-gate 				}
35730Sstevel@tonic-gate 
35740Sstevel@tonic-gate 				ASSERT(fp != NULL);
35750Sstevel@tonic-gate 				sctp->sctp_lastdata = fp;
35760Sstevel@tonic-gate 				sctp_data_chunk(sctp, ch, mp, &dups, fp, &ipp);
35770Sstevel@tonic-gate 				gotdata = 1;
35780Sstevel@tonic-gate 				/* Restart shutdown timer if shutting down */
35790Sstevel@tonic-gate 				if (sctp->sctp_state == SCTPS_SHUTDOWN_SENT) {
35800Sstevel@tonic-gate 					/*
35810Sstevel@tonic-gate 					 * If we have exceeded our max
35820Sstevel@tonic-gate 					 * wait bound for waiting for a
35830Sstevel@tonic-gate 					 * shutdown ack from the peer,
35840Sstevel@tonic-gate 					 * abort the association.
35850Sstevel@tonic-gate 					 */
35860Sstevel@tonic-gate 					if (sctp_shutack_wait_bound != 0 &&
35870Sstevel@tonic-gate 					    TICK_TO_MSEC(now -
35880Sstevel@tonic-gate 					    sctp->sctp_out_time) >
35890Sstevel@tonic-gate 					    sctp_shutack_wait_bound) {
35900Sstevel@tonic-gate 						sctp_send_abort(sctp,
35910Sstevel@tonic-gate 						    sctp->sctp_fvtag, 0, NULL,
35920Sstevel@tonic-gate 						    0, mp, 0, B_FALSE);
35930Sstevel@tonic-gate 						sctp_assoc_event(sctp,
35940Sstevel@tonic-gate 						    SCTP_COMM_LOST, 0, NULL);
35950Sstevel@tonic-gate 						sctp_clean_death(sctp,
35960Sstevel@tonic-gate 						    ECONNABORTED);
35970Sstevel@tonic-gate 						goto done;
35980Sstevel@tonic-gate 					}
35990Sstevel@tonic-gate 					SCTP_FADDR_TIMER_RESTART(sctp, fp,
36000Sstevel@tonic-gate 					    fp->rto);
36010Sstevel@tonic-gate 				}
36020Sstevel@tonic-gate 				break;
36030Sstevel@tonic-gate 			case CHUNK_SACK:
36040Sstevel@tonic-gate 				ASSERT(fp != NULL);
36050Sstevel@tonic-gate 				/*
36060Sstevel@tonic-gate 				 * Peer is real and alive if it can ack our
36070Sstevel@tonic-gate 				 * data.
36080Sstevel@tonic-gate 				 */
36090Sstevel@tonic-gate 				sctp_faddr_alive(sctp, fp);
36100Sstevel@tonic-gate 				trysend = sctp_got_sack(sctp, ch);
3611852Svi117747 				if (trysend < 0) {
3612852Svi117747 					sctp_send_abort(sctp, sctph->sh_verf,
3613852Svi117747 					    0, NULL, 0, mp, 0, B_FALSE);
3614852Svi117747 					sctp_assoc_event(sctp,
3615852Svi117747 					    SCTP_COMM_LOST, 0, NULL);
3616852Svi117747 					sctp_clean_death(sctp,
3617852Svi117747 					    ECONNABORTED);
3618852Svi117747 					goto done;
3619852Svi117747 				}
36200Sstevel@tonic-gate 				break;
36210Sstevel@tonic-gate 			case CHUNK_HEARTBEAT:
36220Sstevel@tonic-gate 				sctp_return_heartbeat(sctp, ch, mp);
36230Sstevel@tonic-gate 				break;
36240Sstevel@tonic-gate 			case CHUNK_HEARTBEAT_ACK:
36250Sstevel@tonic-gate 				sctp_process_heartbeat(sctp, ch);
36260Sstevel@tonic-gate 				break;
36270Sstevel@tonic-gate 			case CHUNK_SHUTDOWN:
36280Sstevel@tonic-gate 				sctp_shutdown_event(sctp);
36290Sstevel@tonic-gate 				trysend = sctp_shutdown_received(sctp, ch,
36301735Skcpoon 				    B_FALSE, B_FALSE, fp);
36310Sstevel@tonic-gate 				BUMP_LOCAL(sctp->sctp_ibchunks);
36320Sstevel@tonic-gate 				break;
36330Sstevel@tonic-gate 			case CHUNK_SHUTDOWN_ACK:
36340Sstevel@tonic-gate 				BUMP_LOCAL(sctp->sctp_ibchunks);
36350Sstevel@tonic-gate 				if (sctp->sctp_state == SCTPS_SHUTDOWN_SENT) {
36360Sstevel@tonic-gate 					sctp_shutdown_complete(sctp);
36370Sstevel@tonic-gate 					BUMP_MIB(&sctp_mib, sctpShutdowns);
36380Sstevel@tonic-gate 					sctp_assoc_event(sctp,
36390Sstevel@tonic-gate 					    SCTP_SHUTDOWN_COMP, 0, NULL);
36400Sstevel@tonic-gate 					sctp_clean_death(sctp, 0);
36410Sstevel@tonic-gate 					goto done;
36420Sstevel@tonic-gate 				}
36430Sstevel@tonic-gate 				break;
36440Sstevel@tonic-gate 			case CHUNK_ABORT: {
36450Sstevel@tonic-gate 				sctp_saddr_ipif_t *sp;
36460Sstevel@tonic-gate 
36470Sstevel@tonic-gate 				/* Ignore if delete pending */
3648852Svi117747 				sp = sctp_saddr_lookup(sctp, &dst, 0);
36490Sstevel@tonic-gate 				ASSERT(sp != NULL);
36500Sstevel@tonic-gate 				if (sp->saddr_ipif_delete_pending) {
36510Sstevel@tonic-gate 					BUMP_LOCAL(sctp->sctp_ibchunks);
36520Sstevel@tonic-gate 					break;
36530Sstevel@tonic-gate 				}
36540Sstevel@tonic-gate 
36550Sstevel@tonic-gate 				sctp_process_abort(sctp, ch, ECONNRESET);
36560Sstevel@tonic-gate 				goto done;
36570Sstevel@tonic-gate 			}
36580Sstevel@tonic-gate 			case CHUNK_INIT:
36590Sstevel@tonic-gate 				sctp_send_initack(sctp, ch, mp);
36600Sstevel@tonic-gate 				break;
36610Sstevel@tonic-gate 			case CHUNK_COOKIE:
36620Sstevel@tonic-gate 				if (sctp_process_cookie(sctp, ch, mp, &iack,
36630Sstevel@tonic-gate 				    sctph, &recv_adaption, NULL) != -1) {
36640Sstevel@tonic-gate 					sctp_send_cookie_ack(sctp);
36650Sstevel@tonic-gate 					sctp_assoc_event(sctp, SCTP_RESTART,
36660Sstevel@tonic-gate 					    0, NULL);
36670Sstevel@tonic-gate 					if (recv_adaption) {
36680Sstevel@tonic-gate 						sctp->sctp_recv_adaption = 1;
36690Sstevel@tonic-gate 						sctp_adaption_event(sctp);
36700Sstevel@tonic-gate 					}
36710Sstevel@tonic-gate 				} else {
36720Sstevel@tonic-gate 					BUMP_MIB(&sctp_mib,
36730Sstevel@tonic-gate 					    sctpInInvalidCookie);
36740Sstevel@tonic-gate 				}
36750Sstevel@tonic-gate 				break;
36760Sstevel@tonic-gate 			case CHUNK_ERROR: {
36770Sstevel@tonic-gate 				int error;
36780Sstevel@tonic-gate 
36790Sstevel@tonic-gate 				BUMP_LOCAL(sctp->sctp_ibchunks);
36800Sstevel@tonic-gate 				error = sctp_handle_error(sctp, sctph, ch, mp);
36810Sstevel@tonic-gate 				if (error != 0) {
36820Sstevel@tonic-gate 					sctp_clean_death(sctp, error);
36830Sstevel@tonic-gate 					goto done;
36840Sstevel@tonic-gate 				}
36850Sstevel@tonic-gate 				break;
36860Sstevel@tonic-gate 			}
36870Sstevel@tonic-gate 			case CHUNK_ASCONF:
36880Sstevel@tonic-gate 				ASSERT(fp != NULL);
36890Sstevel@tonic-gate 				sctp_input_asconf(sctp, ch, fp);
36900Sstevel@tonic-gate 				BUMP_LOCAL(sctp->sctp_ibchunks);
36910Sstevel@tonic-gate 				break;
36920Sstevel@tonic-gate 			case CHUNK_ASCONF_ACK:
36930Sstevel@tonic-gate 				ASSERT(fp != NULL);
36940Sstevel@tonic-gate 				sctp_faddr_alive(sctp, fp);
36950Sstevel@tonic-gate 				sctp_input_asconf_ack(sctp, ch, fp);
36960Sstevel@tonic-gate 				BUMP_LOCAL(sctp->sctp_ibchunks);
36970Sstevel@tonic-gate 				break;
36980Sstevel@tonic-gate 			case CHUNK_FORWARD_TSN:
36990Sstevel@tonic-gate 				ASSERT(fp != NULL);
37000Sstevel@tonic-gate 				sctp->sctp_lastdata = fp;
37010Sstevel@tonic-gate 				sctp_process_forward_tsn(sctp, ch, fp, &ipp);
37020Sstevel@tonic-gate 				gotdata = 1;
37030Sstevel@tonic-gate 				BUMP_LOCAL(sctp->sctp_ibchunks);
37040Sstevel@tonic-gate 				break;
37050Sstevel@tonic-gate 			default:
37060Sstevel@tonic-gate 				if (sctp_strange_chunk(sctp, ch, fp) == 0) {
37070Sstevel@tonic-gate 					goto nomorechunks;
37080Sstevel@tonic-gate 				} /* else skip and continue processing */
37090Sstevel@tonic-gate 				break;
37100Sstevel@tonic-gate 			}
37110Sstevel@tonic-gate 			break;
37120Sstevel@tonic-gate 
37130Sstevel@tonic-gate 		case SCTPS_LISTEN:
37140Sstevel@tonic-gate 			switch (ch->sch_id) {
37150Sstevel@tonic-gate 			case CHUNK_INIT:
37160Sstevel@tonic-gate 				sctp_send_initack(sctp, ch, mp);
37170Sstevel@tonic-gate 				break;
37180Sstevel@tonic-gate 			case CHUNK_COOKIE: {
37190Sstevel@tonic-gate 				sctp_t *eager;
37200Sstevel@tonic-gate 
37210Sstevel@tonic-gate 				if (sctp_process_cookie(sctp, ch, mp, &iack,
37220Sstevel@tonic-gate 				    sctph, &recv_adaption, &peer_src) == -1) {
37230Sstevel@tonic-gate 					BUMP_MIB(&sctp_mib,
37240Sstevel@tonic-gate 					    sctpInInvalidCookie);
37250Sstevel@tonic-gate 					goto done;
37260Sstevel@tonic-gate 				}
37270Sstevel@tonic-gate 
37280Sstevel@tonic-gate 				/*
37290Sstevel@tonic-gate 				 * The cookie is good; ensure that
37300Sstevel@tonic-gate 				 * the peer used the verification
37310Sstevel@tonic-gate 				 * tag from the init ack in the header.
37320Sstevel@tonic-gate 				 */
37330Sstevel@tonic-gate 				if (iack->sic_inittag != sctph->sh_verf)
37340Sstevel@tonic-gate 					goto done;
37350Sstevel@tonic-gate 
37360Sstevel@tonic-gate 				eager = sctp_conn_request(sctp, mp, ifindex,
37370Sstevel@tonic-gate 				    ip_hdr_len, iack, ipsec_mp);
37380Sstevel@tonic-gate 				if (eager == NULL) {
37390Sstevel@tonic-gate 					sctp_send_abort(sctp, sctph->sh_verf,
37400Sstevel@tonic-gate 					    SCTP_ERR_NO_RESOURCES, NULL, 0, mp,
37410Sstevel@tonic-gate 					    0, B_FALSE);
37420Sstevel@tonic-gate 					goto done;
37430Sstevel@tonic-gate 				}
37440Sstevel@tonic-gate 
37450Sstevel@tonic-gate 				/*
37460Sstevel@tonic-gate 				 * If there were extra chunks
37470Sstevel@tonic-gate 				 * bundled with the cookie,
37480Sstevel@tonic-gate 				 * they must be processed
37490Sstevel@tonic-gate 				 * on the eager's queue. We
37500Sstevel@tonic-gate 				 * accomplish this by refeeding
37510Sstevel@tonic-gate 				 * the whole packet into the
37520Sstevel@tonic-gate 				 * state machine on the right
37530Sstevel@tonic-gate 				 * q. The packet (mp) gets
37540Sstevel@tonic-gate 				 * there via the eager's
37550Sstevel@tonic-gate 				 * cookie_mp field (overloaded
37560Sstevel@tonic-gate 				 * with the active open role).
37570Sstevel@tonic-gate 				 * This is picked up when
37580Sstevel@tonic-gate 				 * processing the null bind
37590Sstevel@tonic-gate 				 * request put on the eager's
37600Sstevel@tonic-gate 				 * q by sctp_accept(). We must
37610Sstevel@tonic-gate 				 * first revert the cookie
37620Sstevel@tonic-gate 				 * chunk's length field to network
37630Sstevel@tonic-gate 				 * byteorder so it can be
37640Sstevel@tonic-gate 				 * properly reprocessed on the
37650Sstevel@tonic-gate 				 * eager's queue.
37660Sstevel@tonic-gate 				 */
37670Sstevel@tonic-gate 				BUMP_MIB(&sctp_mib, sctpPassiveEstab);
37680Sstevel@tonic-gate 				if (mlen > ntohs(ch->sch_len)) {
37690Sstevel@tonic-gate 					eager->sctp_cookie_mp = dupb(mp);
37701676Sjpk 					mblk_setcred(eager->sctp_cookie_mp,
37711676Sjpk 					    CONN_CRED(eager->sctp_connp));
37720Sstevel@tonic-gate 					/*
37730Sstevel@tonic-gate 					 * If no mem, just let
37740Sstevel@tonic-gate 					 * the peer retransmit.
37750Sstevel@tonic-gate 					 */
37760Sstevel@tonic-gate 				}
37770Sstevel@tonic-gate 				sctp_assoc_event(eager, SCTP_COMM_UP, 0, NULL);
37780Sstevel@tonic-gate 				if (recv_adaption) {
37790Sstevel@tonic-gate 					eager->sctp_recv_adaption = 1;
37800Sstevel@tonic-gate 					eager->sctp_rx_adaption_code =
37810Sstevel@tonic-gate 					    sctp->sctp_rx_adaption_code;
37820Sstevel@tonic-gate 					sctp_adaption_event(eager);
37830Sstevel@tonic-gate 				}
37840Sstevel@tonic-gate 
37850Sstevel@tonic-gate 				eager->sctp_active = now;
37860Sstevel@tonic-gate 				sctp_send_cookie_ack(eager);
37870Sstevel@tonic-gate 
37880Sstevel@tonic-gate 				wake_eager = B_TRUE;
37890Sstevel@tonic-gate 
37900Sstevel@tonic-gate 				/*
37910Sstevel@tonic-gate 				 * Process rest of the chunks with eager.
37920Sstevel@tonic-gate 				 */
37930Sstevel@tonic-gate 				sctp = eager;
37940Sstevel@tonic-gate 				fp = sctp_lookup_faddr(sctp, &peer_src);
37950Sstevel@tonic-gate 				/*
37960Sstevel@tonic-gate 				 * Confirm peer's original source.  fp can
37970Sstevel@tonic-gate 				 * only be NULL if peer does not use the
37980Sstevel@tonic-gate 				 * original source as one of its addresses...
37990Sstevel@tonic-gate 				 */
38000Sstevel@tonic-gate 				if (fp == NULL)
38010Sstevel@tonic-gate 					fp = sctp_lookup_faddr(sctp, &src);
38020Sstevel@tonic-gate 				else
38030Sstevel@tonic-gate 					sctp_faddr_alive(sctp, fp);
38040Sstevel@tonic-gate 
38050Sstevel@tonic-gate 				/*
38060Sstevel@tonic-gate 				 * Validate the peer addresses.  It also starts
38070Sstevel@tonic-gate 				 * the heartbeat timer.
38080Sstevel@tonic-gate 				 */
38090Sstevel@tonic-gate 				sctp_validate_peer(sctp);
38100Sstevel@tonic-gate 				break;
38110Sstevel@tonic-gate 			}
38120Sstevel@tonic-gate 			/* Anything else is considered out-of-the-blue */
38130Sstevel@tonic-gate 			case CHUNK_ERROR:
38140Sstevel@tonic-gate 			case CHUNK_ABORT:
38150Sstevel@tonic-gate 			case CHUNK_COOKIE_ACK:
38160Sstevel@tonic-gate 			case CHUNK_SHUTDOWN_COMPLETE:
38170Sstevel@tonic-gate 				BUMP_LOCAL(sctp->sctp_ibchunks);
38180Sstevel@tonic-gate 				goto done;
38190Sstevel@tonic-gate 			default:
38200Sstevel@tonic-gate 				BUMP_LOCAL(sctp->sctp_ibchunks);
38210Sstevel@tonic-gate 				sctp_send_abort(sctp, sctph->sh_verf, 0, NULL,
38220Sstevel@tonic-gate 				    0, mp, 0, B_TRUE);
38230Sstevel@tonic-gate 				goto done;
38240Sstevel@tonic-gate 			}
38250Sstevel@tonic-gate 			break;
38260Sstevel@tonic-gate 
38270Sstevel@tonic-gate 		case SCTPS_COOKIE_WAIT:
38280Sstevel@tonic-gate 			switch (ch->sch_id) {
38290Sstevel@tonic-gate 			case CHUNK_INIT_ACK:
38300Sstevel@tonic-gate 				sctp_stop_faddr_timers(sctp);
38310Sstevel@tonic-gate 				sctp_faddr_alive(sctp, sctp->sctp_current);
38320Sstevel@tonic-gate 				sctp_send_cookie_echo(sctp, ch, mp);
38330Sstevel@tonic-gate 				BUMP_LOCAL(sctp->sctp_ibchunks);
38340Sstevel@tonic-gate 				break;
38350Sstevel@tonic-gate 			case CHUNK_ABORT:
38360Sstevel@tonic-gate 				sctp_process_abort(sctp, ch, ECONNREFUSED);
38370Sstevel@tonic-gate 				goto done;
38380Sstevel@tonic-gate 			case CHUNK_INIT:
38390Sstevel@tonic-gate 				sctp_send_initack(sctp, ch, mp);
38400Sstevel@tonic-gate 				break;
38410Sstevel@tonic-gate 			case CHUNK_COOKIE:
38420Sstevel@tonic-gate 				if (sctp_process_cookie(sctp, ch, mp, &iack,
38430Sstevel@tonic-gate 				    sctph, &recv_adaption, NULL) == -1) {
38440Sstevel@tonic-gate 					BUMP_MIB(&sctp_mib,
38450Sstevel@tonic-gate 					    sctpInInvalidCookie);
38460Sstevel@tonic-gate 					break;
38470Sstevel@tonic-gate 				}
38480Sstevel@tonic-gate 				sctp_send_cookie_ack(sctp);
38490Sstevel@tonic-gate 				sctp_stop_faddr_timers(sctp);
38500Sstevel@tonic-gate 				if (!SCTP_IS_DETACHED(sctp)) {
38510Sstevel@tonic-gate 				    sctp->sctp_ulp_connected(sctp->sctp_ulpd);
38520Sstevel@tonic-gate 				    sctp_set_ulp_prop(sctp);
38530Sstevel@tonic-gate 				}
38540Sstevel@tonic-gate 				sctp->sctp_state = SCTPS_ESTABLISHED;
38550Sstevel@tonic-gate 				sctp->sctp_assoc_start_time = (uint32_t)lbolt;
38560Sstevel@tonic-gate 				BUMP_MIB(&sctp_mib, sctpActiveEstab);
38570Sstevel@tonic-gate 				if (sctp->sctp_cookie_mp) {
38580Sstevel@tonic-gate 					freemsg(sctp->sctp_cookie_mp);
38590Sstevel@tonic-gate 					sctp->sctp_cookie_mp = NULL;
38600Sstevel@tonic-gate 				}
38610Sstevel@tonic-gate 
38620Sstevel@tonic-gate 				/* Validate the peer addresses. */
38630Sstevel@tonic-gate 				sctp->sctp_active = now;
38640Sstevel@tonic-gate 				sctp_validate_peer(sctp);
38650Sstevel@tonic-gate 
38660Sstevel@tonic-gate 				sctp_assoc_event(sctp, SCTP_COMM_UP, 0, NULL);
38670Sstevel@tonic-gate 				if (recv_adaption) {
38680Sstevel@tonic-gate 					sctp->sctp_recv_adaption = 1;
38690Sstevel@tonic-gate 					sctp_adaption_event(sctp);
38700Sstevel@tonic-gate 				}
38710Sstevel@tonic-gate 				/* Try sending queued data, or ASCONFs */
38720Sstevel@tonic-gate 				trysend = 1;
38730Sstevel@tonic-gate 				break;
38740Sstevel@tonic-gate 			default:
38750Sstevel@tonic-gate 				if (sctp_strange_chunk(sctp, ch, fp) == 0) {
38760Sstevel@tonic-gate 					goto nomorechunks;
38770Sstevel@tonic-gate 				} /* else skip and continue processing */
38780Sstevel@tonic-gate 				break;
38790Sstevel@tonic-gate 			}
38800Sstevel@tonic-gate 			break;
38810Sstevel@tonic-gate 
38820Sstevel@tonic-gate 		case SCTPS_COOKIE_ECHOED:
38830Sstevel@tonic-gate 			switch (ch->sch_id) {
38840Sstevel@tonic-gate 			case CHUNK_COOKIE_ACK:
38850Sstevel@tonic-gate 				if (!SCTP_IS_DETACHED(sctp)) {
38860Sstevel@tonic-gate 				    sctp->sctp_ulp_connected(sctp->sctp_ulpd);
38870Sstevel@tonic-gate 				    sctp_set_ulp_prop(sctp);
38880Sstevel@tonic-gate 				}
38890Sstevel@tonic-gate 				if (sctp->sctp_unacked == 0)
38900Sstevel@tonic-gate 					sctp_stop_faddr_timers(sctp);
38910Sstevel@tonic-gate 				sctp->sctp_state = SCTPS_ESTABLISHED;
38920Sstevel@tonic-gate 				sctp->sctp_assoc_start_time = (uint32_t)lbolt;
38930Sstevel@tonic-gate 				BUMP_MIB(&sctp_mib, sctpActiveEstab);
38940Sstevel@tonic-gate 				BUMP_LOCAL(sctp->sctp_ibchunks);
38950Sstevel@tonic-gate 				if (sctp->sctp_cookie_mp) {
38960Sstevel@tonic-gate 					freemsg(sctp->sctp_cookie_mp);
38970Sstevel@tonic-gate 					sctp->sctp_cookie_mp = NULL;
38980Sstevel@tonic-gate 				}
38990Sstevel@tonic-gate 				sctp_faddr_alive(sctp, fp);
39000Sstevel@tonic-gate 				/* Validate the peer addresses. */
39010Sstevel@tonic-gate 				sctp->sctp_active = now;
39020Sstevel@tonic-gate 				sctp_validate_peer(sctp);
39030Sstevel@tonic-gate 
39040Sstevel@tonic-gate 				/* Try sending queued data, or ASCONFs */
39050Sstevel@tonic-gate 				trysend = 1;
39060Sstevel@tonic-gate 				sctp_assoc_event(sctp, SCTP_COMM_UP, 0, NULL);
39070Sstevel@tonic-gate 				sctp_adaption_event(sctp);
39080Sstevel@tonic-gate 				break;
39090Sstevel@tonic-gate 			case CHUNK_ABORT:
39100Sstevel@tonic-gate 				sctp_process_abort(sctp, ch, ECONNREFUSED);
39110Sstevel@tonic-gate 				goto done;
39120Sstevel@tonic-gate 			case CHUNK_COOKIE:
39130Sstevel@tonic-gate 				if (sctp_process_cookie(sctp, ch, mp, &iack,
39140Sstevel@tonic-gate 				    sctph, &recv_adaption, NULL) == -1) {
39150Sstevel@tonic-gate 					BUMP_MIB(&sctp_mib,
39160Sstevel@tonic-gate 					    sctpInInvalidCookie);
39170Sstevel@tonic-gate 					break;
39180Sstevel@tonic-gate 				}
39190Sstevel@tonic-gate 				sctp_send_cookie_ack(sctp);
39200Sstevel@tonic-gate 
39210Sstevel@tonic-gate 				if (!SCTP_IS_DETACHED(sctp)) {
39220Sstevel@tonic-gate 				    sctp->sctp_ulp_connected(sctp->sctp_ulpd);
39230Sstevel@tonic-gate 				    sctp_set_ulp_prop(sctp);
39240Sstevel@tonic-gate 				}
39250Sstevel@tonic-gate 				if (sctp->sctp_unacked == 0)
39260Sstevel@tonic-gate 					sctp_stop_faddr_timers(sctp);
39270Sstevel@tonic-gate 				sctp->sctp_state = SCTPS_ESTABLISHED;
39280Sstevel@tonic-gate 				sctp->sctp_assoc_start_time = (uint32_t)lbolt;
39290Sstevel@tonic-gate 				BUMP_MIB(&sctp_mib, sctpActiveEstab);
39300Sstevel@tonic-gate 				if (sctp->sctp_cookie_mp) {
39310Sstevel@tonic-gate 					freemsg(sctp->sctp_cookie_mp);
39320Sstevel@tonic-gate 					sctp->sctp_cookie_mp = NULL;
39330Sstevel@tonic-gate 				}
39340Sstevel@tonic-gate 				/* Validate the peer addresses. */
39350Sstevel@tonic-gate 				sctp->sctp_active = now;
39360Sstevel@tonic-gate 				sctp_validate_peer(sctp);
39370Sstevel@tonic-gate 
39380Sstevel@tonic-gate 				sctp_assoc_event(sctp, SCTP_COMM_UP, 0, NULL);
39390Sstevel@tonic-gate 				if (recv_adaption) {
39400Sstevel@tonic-gate 					sctp->sctp_recv_adaption = 1;
39410Sstevel@tonic-gate 					sctp_adaption_event(sctp);
39420Sstevel@tonic-gate 				}
39430Sstevel@tonic-gate 				/* Try sending queued data, or ASCONFs */
39440Sstevel@tonic-gate 				trysend = 1;
39450Sstevel@tonic-gate 				break;
39460Sstevel@tonic-gate 			case CHUNK_INIT:
39470Sstevel@tonic-gate 				sctp_send_initack(sctp, ch, mp);
39480Sstevel@tonic-gate 				break;
39490Sstevel@tonic-gate 			case CHUNK_ERROR: {
39500Sstevel@tonic-gate 				sctp_parm_hdr_t *p;
39510Sstevel@tonic-gate 
39520Sstevel@tonic-gate 				BUMP_LOCAL(sctp->sctp_ibchunks);
39530Sstevel@tonic-gate 				/* check for a stale cookie */
39540Sstevel@tonic-gate 				if (ntohs(ch->sch_len) >=
39550Sstevel@tonic-gate 				    (sizeof (*p) + sizeof (*ch)) +
39560Sstevel@tonic-gate 				    sizeof (uint32_t)) {
39570Sstevel@tonic-gate 
39580Sstevel@tonic-gate 					p = (sctp_parm_hdr_t *)(ch + 1);
39590Sstevel@tonic-gate 					if (p->sph_type ==
39600Sstevel@tonic-gate 					    htons(SCTP_ERR_STALE_COOKIE)) {
39610Sstevel@tonic-gate 						BUMP_MIB(&sctp_mib,
39620Sstevel@tonic-gate 						    sctpAborted);
39630Sstevel@tonic-gate 						sctp_error_event(sctp, ch);
39640Sstevel@tonic-gate 						sctp_clean_death(sctp,
39650Sstevel@tonic-gate 						    ECONNREFUSED);
39660Sstevel@tonic-gate 						goto done;
39670Sstevel@tonic-gate 					}
39680Sstevel@tonic-gate 				}
39690Sstevel@tonic-gate 				break;
39700Sstevel@tonic-gate 			}
39710Sstevel@tonic-gate 			case CHUNK_HEARTBEAT:
39720Sstevel@tonic-gate 				sctp_return_heartbeat(sctp, ch, mp);
39730Sstevel@tonic-gate 				break;
39740Sstevel@tonic-gate 			default:
39750Sstevel@tonic-gate 				if (sctp_strange_chunk(sctp, ch, fp) == 0) {
39760Sstevel@tonic-gate 					goto nomorechunks;
39770Sstevel@tonic-gate 				} /* else skip and continue processing */
39780Sstevel@tonic-gate 			} /* switch (ch->sch_id) */
39790Sstevel@tonic-gate 			break;
39800Sstevel@tonic-gate 
39810Sstevel@tonic-gate 		case SCTPS_SHUTDOWN_ACK_SENT:
39820Sstevel@tonic-gate 			switch (ch->sch_id) {
39830Sstevel@tonic-gate 			case CHUNK_ABORT:
39840Sstevel@tonic-gate 				/* Pass gathered wisdom to IP for keeping */
39851735Skcpoon 				sctp_update_ire(sctp);
39860Sstevel@tonic-gate 				sctp_process_abort(sctp, ch, 0);
39870Sstevel@tonic-gate 				goto done;
39880Sstevel@tonic-gate 			case CHUNK_SHUTDOWN_COMPLETE:
39890Sstevel@tonic-gate 				BUMP_LOCAL(sctp->sctp_ibchunks);
39900Sstevel@tonic-gate 				BUMP_MIB(&sctp_mib, sctpShutdowns);
39910Sstevel@tonic-gate 				sctp_assoc_event(sctp, SCTP_SHUTDOWN_COMP, 0,
39920Sstevel@tonic-gate 				    NULL);
39930Sstevel@tonic-gate 
39940Sstevel@tonic-gate 				/* Pass gathered wisdom to IP for keeping */
39951735Skcpoon 				sctp_update_ire(sctp);
39960Sstevel@tonic-gate 				sctp_clean_death(sctp, 0);
39970Sstevel@tonic-gate 				goto done;
39980Sstevel@tonic-gate 			case CHUNK_SHUTDOWN_ACK:
39990Sstevel@tonic-gate 				sctp_shutdown_complete(sctp);
40000Sstevel@tonic-gate 				BUMP_LOCAL(sctp->sctp_ibchunks);
40010Sstevel@tonic-gate 				BUMP_MIB(&sctp_mib, sctpShutdowns);
40020Sstevel@tonic-gate 				sctp_assoc_event(sctp, SCTP_SHUTDOWN_COMP, 0,
40030Sstevel@tonic-gate 				    NULL);
40040Sstevel@tonic-gate 				sctp_clean_death(sctp, 0);
40050Sstevel@tonic-gate 				goto done;
40060Sstevel@tonic-gate 			case CHUNK_COOKIE:
40070Sstevel@tonic-gate 				(void) sctp_shutdown_received(sctp, NULL,
40081735Skcpoon 				    B_TRUE, B_FALSE, fp);
40090Sstevel@tonic-gate 				BUMP_LOCAL(sctp->sctp_ibchunks);
40100Sstevel@tonic-gate 				break;
40110Sstevel@tonic-gate 			case CHUNK_HEARTBEAT:
40120Sstevel@tonic-gate 				sctp_return_heartbeat(sctp, ch, mp);
40130Sstevel@tonic-gate 				break;
40140Sstevel@tonic-gate 			default:
40150Sstevel@tonic-gate 				if (sctp_strange_chunk(sctp, ch, fp) == 0) {
40160Sstevel@tonic-gate 					goto nomorechunks;
40170Sstevel@tonic-gate 				} /* else skip and continue processing */
40180Sstevel@tonic-gate 				break;
40190Sstevel@tonic-gate 			}
40200Sstevel@tonic-gate 			break;
40210Sstevel@tonic-gate 
40220Sstevel@tonic-gate 		case SCTPS_SHUTDOWN_RECEIVED:
40230Sstevel@tonic-gate 			switch (ch->sch_id) {
40240Sstevel@tonic-gate 			case CHUNK_SHUTDOWN:
40250Sstevel@tonic-gate 				trysend = sctp_shutdown_received(sctp, ch,
40261735Skcpoon 				    B_FALSE, B_FALSE, fp);
40270Sstevel@tonic-gate 				break;
40280Sstevel@tonic-gate 			case CHUNK_SACK:
40290Sstevel@tonic-gate 				trysend = sctp_got_sack(sctp, ch);
4030852Svi117747 				if (trysend < 0) {
4031852Svi117747 					sctp_send_abort(sctp, sctph->sh_verf,
4032852Svi117747 					    0, NULL, 0, mp, 0, B_FALSE);
4033852Svi117747 					sctp_assoc_event(sctp,
4034852Svi117747 					    SCTP_COMM_LOST, 0, NULL);
4035852Svi117747 					sctp_clean_death(sctp,
4036852Svi117747 					    ECONNABORTED);
4037852Svi117747 					goto done;
4038852Svi117747 				}
40390Sstevel@tonic-gate 				break;
40400Sstevel@tonic-gate 			case CHUNK_ABORT:
40410Sstevel@tonic-gate 				sctp_process_abort(sctp, ch, ECONNRESET);
40420Sstevel@tonic-gate 				goto done;
40430Sstevel@tonic-gate 			case CHUNK_HEARTBEAT:
40440Sstevel@tonic-gate 				sctp_return_heartbeat(sctp, ch, mp);
40450Sstevel@tonic-gate 				break;
40460Sstevel@tonic-gate 			default:
40470Sstevel@tonic-gate 				if (sctp_strange_chunk(sctp, ch, fp) == 0) {
40480Sstevel@tonic-gate 					goto nomorechunks;
40490Sstevel@tonic-gate 				} /* else skip and continue processing */
40500Sstevel@tonic-gate 				break;
40510Sstevel@tonic-gate 			}
40520Sstevel@tonic-gate 			break;
40530Sstevel@tonic-gate 
40540Sstevel@tonic-gate 		default:
4055*1932Svi117747 			/*
4056*1932Svi117747 			 * The only remaining states are SCTPS_IDLE and
4057*1932Svi117747 			 * SCTPS_BOUND, and we should not be getting here
4058*1932Svi117747 			 * for these.
4059*1932Svi117747 			 */
4060*1932Svi117747 			ASSERT(0);
40610Sstevel@tonic-gate 		} /* switch (sctp->sctp_state) */
40620Sstevel@tonic-gate 
40630Sstevel@tonic-gate 		ch = sctp_next_chunk(ch, &mlen);
40640Sstevel@tonic-gate 		if (ch != NULL && !sctp_check_input(sctp, ch, mlen, 0))
40650Sstevel@tonic-gate 			goto done;
40660Sstevel@tonic-gate 	} while (ch != NULL);
40670Sstevel@tonic-gate 
40680Sstevel@tonic-gate 	/* Finished processing all chunks in packet */
40690Sstevel@tonic-gate 
40700Sstevel@tonic-gate nomorechunks:
40710Sstevel@tonic-gate 	/* SACK if necessary */
40720Sstevel@tonic-gate 	if (gotdata) {
40730Sstevel@tonic-gate 		(sctp->sctp_sack_toggle)++;
40740Sstevel@tonic-gate 		sctp_sack(sctp, dups);
40750Sstevel@tonic-gate 		dups = NULL;
40760Sstevel@tonic-gate 
40770Sstevel@tonic-gate 		if (!sctp->sctp_ack_timer_running) {
40780Sstevel@tonic-gate 			sctp->sctp_ack_timer_running = B_TRUE;
40790Sstevel@tonic-gate 			sctp_timer(sctp, sctp->sctp_ack_mp,
40800Sstevel@tonic-gate 			    MSEC_TO_TICK(sctp_deferred_ack_interval));
40810Sstevel@tonic-gate 		}
40820Sstevel@tonic-gate 	}
40830Sstevel@tonic-gate 
40840Sstevel@tonic-gate 	if (trysend) {
40850Sstevel@tonic-gate 		sctp_output(sctp);
40860Sstevel@tonic-gate 		if (sctp->sctp_cxmit_list != NULL)
40870Sstevel@tonic-gate 			sctp_wput_asconf(sctp, NULL);
40880Sstevel@tonic-gate 	}
40890Sstevel@tonic-gate 	/* If there is unsent data, make sure a timer is running */
40900Sstevel@tonic-gate 	if (sctp->sctp_unsent > 0 && !sctp->sctp_current->timer_running) {
40910Sstevel@tonic-gate 		SCTP_FADDR_TIMER_RESTART(sctp, sctp->sctp_current,
40920Sstevel@tonic-gate 		    sctp->sctp_current->rto);
40930Sstevel@tonic-gate 	}
40940Sstevel@tonic-gate 
40950Sstevel@tonic-gate done:
40960Sstevel@tonic-gate 	if (dups != NULL)
40970Sstevel@tonic-gate 		freeb(dups);
40980Sstevel@tonic-gate 	if (ipsec_mp != NULL)
40990Sstevel@tonic-gate 		freeb(ipsec_mp);
41000Sstevel@tonic-gate 	freemsg(mp);
41010Sstevel@tonic-gate 
41020Sstevel@tonic-gate 	if (wake_eager) {
41030Sstevel@tonic-gate 		/*
41040Sstevel@tonic-gate 		 * sctp points to newly created control block, need to
41050Sstevel@tonic-gate 		 * release it before exiting.  Before releasing it and
41060Sstevel@tonic-gate 		 * processing the sendq, need to grab a hold on it.
41070Sstevel@tonic-gate 		 * Otherwise, another thread can close it while processing
41080Sstevel@tonic-gate 		 * the sendq.
41090Sstevel@tonic-gate 		 */
41100Sstevel@tonic-gate 		SCTP_REFHOLD(sctp);
41110Sstevel@tonic-gate 		WAKE_SCTP(sctp);
41120Sstevel@tonic-gate 		sctp_process_sendq(sctp);
41130Sstevel@tonic-gate 		SCTP_REFRELE(sctp);
41140Sstevel@tonic-gate 	}
41150Sstevel@tonic-gate }
41160Sstevel@tonic-gate 
41170Sstevel@tonic-gate /*
41180Sstevel@tonic-gate  * Some amount of data got removed from rx q.
41190Sstevel@tonic-gate  * Check if we should send a window update.
41200Sstevel@tonic-gate  *
 * Due to the way sctp_rwnd updates are made, the ULP can give reports
 * out-of-order.  To keep from dropping incoming data because of this, we
 * only update sctp_rwnd when it's larger than what we've reported to the
 * peer earlier.
41240Sstevel@tonic-gate  */
41250Sstevel@tonic-gate void
41260Sstevel@tonic-gate sctp_recvd(sctp_t *sctp, int len)
41270Sstevel@tonic-gate {
41280Sstevel@tonic-gate 	int32_t old, new;
41290Sstevel@tonic-gate 
41300Sstevel@tonic-gate 	ASSERT(sctp != NULL);
41310Sstevel@tonic-gate 	RUN_SCTP(sctp);
41320Sstevel@tonic-gate 
41330Sstevel@tonic-gate 	if (len < sctp->sctp_rwnd) {
41340Sstevel@tonic-gate 		WAKE_SCTP(sctp);
41350Sstevel@tonic-gate 		return;
41360Sstevel@tonic-gate 	}
41370Sstevel@tonic-gate 	ASSERT(sctp->sctp_rwnd >= sctp->sctp_rxqueued);
41380Sstevel@tonic-gate 	old = sctp->sctp_rwnd - sctp->sctp_rxqueued;
41390Sstevel@tonic-gate 	new = len - sctp->sctp_rxqueued;
41400Sstevel@tonic-gate 	sctp->sctp_rwnd = len;
41410Sstevel@tonic-gate 
41420Sstevel@tonic-gate 	if (sctp->sctp_state >= SCTPS_ESTABLISHED &&
41430Sstevel@tonic-gate 	    ((old <= new >> 1) || (old < sctp->sctp_mss))) {
41440Sstevel@tonic-gate 		sctp->sctp_force_sack = 1;
41450Sstevel@tonic-gate 		BUMP_MIB(&sctp_mib, sctpOutWinUpdate);
41460Sstevel@tonic-gate 		sctp_sack(sctp, NULL);
41470Sstevel@tonic-gate 		old = 1;
41480Sstevel@tonic-gate 	} else {
41490Sstevel@tonic-gate 		old = 0;
41500Sstevel@tonic-gate 	}
41510Sstevel@tonic-gate 	WAKE_SCTP(sctp);
41520Sstevel@tonic-gate 	if (old > 0) {
41530Sstevel@tonic-gate 		sctp_process_sendq(sctp);
41540Sstevel@tonic-gate 	}
41550Sstevel@tonic-gate }
4156