1 /*	$NetBSD: sctp_indata.c,v 1.2 2015/12/13 18:53:57 christos Exp $ */
2 /*	$KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $	*/
3 
4 /*
5  * Copyright (C) 2002, 2003, 2004 Cisco Systems Inc,
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the project nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __KERNEL_RCSID(0, "$NetBSD: sctp_indata.c,v 1.2 2015/12/13 18:53:57 christos Exp $");
35 
36 #ifdef _KERNEL_OPT
37 #include "opt_ipsec.h"
38 #include "opt_inet.h"
39 #include "opt_sctp.h"
40 #endif /* _KERNEL_OPT */
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/mbuf.h>
45 #include <sys/malloc.h>
46 #include <sys/socket.h>
47 #include <sys/socketvar.h>
48 #include <sys/sysctl.h>
49 
50 #include <net/if.h>
51 #include <net/route.h>
52 
53 
54 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
55 #include <sys/limits.h>
56 #else
57 #include <machine/limits.h>
58 #endif
59 #include <machine/cpu.h>
60 
61 #include <netinet/in.h>
62 #include <netinet/in_systm.h>
63 #include <netinet/ip.h>
64 #ifdef INET6
65 #include <netinet/ip6.h>
66 #endif /* INET6 */
67 #include <netinet/in_pcb.h>
68 #include <netinet/in_var.h>
69 #include <netinet/ip_var.h>
70 #ifdef INET6
71 #include <netinet6/ip6_var.h>
72 #endif /* INET6 */
73 #include <netinet/ip_icmp.h>
74 #include <netinet/icmp_var.h>
75 #include <netinet/sctp_var.h>
76 #include <netinet/sctp_pcb.h>
77 #include <netinet/sctp_header.h>
78 #include <netinet/sctputil.h>
79 #include <netinet/sctp_output.h>
80 #include <netinet/sctp_input.h>
81 #include <netinet/sctp_hashdriver.h>
82 #include <netinet/sctp_indata.h>
83 #include <netinet/sctp_uio.h>
84 #include <netinet/sctp_timer.h>
85 #ifdef IPSEC
86 #include <netinet6/ipsec.h>
87 #include <netkey/key.h>
88 #endif /*IPSEC*/
89 
90 #include <net/net_osdep.h>
91 
92 #ifdef SCTP_DEBUG
93 extern u_int32_t sctp_debug_on;
94 #endif
95 
96 /*
97  * NOTES: On the outbound side of things we need to check the SACK timer to
98  * see if we should generate a SACK into the chunk queue (if we have data to
99  * send, that is, since the SACK will be bundled with it).
100  *
101  * The callback in sctp_usrreq.c will get called when the socket is read
102  * from. This will cause sctp_service_queues() to get called on the top
103  * entry in the list.
104  */
105 
106 extern int sctp_strict_sacks;
107 
108 void
109 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
110 {
111 	u_int32_t calc, calc_w_oh;
112 
113 #ifdef SCTP_DEBUG
114 	if (sctp_debug_on & SCTP_DEBUG_INDATA4) {
115 		printf("cc:%lu hiwat:%lu lowat:%lu mbcnt:%lu mbmax:%lu\n",
116 		       (u_long)stcb->sctp_socket->so_rcv.sb_cc,
117 		       (u_long)stcb->sctp_socket->so_rcv.sb_hiwat,
118 		       (u_long)stcb->sctp_socket->so_rcv.sb_lowat,
119 		       (u_long)stcb->sctp_socket->so_rcv.sb_mbcnt,
120 		       (u_long)stcb->sctp_socket->so_rcv.sb_mbmax);
121 		printf("Setting rwnd to: sb:%ld - (del:%d + reasm:%d str:%d)\n",
122 		       sctp_sbspace(&stcb->sctp_socket->so_rcv),
123 		       asoc->size_on_delivery_queue,
124 		       asoc->size_on_reasm_queue,
125 		       asoc->size_on_all_streams);
126 	}
127 #endif
128 	if (stcb->sctp_socket->so_rcv.sb_cc == 0 &&
129 	    asoc->size_on_delivery_queue == 0 &&
130 	    asoc->size_on_reasm_queue == 0 &&
131 	    asoc->size_on_all_streams == 0) {
132 		/* Full rwnd granted */
133 		asoc->my_rwnd = max(stcb->sctp_socket->so_rcv.sb_hiwat,
134 				    SCTP_MINIMAL_RWND);
135 		return;
136 	}
137 	/* get actual space */
138 	calc = (u_int32_t)sctp_sbspace(&stcb->sctp_socket->so_rcv);
139 
140 	/* take out what has NOT yet been put on the socket queue
141 	 * and that we still hold for delivery upward.
142 	 */
143 	calc = sctp_sbspace_sub(calc, (u_int32_t)asoc->size_on_delivery_queue);
144 	calc = sctp_sbspace_sub(calc, (u_int32_t)asoc->size_on_reasm_queue);
145 	calc = sctp_sbspace_sub(calc, (u_int32_t)asoc->size_on_all_streams);
146 
147 	/* account for the control-message overhead charged against the rwnd */
148 	calc_w_oh = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
149 
150 	asoc->my_rwnd = calc;
151 	if (calc_w_oh == 0) {
152 		/* If our overhead is greater than the available
153 		 * space, we clamp the rwnd to 1. This lets us
154 		 * still accept inbound segments, but hopefully will
155 		 * shut the sender down when it finally gets the message.
156 		 */
157  		asoc->my_rwnd = 1;
158 	} else {
159 		/* SWS threshold */
160 		if (asoc->my_rwnd &&
161 		    (asoc->my_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
162 			/* SWS engaged, tell peer none left */
163 			asoc->my_rwnd = 1;
164 #ifdef SCTP_DEBUG
165 			if (sctp_debug_on & SCTP_DEBUG_INDATA4) {
166 				printf(" - SWS zeros\n");
167 			}
168 		} else {
169 			if (sctp_debug_on & SCTP_DEBUG_INDATA4) {
170 				printf("\n");
171 			}
172 #endif
173 		}
174 	}
175 }
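/*
 * Worked example of the calculation above (illustrative only; the numbers
 * are hypothetical, not from this file): with 60000 bytes of space left in
 * so_rcv and 1000 bytes held on each of the delivery, reassembly and
 * stream queues, calc = 60000 - 3000 = 57000 and my_rwnd is set to that.
 * If the accumulated my_rwnd_control_len reaches calc, calc_w_oh drops to
 * zero and my_rwnd is clamped to 1; the same clamp is applied when a
 * non-zero my_rwnd falls below the receiver's SWS threshold
 * (sctp_sws_receiver).
 */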
176 
177 /*
178  * Build a sndrcvinfo control mbuf from the given data chunk fields.
179  * (Like sctp_build_ctl() below, but without a chk structure.)
180  */
181 static struct mbuf *
182 sctp_build_ctl_nchunk(struct sctp_tcb *stcb, uint32_t tsn, uint32_t ppid,
183     uint32_t context, uint16_t stream_no, uint16_t stream_seq, uint8_t flags)
184 {
185 	struct sctp_sndrcvinfo *outinfo;
186 	struct cmsghdr *cmh;
187 	struct mbuf *ret;
188 
189 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVDATAIOEVNT) == 0) {
190 		/* user does not want the sndrcv ctl */
191 		return (NULL);
192 	}
193 
194 	MGETHDR(ret, M_DONTWAIT, MT_CONTROL);
195 	if (ret == NULL) {
196 		/* No space */
197 		return (ret);
198 	}
199 	/* We need a CMSG header followed by the struct */
200 	cmh = mtod(ret, struct cmsghdr *);
201 	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
202 	cmh->cmsg_level = IPPROTO_SCTP;
203 	cmh->cmsg_type = SCTP_SNDRCV;
204 	cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
205 	outinfo->sinfo_stream = stream_no;
206 	outinfo->sinfo_ssn = stream_seq;
207 	if (flags & SCTP_DATA_UNORDERED) {
208 		outinfo->sinfo_flags = MSG_UNORDERED;
209 	} else {
210 		outinfo->sinfo_flags = 0;
211 	}
212 	outinfo->sinfo_ppid = ppid;
213 	outinfo->sinfo_context = context;
214 	outinfo->sinfo_assoc_id = sctp_get_associd(stcb);
215 	outinfo->sinfo_tsn = tsn;
216 	outinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
217 	ret->m_len = cmh->cmsg_len;
218 	ret->m_pkthdr.len = ret->m_len;
219 	/*
220 	 * We track how many control len's have gone upon the sb
221 	 * and do not count these in the rwnd calculation.
222 	 */
223 	stcb->asoc.my_rwnd_control_len +=
224 	    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
225 
226 	return (ret);
227 }
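/*
 * Userland counterpart (a sketch, not part of this file): a receiver that
 * enabled sctp_data_io_event would walk the control data built here with
 * CMSG_FIRSTHDR()/CMSG_NXTHDR(), match cmsg_level == IPPROTO_SCTP and
 * cmsg_type == SCTP_SNDRCV, and read the struct sctp_sndrcvinfo out of
 * CMSG_DATA() to learn the stream, SSN, TSN and PPID of the message it
 * just received.
 */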
228 
229 /*
230  * Take a chk structure and build it into an mbuf.  Should we change things
231  * so that we store the data side in a chunk instead?
232  */
233 static
234 struct mbuf *
235 sctp_build_ctl(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk)
236 {
237 	struct sctp_sndrcvinfo *outinfo;
238 	struct cmsghdr *cmh;
239 	struct mbuf *ret;
240 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVDATAIOEVNT) == 0) {
241 		/* user does not want the sndrcv ctl */
242 		return (NULL);
243 	}
244 	MGET(ret, M_DONTWAIT, MT_CONTROL);
245 	if (ret == NULL) {
246 		/* No space */
247 		return (ret);
248 	}
249 
250 	/* We need a CMSG header followed by the struct */
251 	cmh = mtod(ret, struct cmsghdr *);
252 	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
253 	cmh->cmsg_level = IPPROTO_SCTP;
254 	cmh->cmsg_type = SCTP_SNDRCV;
255 	cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
256 	outinfo->sinfo_stream = chk->rec.data.stream_number;
257 	outinfo->sinfo_ssn = chk->rec.data.stream_seq;
258 	if (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
259 		outinfo->sinfo_flags = MSG_UNORDERED;
260 	} else {
261 		outinfo->sinfo_flags = 0;
262 	}
263 	outinfo->sinfo_ppid = chk->rec.data.payloadtype;
264 	outinfo->sinfo_context = chk->rec.data.context;
265 	outinfo->sinfo_assoc_id = sctp_get_associd(stcb);
266 	outinfo->sinfo_tsn = chk->rec.data.TSN_seq;
267 	outinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
268 	ret->m_len = cmh->cmsg_len;
269 	stcb->asoc.my_rwnd_control_len +=
270 	    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
271 
272 	return (ret);
273 }
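/*
 * Note: unlike sctp_build_ctl_nchunk() above, this variant uses MGET
 * rather than MGETHDR, so the resulting control mbuf carries no packet
 * header; only m_len is set from cmsg_len.
 */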
274 
275 int
276 sctp_deliver_data(struct sctp_tcb *stcb, struct sctp_association *asoc,
277     struct sctp_tmit_chunk *chk, int hold_locks)
278 {
279 	struct mbuf *control, *m;
280 	int free_it;
281 	struct sockaddr_in6 sin6;
282 	const struct sockaddr *to;
283 
284 #ifdef SCTP_DEBUG
285 	if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
286 		printf("I am now in Deliver data! (%p)\n", chk);
287 	}
288 #endif
289 	/* get a write lock on the inp if not already */
290 	if (hold_locks == 0) {
291 		SCTP_TCB_UNLOCK(stcb);
292 		SCTP_INP_WLOCK(stcb->sctp_ep);
293 		SCTP_TCB_LOCK(stcb);
294 	}
295 	free_it = 0;
296 	/* We always add it to the queue */
297 	if (stcb && (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
298 		/* socket above is long gone */
299 #ifdef SCTP_DEBUG
300 		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
301 			printf("gone is gone!\n");
302 		}
303 #endif
304 		if (chk != NULL) {
305 			if (chk->data)
306 				sctp_m_freem(chk->data);
307 			chk->data = NULL;
308 			sctp_free_remote_addr(chk->whoTo);
309 			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
310 			sctppcbinfo.ipi_count_chunk--;
311 			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
312 				panic("Chunk count is negative");
313 			}
314 			sctppcbinfo.ipi_gencnt_chunk++;
315 		}
316 		TAILQ_FOREACH(chk, &asoc->delivery_queue, sctp_next) {
317 			asoc->size_on_delivery_queue -= chk->send_size;
318 			asoc->cnt_on_delivery_queue--;
319 			/*
320 			 * Lose the data pointer, since it's in the socket buffer
321 			 */
322 			if (chk->data)
323 				sctp_m_freem(chk->data);
324 			chk->data = NULL;
325 			/* Now free the address and data */
326 			sctp_free_remote_addr(chk->whoTo);
327 			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
328 			sctppcbinfo.ipi_count_chunk--;
329 			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
330 				panic("Chunk count is negative");
331 			}
332 			sctppcbinfo.ipi_gencnt_chunk++;
333 		}
334 		if (hold_locks == 0) {
335 			SCTP_INP_WUNLOCK(stcb->sctp_ep);
336 		}
337 		return (0);
338 	}
339 	if (chk != NULL) {
340 		TAILQ_INSERT_TAIL(&asoc->delivery_queue, chk, sctp_next);
341 		asoc->size_on_delivery_queue += chk->send_size;
342 		asoc->cnt_on_delivery_queue++;
343 	}
344 	if (asoc->fragmented_delivery_inprogress) {
345 		/*
346 		 * uh oh, fragmented delivery is in progress;
347 		 * return out of here.
348 		 */
349 #ifdef SCTP_DEBUG
350 		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
351 			printf("Fragmented delivery in progress?\n");
352 		}
353 #endif
354 		if (hold_locks == 0) {
355 			SCTP_INP_WUNLOCK(stcb->sctp_ep);
356 		}
357 		return (0);
358 	}
359 	/* Now grab the first one  */
360 	chk = TAILQ_FIRST(&asoc->delivery_queue);
361 	if (chk == NULL) {
362 		/* Nothing in queue */
363 #ifdef SCTP_DEBUG
364 		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
365 			printf("Nothing in queue?\n");
366 		}
367 #endif
368 		asoc->size_on_delivery_queue = 0;
369 		asoc->cnt_on_delivery_queue = 0;
370 		if (hold_locks == 0) {
371 			SCTP_INP_WUNLOCK(stcb->sctp_ep);
372 		}
373 		return (0);
374 	}
375 
376 	if (stcb->sctp_socket->so_rcv.sb_cc >= stcb->sctp_socket->so_rcv.sb_hiwat) {
377 		/* Boy, there really is NO room */
378 		if (hold_locks == 0) {
379 			SCTP_INP_WUNLOCK(stcb->sctp_ep);
380 		}
381 		return (0);
382 	}
383 #ifdef SCTP_DEBUG
384 	if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
385 		printf("Now to the delivery with chk(%p)!\n", chk);
386 	}
387 #endif
388 	/* XXX need to append PKTHDR to the socket buffer first */
389 	if ((chk->data->m_flags & M_PKTHDR) == 0) {
390 		MGETHDR(m, M_DONTWAIT, MT_DATA);
391 		if (m == NULL) {
392 			/* no room! */
393 			if (hold_locks == 0) {
394 				SCTP_INP_WUNLOCK(stcb->sctp_ep);
395 			}
396 			return (0);
397 		}
398 		m->m_pkthdr.len = chk->send_size;
399 		m->m_len = 0;
400 		m->m_next = chk->data;
401 		chk->data = m;
402 	}
403 	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
404 		if (chk->data->m_next == NULL) {
405 			/* hopefully we hit here most of the time */
406 			chk->data->m_flags |= M_EOR;
407 		} else {
408 			/* Add the flag to the LAST mbuf in the chain */
409 			m = chk->data;
410 			while (m->m_next != NULL) {
411 				m = m->m_next;
412 			}
413 			m->m_flags |= M_EOR;
414 		}
415 	}
416 
417 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
418 		struct sockaddr_in6 lsa6;
419 
420 		control = sctp_build_ctl(stcb, chk);
421 		to = rtcache_getdst(&chk->whoTo->ro);
422 		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
423 		    to->sa_family == AF_INET) {
424 			const struct sockaddr_in *sin;
425 
426 			sin = (const struct sockaddr_in *)to;
427 			memset(&sin6, 0, sizeof(sin6));
428 			sin6.sin6_family = AF_INET6;
429 			sin6.sin6_len = sizeof(struct sockaddr_in6);
430 			sin6.sin6_addr.s6_addr16[2] = 0xffff;
431 			bcopy(&sin->sin_addr, &sin6.sin6_addr.s6_addr16[3],
432 			    sizeof(sin6.sin6_addr.s6_addr16[3]));
433 			sin6.sin6_port = sin->sin_port;
434 			to = (struct sockaddr *)&sin6;
435 		}
436 		/* check and strip embedded scope junk */
437 		to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
438 		    &lsa6);
439 		if (((const struct sockaddr_in *)to)->sin_port == 0) {
440 			printf("Huh a, port is %d not net:%p %d?\n",
441 			       ((const struct sockaddr_in *)to)->sin_port,
442 			       chk->whoTo,
443 			       (int)(ntohs(stcb->rport)));
444 			/*((struct sockaddr_in *)to)->sin_port = stcb->rport;*/
445 			/* XXX */
446 		}
447 		if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < (long)chk->send_size) {
448 			/* Gak not enough room */
449 			if (control) {
450 				sctp_m_freem(control);
451 				stcb->asoc.my_rwnd_control_len -=
452 				    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
453 			}
454 			goto skip;
455 		}
456 		if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv,
457 		    to, chk->data, control, stcb->asoc.my_vtag,
458 		    stcb->sctp_ep)) {
459 			/* Gak not enough room */
460 			if (control) {
461 				sctp_m_freem(control);
462 				stcb->asoc.my_rwnd_control_len -=
463 				    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
464 			}
465 		} else {
466 			if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) {
467 				if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
468 					stcb->asoc.my_rwnd_control_len +=
469 						sizeof(struct mbuf);
470 				}
471 			} else {
472 				stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
473 			}
474 			free_it = 1;
475 		}
476 	} else {
477 		/* append to an already-started message. */
478 		if (sctp_sbspace(&stcb->sctp_socket->so_rcv) >=
479 		    (long)chk->send_size) {
480 			sbappend(&stcb->sctp_socket->so_rcv, chk->data);
481 			free_it = 1;
482 		}
483 	}
484  skip:
485 	if (hold_locks == 0) {
486 		SCTP_INP_WUNLOCK(stcb->sctp_ep);
487 	}
488 	/* free up the one we inserted */
489 	if (free_it) {
490 		/* Pull it off the queue */
491 #ifdef SCTP_DEBUG
492 		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
493 			printf("Free_it true, doing tickle wakeup\n");
494 		}
495 #endif
496 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
497 		TAILQ_REMOVE(&asoc->delivery_queue, chk, sctp_next);
498 		asoc->size_on_delivery_queue -= chk->send_size;
499 		asoc->cnt_on_delivery_queue--;
500 		/* Lose the data pointer, since it's in the socket buffer */
501 		chk->data = NULL;
502 		/* Now free the address and data */
503 		sctp_free_remote_addr(chk->whoTo);
504 		SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
505 		sctppcbinfo.ipi_count_chunk--;
506 		if ((int)sctppcbinfo.ipi_count_chunk < 0) {
507 			panic("Chunk count is negative");
508 		}
509 		sctppcbinfo.ipi_gencnt_chunk++;
510 	}
511 	return (free_it);
512 }
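/*
 * To summarize the return contract implied above: sctp_deliver_data()
 * returns 1 when the chunk at the head of the delivery queue was handed
 * to the socket buffer (and its chk structure freed), and 0 when nothing
 * could be delivered (socket gone, fragmented delivery in progress,
 * empty queue, or no room in so_rcv).  Callers such as
 * sctp_service_reassembly() rely on the 0 return to stop draining.
 */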
513 
514 /*
515  * We are delivering currently from the reassembly queue. We must continue to
516  * deliver until we either:
517  * 1) run out of space.
518  * 2) run out of sequential TSN's
519  * 3) hit the SCTP_DATA_LAST_FRAG flag.
520  */
521 static void
522 sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc, int hold_locks)
523 {
524 	const struct sockaddr *to;
525 	struct sockaddr_in6 sin6;
526 	struct sctp_tmit_chunk *chk, *at;
527 	struct mbuf *control, *m;
528 	u_int16_t nxt_todel;
529 	u_int16_t stream_no;
530 	int cntDel;
531 	cntDel = stream_no = 0;
532 	if (hold_locks == 0) {
533 		/*
534 		 * you always have the TCB lock, we need
535 		 * to have the inp write lock as well.
536 		 */
537 		SCTP_TCB_UNLOCK(stcb);
538 		SCTP_INP_WLOCK(stcb->sctp_ep);
539 		SCTP_TCB_LOCK(stcb);
540 	}
541 	if (stcb && (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
542 		/* socket above is long gone */
543 		asoc->fragmented_delivery_inprogress = 0;
544 		TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
545 			asoc->size_on_delivery_queue -= chk->send_size;
546 			asoc->cnt_on_delivery_queue--;
547 			/*
548 			 * Lose the data pointer, since it's in the socket buffer
549 			 */
550 			if (chk->data)
551 				sctp_m_freem(chk->data);
552 			chk->data = NULL;
553 			/* Now free the address and data */
554 			sctp_free_remote_addr(chk->whoTo);
555 			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
556 			sctppcbinfo.ipi_count_chunk--;
557 			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
558 				panic("Chunk count is negative");
559 			}
560 			sctppcbinfo.ipi_gencnt_chunk++;
561 		}
562 		if (hold_locks == 0) {
563 			SCTP_INP_WUNLOCK(stcb->sctp_ep);
564 		}
565 		return;
566 	}
567 	do {
568 		if (stcb->sctp_socket->so_rcv.sb_cc >=
569 		    stcb->sctp_socket->so_rcv.sb_hiwat) {
570 			if (cntDel) {
571 				sctp_sorwakeup(stcb->sctp_ep,
572 					       stcb->sctp_socket);
573 			}
574 			if (hold_locks == 0) {
575 				SCTP_INP_WUNLOCK(stcb->sctp_ep);
576 			}
577 			return;
578 		}
579 		chk = TAILQ_FIRST(&asoc->reasmqueue);
580 		if (chk == NULL) {
581 			if (cntDel) {
582 				sctp_sorwakeup(stcb->sctp_ep,
583 					       stcb->sctp_socket);
584 			}
585 			if (hold_locks == 0) {
586 				SCTP_INP_WUNLOCK(stcb->sctp_ep);
587 			}
588 			return;
589 		}
590 		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
591 			/* Can't deliver more :< */
592 			if (cntDel) {
593 				sctp_sorwakeup(stcb->sctp_ep,
594 					       stcb->sctp_socket);
595 			}
596 			if (hold_locks == 0) {
597 				SCTP_INP_WUNLOCK(stcb->sctp_ep);
598 			}
599 			return;
600 		}
601 		stream_no = chk->rec.data.stream_number;
602 		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
603 		if (nxt_todel != chk->rec.data.stream_seq &&
604 		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
605 			/*
606 			 * Not the next sequence to deliver in its stream,
607 			 * and not unordered, so we must wait
608 			 */
609 			if (cntDel) {
610 				sctp_sorwakeup(stcb->sctp_ep,
611 					       stcb->sctp_socket);
612 			}
613 			if (hold_locks == 0) {
614 				SCTP_INP_WUNLOCK(stcb->sctp_ep);
615 			}
616 			return;
617 		}
618 
619 		if ((chk->data->m_flags & M_PKTHDR) == 0) {
620 			MGETHDR(m, M_DONTWAIT, MT_DATA);
621 			if (m == NULL) {
622 				/* no room! */
623 				if (hold_locks == 0) {
624 					SCTP_INP_WUNLOCK(stcb->sctp_ep);
625 				}
626 				return;
627 			}
628 			m->m_pkthdr.len = chk->send_size;
629 			m->m_len = 0;
630 			m->m_next = chk->data;
631 			chk->data = m;
632 		}
633 		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
634 			if (chk->data->m_next == NULL) {
635 				/* hopefully we hit here most of the time */
636 				chk->data->m_flags |= M_EOR;
637 			} else {
638 				/* Add the flag to the LAST mbuf in the chain */
639 				m = chk->data;
640 				while (m->m_next != NULL) {
641 					m = m->m_next;
642 				}
643 				m->m_flags |= M_EOR;
644 			}
645 		}
646 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
647 			struct sockaddr_in6 lsa6;
648 
649 			control = sctp_build_ctl(stcb, chk);
650 			to = rtcache_getdst(&chk->whoTo->ro);
651 			if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
652 			    to->sa_family == AF_INET) {
653 				const struct sockaddr_in *sin;
654 
655 				sin = satocsin(to);
656 				memset(&sin6, 0, sizeof(sin6));
657 				sin6.sin6_family = AF_INET6;
658 				sin6.sin6_len = sizeof(struct sockaddr_in6);
659 				sin6.sin6_addr.s6_addr16[2] = 0xffff;
660 				bcopy(&sin->sin_addr,
661 				      &sin6.sin6_addr.s6_addr16[3],
662 				      sizeof(sin6.sin6_addr.s6_addr16[3]));
663 				sin6.sin6_port = sin->sin_port;
664 				to = (struct sockaddr *)&sin6;
665 			}
666 			/* check and strip embedded scope junk */
667 			to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
668 								   &lsa6);
669 			if (((const struct sockaddr_in *)to)->sin_port == 0) {
670 				printf("Huh b, port is %d not net:%p %d?\n",
671 				       ((const struct sockaddr_in *)to)->sin_port,
672 				       chk->whoTo,
673 				       (int)(ntohs(stcb->rport)));
674 				/*((struct sockaddr_in *)to)->sin_port = stcb->rport;*/
675 				/* XXX */
676 			}
677 			if (sctp_sbspace(&stcb->sctp_socket->so_rcv) <
678 			    (long)chk->send_size) {
679 				if (control) {
680 					sctp_m_freem(control);
681 					stcb->asoc.my_rwnd_control_len -=
682 						CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
683 				}
684 				sctp_sorwakeup(stcb->sctp_ep,
685 					       stcb->sctp_socket);
686 				if (hold_locks == 0) {
687 					SCTP_INP_WUNLOCK(stcb->sctp_ep);
688 				}
689 				return;
690 			}
691 			if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv,
692 						  to, chk->data, control, stcb->asoc.my_vtag,
693 						  stcb->sctp_ep)) {
694 				/* Gak not enough room */
695 				if (control) {
696 					sctp_m_freem(control);
697 					stcb->asoc.my_rwnd_control_len -=
698 						CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
699 				}
700 				sctp_sorwakeup(stcb->sctp_ep,
701 					       stcb->sctp_socket);
702 				if (hold_locks == 0) {
703 					SCTP_INP_WUNLOCK(stcb->sctp_ep);
704 				}
705 				return;
706 			}
707 			if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) {
708 				if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
709 					stcb->asoc.my_rwnd_control_len +=
710 						sizeof(struct mbuf);
711 				}
712 			} else {
713 				stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
714 			}
715 			cntDel++;
716 		} else {
717 			if (sctp_sbspace(&stcb->sctp_socket->so_rcv) >=
718 			    (long)chk->send_size) {
719 				sbappend(&stcb->sctp_socket->so_rcv, chk->data);
720 				cntDel++;
721 			} else {
722 				/* out of space in the sb */
723 				sctp_sorwakeup(stcb->sctp_ep,
724 					       stcb->sctp_socket);
725 				if (hold_locks == 0) {
726 					SCTP_INP_WUNLOCK(stcb->sctp_ep);
727 				}
728 				return;
729 			}
730 		}
731 		/* pull it off the queue; we delivered it */
732 		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
733 		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
734 			asoc->fragmented_delivery_inprogress = 0;
735 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
736 				asoc->strmin[stream_no].last_sequence_delivered++;
737 			}
738 		}
739 		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
740 		asoc->size_on_reasm_queue -= chk->send_size;
741 		asoc->cnt_on_reasm_queue--;
742 		/* free up the chk */
743 		sctp_free_remote_addr(chk->whoTo);
744 		chk->data = NULL;
745 		SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
746 		sctppcbinfo.ipi_count_chunk--;
747 		if ((int)sctppcbinfo.ipi_count_chunk < 0) {
748 			panic("Chunk count is negative");
749 		}
750 		sctppcbinfo.ipi_gencnt_chunk++;
751 		if (asoc->fragmented_delivery_inprogress == 0) {
752 			/*
753 			 * Now let's see if we can deliver the next one on the
754 			 * stream
755 			 */
756 			/*u_int16_t nxt_todel;*/
757 			struct sctp_stream_in *strm;
758 
759 			strm = &asoc->strmin[stream_no];
760 			nxt_todel = strm->last_sequence_delivered + 1;
761 			chk = TAILQ_FIRST(&strm->inqueue);
762 			if (chk && (nxt_todel == chk->rec.data.stream_seq)) {
763 				while (chk != NULL) {
764 					/* all delivered */
765 					if (nxt_todel ==
766 					    chk->rec.data.stream_seq) {
767 						at = TAILQ_NEXT(chk, sctp_next);
768 						TAILQ_REMOVE(&strm->inqueue,
769 							     chk, sctp_next);
770 						asoc->size_on_all_streams -=
771 							chk->send_size;
772 						asoc->cnt_on_all_streams--;
773 						strm->last_sequence_delivered++;
774 						/*
775 						 * We ignore the return of
776 						 * deliver_data here since we
777 						 * always can hold the chunk on
778 						 * the d-queue. And we have a
779 						 * finite number that can be
780 						 * delivered from the strq.
781 						 */
782 						sctp_deliver_data(stcb, asoc, chk, 1);
783 						chk = at;
784 					} else {
785 						break;
786 					}
787 					nxt_todel =
788 						strm->last_sequence_delivered + 1;
789 				}
790 			}
791 			if (!TAILQ_EMPTY(&asoc->delivery_queue)) {
792 				/* Here if deliver_data fails, we must break */
793 				if (sctp_deliver_data(stcb, asoc, NULL, 1) == 0)
794 					break;
795 			}
796 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
797 			if (hold_locks == 0) {
798 				SCTP_INP_WUNLOCK(stcb->sctp_ep);
799 			}
800 			return;
801 		}
802 		chk = TAILQ_FIRST(&asoc->reasmqueue);
803 	} while (chk);
804 	if (cntDel) {
805 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
806 	}
807 	if (hold_locks == 0) {
808 		SCTP_INP_WUNLOCK(stcb->sctp_ep);
809 	}
810 }
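/*
 * The early returns above implement the three stop conditions from the
 * header comment: running out of socket-buffer space (sb_cc >= sb_hiwat or
 * sbspace() too small), running out of sequential TSNs (TSN_seq !=
 * tsn_last_delivered + 1, or the next SSN not yet deliverable), and
 * hitting SCTP_DATA_LAST_FRAG, which ends the partial-delivery session by
 * clearing fragmented_delivery_inprogress.  A read wakeup is issued
 * whenever at least one chunk was delivered (cntDel != 0).
 */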
811 
812 /*
813  * Queue the chunk either right into the socket buffer, if it is the next one
814  * to go, OR put it in the correct place in the delivery queue.  If we do
815  * append to the so_buf, keep doing so until we run out of order.
816  * One big question still remains: what to do when the socket buffer is FULL?
817  */
818 static void
819 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
820     struct sctp_tmit_chunk *chk, int *abort_flag)
821 {
822 	struct sctp_stream_in *strm;
823 	struct sctp_tmit_chunk *at;
824 	int queue_needed;
825 	u_int16_t nxt_todel;
826 	struct mbuf *oper;
827 
828 /*** FIX FIX FIX ???
829  * Need to add code to deal with 16 bit seq wrap
830  * without a TSN wrap for ordered delivery (maybe).
831  * FIX FIX FIX ???
832  */
833 	queue_needed = 1;
834 	asoc->size_on_all_streams += chk->send_size;
835 	asoc->cnt_on_all_streams++;
836 	strm = &asoc->strmin[chk->rec.data.stream_number];
837 	nxt_todel = strm->last_sequence_delivered + 1;
838 #ifdef SCTP_STR_LOGGING
839 	sctp_log_strm_del(chk, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
840 #endif
841 #ifdef SCTP_DEBUG
842 	if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
843 		printf("queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
844 		    (u_int)chk->rec.data.stream_seq,
845 		    (u_int)strm->last_sequence_delivered, (u_int)nxt_todel);
846 	}
847 #endif
848 	if (compare_with_wrap(strm->last_sequence_delivered,
849 	    chk->rec.data.stream_seq, MAX_SEQ) ||
850 	    (strm->last_sequence_delivered == chk->rec.data.stream_seq)) {
851 		/* The incoming sseq is behind where we last delivered? */
852 #ifdef SCTP_DEBUG
853 		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
854 			printf("Duplicate S-SEQ:%d delivered:%d from peer, Abort  association\n",
855 			    chk->rec.data.stream_seq,
856 			    strm->last_sequence_delivered);
857 		}
858 #endif
859 		/*
860 		 * throw it in the stream so it gets cleaned up in
861 		 * association destruction
862 		 */
863 		TAILQ_INSERT_HEAD(&strm->inqueue, chk, sctp_next);
864 		MGET(oper, M_DONTWAIT, MT_DATA);
865 		if (oper) {
866 			struct sctp_paramhdr *ph;
867 			u_int32_t *ippp;
868 
869 			oper->m_len = sizeof(struct sctp_paramhdr) +
870 			    sizeof(*ippp);
871 			ph = mtod(oper, struct sctp_paramhdr *);
872 			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
873 			ph->param_length = htons(oper->m_len);
874 			ippp = (u_int32_t *)(ph + 1);
875 			*ippp = htonl(0x00000001);
876 		}
877 		sctp_abort_an_association(stcb->sctp_ep, stcb,
878 		    SCTP_PEER_FAULTY, oper);
879 
880 		*abort_flag = 1;
881 		return;
882 
883 	}
884 	if (nxt_todel == chk->rec.data.stream_seq) {
885 		/* can be delivered right away */
886 #ifdef SCTP_DEBUG
887 		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
888 			printf("It's NEXT!\n");
889 		}
890 #endif
891 #ifdef SCTP_STR_LOGGING
892 		sctp_log_strm_del(chk, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
893 #endif
894 		queue_needed = 0;
895 		asoc->size_on_all_streams -= chk->send_size;
896 		asoc->cnt_on_all_streams--;
897 		strm->last_sequence_delivered++;
898 		sctp_deliver_data(stcb, asoc, chk, 0);
899 		chk = TAILQ_FIRST(&strm->inqueue);
900 		while (chk != NULL) {
901 			/* all delivered */
902 			nxt_todel = strm->last_sequence_delivered + 1;
903 			if (nxt_todel == chk->rec.data.stream_seq) {
904 				at = TAILQ_NEXT(chk, sctp_next);
905 				TAILQ_REMOVE(&strm->inqueue, chk, sctp_next);
906 				asoc->size_on_all_streams -= chk->send_size;
907 				asoc->cnt_on_all_streams--;
908 				strm->last_sequence_delivered++;
909 				/*
910 				 * We ignore the return of deliver_data here
911 				 * since we always can hold the chunk on the
912 				 * d-queue. And we have a finite number that
913 				 * can be delivered from the strq.
914 				 */
915 #ifdef SCTP_STR_LOGGING
916 				sctp_log_strm_del(chk, NULL,
917 				    SCTP_STR_LOG_FROM_IMMED_DEL);
918 #endif
919 				sctp_deliver_data(stcb, asoc, chk, 0);
920 				chk = at;
921 				continue;
922 			}
923 			break;
924 		}
925 	}
926 	if (queue_needed) {
927 		/*
928 		 * Ok, we did not deliver this guy, find
929 		 * the correct place to put it on the queue.
930 		 */
931 #ifdef SCTP_DEBUG
932 		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
933 			printf("Queue Needed!\n");
934 		}
935 #endif
936 		if (TAILQ_EMPTY(&strm->inqueue)) {
937 			/* Empty queue */
938 #ifdef SCTP_STR_LOGGING
939 			sctp_log_strm_del(chk, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
940 #endif
941 			TAILQ_INSERT_HEAD(&strm->inqueue, chk, sctp_next);
942 		} else {
943 			TAILQ_FOREACH(at, &strm->inqueue, sctp_next) {
944 				if (compare_with_wrap(at->rec.data.stream_seq,
945 				    chk->rec.data.stream_seq, MAX_SEQ)) {
946 					/*
947 					 * one in queue is bigger than the new
948 					 * one, insert before this one
949 					 */
950 #ifdef SCTP_STR_LOGGING
951 					sctp_log_strm_del(chk, at,
952 					    SCTP_STR_LOG_FROM_INSERT_MD);
953 #endif
954 					TAILQ_INSERT_BEFORE(at, chk, sctp_next);
955 					break;
956 				} else if (at->rec.data.stream_seq ==
957 				    chk->rec.data.stream_seq) {
958 					/*
959 					 * Gak, he sent me a duplicate str seq
960 					 * number
961 					 */
962 					/*
963 					 * foo bar, I guess I will just free
964 					 * this new guy, should we abort too?
965 					 * FIX ME MAYBE? Or it COULD be that
966 					 * the SSN's have wrapped. Maybe I
967 					 * should compare to TSN somehow...
968 					 * sigh for now just blow away the
969 					 * chunk!
970 					 */
971 
972 					if (chk->data)
973 						sctp_m_freem(chk->data);
974 					chk->data = NULL;
975 					asoc->size_on_all_streams -= chk->send_size;
976 					asoc->cnt_on_all_streams--;
977 					sctp_pegs[SCTP_DUP_SSN_RCVD]++;
978 					sctp_free_remote_addr(chk->whoTo);
979 					SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
980 					sctppcbinfo.ipi_count_chunk--;
981 					if ((int)sctppcbinfo.ipi_count_chunk <
982 					    0) {
983 						panic("Chunk count is negative");
984 					}
985 					sctppcbinfo.ipi_gencnt_chunk++;
986 					return;
987 				} else {
988 					if (TAILQ_NEXT(at, sctp_next) == NULL) {
989 						/*
990 						 * We are at the end, insert it
991 						 * after this one
992 						 */
993 #ifdef SCTP_STR_LOGGING
994 						sctp_log_strm_del(chk, at,
995 						    SCTP_STR_LOG_FROM_INSERT_TL);
996 #endif
997 						TAILQ_INSERT_AFTER(&strm->inqueue,
998 						    at, chk, sctp_next);
999 						break;
1000 					}
1001 				}
1002 			}
1003 		}
1004 	} else {
1005 		/* We delivered some chunks, wake them up */
1006 
1007 #ifdef SCTP_DEBUG
1008 		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1009 			printf("Doing WAKEUP!\n");
1010 		}
1011 #endif
1012 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1013 	}
1014 }
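/*
 * In short: the fast path above delivers the chunk (and any queued
 * successors) as soon as its SSN equals last_sequence_delivered + 1; an
 * SSN at or behind last_sequence_delivered aborts the association as a
 * protocol violation; anything else is inserted in SSN order into
 * strm->inqueue, with compare_with_wrap() handling 16-bit serial-number
 * wrap.  For example (illustrative), with last_sequence_delivered = 5, an
 * arriving SSN 6 is delivered immediately while SSN 8 is queued until 7
 * shows up.
 */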
1015 
1016 /*
1017  * Returns two things: the total size of the deliverable parts of the
1018  * first fragmented message on the reassembly queue, and a 1 if
1019  * all of the message is ready or a 0 if the message is still incomplete.
1020  */
1021 static int
1022 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, int *t_size)
1023 {
1024 	struct sctp_tmit_chunk *chk;
1025 	u_int32_t tsn;
1026 
1027 	*t_size = 0;
1028 	chk = TAILQ_FIRST(&asoc->reasmqueue);
1029 	if (chk == NULL) {
1030 		/* nothing on the queue */
1031 		return (0);
1032 	}
1033 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1034 		/* Not a first on the queue */
1035 		return (0);
1036 	}
1037 	tsn = chk->rec.data.TSN_seq;
1038 	while (chk) {
1039 		if (tsn != chk->rec.data.TSN_seq) {
1040 			return (0);
1041 		}
1042 		*t_size += chk->send_size;
1043 		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1044 			return (1);
1045 		}
1046 		tsn++;
1047 		chk = TAILQ_NEXT(chk, sctp_next);
1048 	}
1049 	return (0);
1050 }
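/*
 * Example (illustrative): fragments with TSNs 10, 11 and 12 at the head of
 * the reassembly queue, where 10 carries SCTP_DATA_FIRST_FRAG and 12
 * carries SCTP_DATA_LAST_FRAG, yield a return of 1 with *t_size equal to
 * the sum of the three send_size values; if 12 has not arrived yet, the
 * walk falls off the sequential run and 0 is returned with *t_size
 * covering only the deliverable prefix.
 */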
1051 
1052 /*
1053  * Dump onto the re-assembly queue, in its proper place. After dumping on
1054  * the queue, see if anything can be delivered. If so pull it off (or as much
1055  * as we can). If we run out of space then we must dump what we can and set
1056  * the appropriate flag to say we queued what we could.
1057  */
1058 static void
1059 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1060     struct sctp_tmit_chunk *chk, int *abort_flag)
1061 {
1062 	struct mbuf *oper;
1063 	u_int16_t nxt_todel;
1064 	u_int32_t cum_ackp1, prev_tsn, post_tsn;
1065 	int tsize;
1066 	struct sctp_tmit_chunk *at, *prev, *next;
1067 
1068 	prev = next = NULL;
1069 	cum_ackp1 = asoc->tsn_last_delivered + 1;
1070 
1071 	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
1072 		/* This is the first one on the queue */
1073 		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
1074 		/*
1075 		 * we do not check for delivery of anything when
1076 		 * only one fragment is here
1077 		 */
1078 		asoc->size_on_reasm_queue = chk->send_size;
1079 		asoc->cnt_on_reasm_queue++;
1080 		if (chk->rec.data.TSN_seq == cum_ackp1) {
1081 			if (asoc->fragmented_delivery_inprogress == 0  &&
1082 			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
1083 			    SCTP_DATA_FIRST_FRAG) {
1084 				/*
1085 				 * An empty queue, no delivery in progress, we
1086 				 * hit the next one and it does NOT have a
1087 				 * FIRST fragment mark.
1088 				 */
1089 #ifdef SCTP_DEBUG
1090 				if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1091 					printf("Gak, Evil plot, its not first, no fragmented delivery in progress\n");
1092 				}
1093 #endif
1094 				MGET(oper, M_DONTWAIT, MT_DATA);
1095 				if (oper) {
1096 					struct sctp_paramhdr *ph;
1097 					u_int32_t *ippp;
1098 
1099 					oper->m_len =
1100 					    sizeof(struct sctp_paramhdr) +
1101 					    sizeof(*ippp);
1102 					ph = mtod(oper, struct sctp_paramhdr *);
1103 					ph->param_type =
1104 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1105 					ph->param_length = htons(oper->m_len);
1106 					ippp = (u_int32_t *)(ph + 1);
1107 					*ippp = htonl(0x10000001);
1108 				}
1109 				sctp_abort_an_association(stcb->sctp_ep, stcb,
1110 				    SCTP_PEER_FAULTY, oper);
1111 				*abort_flag = 1;
1112 			} else if (asoc->fragmented_delivery_inprogress &&
1113 			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1114 				/*
1115 				 * We are doing a partial delivery and the NEXT
1116 				 * chunk MUST be either the LAST or MIDDLE
1117 				 * fragment NOT a FIRST
1118 				 */
1119 #ifdef SCTP_DEBUG
1120 				if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1121 					printf("Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
1122 				}
1123 #endif
1124 				MGET(oper, M_DONTWAIT, MT_DATA);
1125 				if (oper) {
1126 					struct sctp_paramhdr *ph;
1127 					u_int32_t *ippp;
1128 
1129 					oper->m_len =
1130 					    sizeof(struct sctp_paramhdr) +
1131 					    sizeof(*ippp);
1132 					ph = mtod(oper, struct sctp_paramhdr *);
1133 					ph->param_type =
1134 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1135 					ph->param_length = htons(oper->m_len);
1136 					ippp = (u_int32_t *)(ph + 1);
1137 					*ippp = htonl(0x10000002);
1138 				}
1139 				sctp_abort_an_association(stcb->sctp_ep, stcb,
1140 				    SCTP_PEER_FAULTY, oper);
1141 				*abort_flag = 1;
1142 			} else if (asoc->fragmented_delivery_inprogress) {
1143 				/* Here we are ok with a MIDDLE or LAST piece */
1144 				if (chk->rec.data.stream_number !=
1145 				    asoc->str_of_pdapi) {
1146 					/* Got to be the right STR No */
1147 #ifdef SCTP_DEBUG
1148 					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1149 						printf("Gak, Evil plot, it IS not same stream number %d vs %d\n",
1150 						    chk->rec.data.stream_number,
1151 						    asoc->str_of_pdapi);
1152 					}
1153 #endif
1154 					MGET(oper, M_DONTWAIT, MT_DATA);
1155 					if (oper) {
1156 						struct sctp_paramhdr *ph;
1157 						u_int32_t *ippp;
1158 						oper->m_len =
1159 						    sizeof(struct sctp_paramhdr) +
1160 						    sizeof(*ippp);
1161 						ph = mtod(oper,
1162 						    struct sctp_paramhdr *);
1163 						ph->param_type =
1164 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1165 						ph->param_length =
1166 						    htons(oper->m_len);
1167 						ippp = (u_int32_t *)(ph + 1);
1168 						*ippp = htonl(0x10000003);
1169 					}
1170 					sctp_abort_an_association(stcb->sctp_ep,
1171 					    stcb, SCTP_PEER_FAULTY, oper);
1172 					*abort_flag = 1;
1173 				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
1174 				    SCTP_DATA_UNORDERED &&
1175 				    chk->rec.data.stream_seq !=
1176 				    asoc->ssn_of_pdapi) {
1177 					/* Got to be the right STR Seq */
1178 #ifdef SCTP_DEBUG
1179 					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1180 						printf("Gak, Evil plot, it IS not same stream seq %d vs %d\n",
1181 						    chk->rec.data.stream_seq,
1182 						    asoc->ssn_of_pdapi);
1183 					}
1184 #endif
1185 					MGET(oper, M_DONTWAIT, MT_DATA);
1186 					if (oper) {
1187 						struct sctp_paramhdr *ph;
1188 						u_int32_t *ippp;
1189 						oper->m_len =
1190 						    sizeof(struct sctp_paramhdr) +
1191 						    sizeof(*ippp);
1192 						ph = mtod(oper,
1193 						    struct sctp_paramhdr *);
1194 						ph->param_type =
1195 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1196 						ph->param_length =
1197 						    htons(oper->m_len);
1198 						ippp = (u_int32_t *)(ph + 1);
1199 						*ippp = htonl(0x10000004);
1200 					}
1201 					sctp_abort_an_association(stcb->sctp_ep,
1202 					    stcb, SCTP_PEER_FAULTY, oper);
1203 					*abort_flag = 1;
1204 				}
1205 			}
1206 		}
1207 		return;
1208 	}
1209 	/* Find its place */
1210 	at = TAILQ_FIRST(&asoc->reasmqueue);
1211 
1212 	/* Grab the top flags */
1213 	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1214 		if (compare_with_wrap(at->rec.data.TSN_seq,
1215 		    chk->rec.data.TSN_seq, MAX_TSN)) {
1216 			/*
1217 			 * one in queue is bigger than the new one, insert
1218 			 * before this one
1219 			 */
1220 			/* A check */
1221 			asoc->size_on_reasm_queue += chk->send_size;
1222 			asoc->cnt_on_reasm_queue++;
1223 			next = at;
1224 			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1225 			break;
1226 		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
1227 			/* Gak, He sent me a duplicate str seq number */
1228 			/*
1229 			 * foo bar, I guess I will just free this new guy,
1230 			 * should we abort too? FIX ME MAYBE? Or it COULD be
1231 			 * that the SSN's have wrapped. Maybe I should compare
1232 			 * to TSN somehow... sigh for now just blow away the
1233 			 * chunk!
1234 			 */
1235 			if (chk->data)
1236 				sctp_m_freem(chk->data);
1237 			chk->data = NULL;
1238 			sctp_free_remote_addr(chk->whoTo);
1239 			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
1240 			sctppcbinfo.ipi_count_chunk--;
1241 			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
1242 				panic("Chunk count is negative");
1243 			}
1244 			sctppcbinfo.ipi_gencnt_chunk++;
1245 			return;
1246 		} else {
1247 			prev = at;
1248 			if (TAILQ_NEXT(at, sctp_next) == NULL) {
1249 				/*
1250 				 * We are at the end, insert it after this one
1251 				 */
1252 				/* check it first */
1253 				asoc->size_on_reasm_queue += chk->send_size;
1254 				asoc->cnt_on_reasm_queue++;
1255 				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
1256 				break;
1257 			}
1258 		}
1259 	}
1260 	/* Now the audits */
1261 	if (prev) {
1262 		prev_tsn = chk->rec.data.TSN_seq - 1;
1263 		if (prev_tsn == prev->rec.data.TSN_seq) {
1264 			/*
1265 			 * Ok the one I am dropping onto the end
1266 			 * is the NEXT. A bit of validation here.
1267 			 */
1268 			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1269 			    SCTP_DATA_FIRST_FRAG ||
1270 			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1271 			    SCTP_DATA_MIDDLE_FRAG) {
1272 				/*
1273 				 * Insert chk MUST be a MIDDLE or LAST fragment
1274 				 */
1275 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1276 				    SCTP_DATA_FIRST_FRAG) {
1277 #ifdef SCTP_DEBUG
1278 					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1279 						printf("Prev check - It can be a midlle or last but not a first\n");
1280 						printf("Gak, Evil plot, it's a FIRST!\n");
1281 					}
1282 #endif
1283 					MGET(oper, M_DONTWAIT, MT_DATA);
1284 					if (oper) {
1285 						struct sctp_paramhdr *ph;
1286 						u_int32_t *ippp;
1287 
1288 						oper->m_len =
1289 						    sizeof(struct sctp_paramhdr) +
1290 						    sizeof(*ippp);
1291 						ph = mtod(oper,
1292 						    struct sctp_paramhdr *);
1293 						ph->param_type =
1294 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1295 						ph->param_length =
1296 						    htons(oper->m_len);
1297 
1298 						ippp = (u_int32_t *)(ph + 1);
1299 						*ippp = htonl(0x10000005);
1300 					}
1301 					sctp_abort_an_association(stcb->sctp_ep,
1302 					    stcb, SCTP_PEER_FAULTY, oper);
1303 					*abort_flag = 1;
1304 					return;
1305 				}
1306 				if (chk->rec.data.stream_number !=
1307 				    prev->rec.data.stream_number) {
1308 					/*
1309 					 * Huh, need the correct STR here, they
1310 					 * must be the same.
1311 					 */
1312 #ifdef SCTP_DEBUG
1313 					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1314 						printf("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1315 						    chk->rec.data.stream_number,
1316 						    prev->rec.data.stream_number);
1317 					}
1318 #endif
1319 					MGET(oper, M_DONTWAIT, MT_DATA);
1320 					if (oper) {
1321 						struct sctp_paramhdr *ph;
1322 						u_int32_t *ippp;
1323 
1324 						oper->m_len =
1325 						    sizeof(struct sctp_paramhdr) +
1326 						    sizeof(*ippp);
1327 						ph = mtod(oper,
1328 						    struct sctp_paramhdr *);
1329 						ph->param_type =
1330 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1331 						ph->param_length =
1332 						    htons(oper->m_len);
1333 						ippp = (u_int32_t *)(ph + 1);
1334 						*ippp = htonl(0x10000006);
1335 					}
1336 
1337 					sctp_abort_an_association(stcb->sctp_ep,
1338 					    stcb, SCTP_PEER_FAULTY, oper);
1339 
1340 					*abort_flag = 1;
1341 					return;
1342 				}
1343 				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1344 				    chk->rec.data.stream_seq !=
1345 				    prev->rec.data.stream_seq) {
1346 					/*
1347 					 * Huh, need the correct SSEQ here, they
1348 					 * must be the same.
1349 					 */
1350 #ifdef SCTP_DEBUG
1351 					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1352 						printf("Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1353 						    chk->rec.data.stream_seq,
1354 						    prev->rec.data.stream_seq);
1355 					}
1356 #endif
1357 					MGET(oper, M_DONTWAIT, MT_DATA);
1358 					if (oper) {
1359 						struct sctp_paramhdr *ph;
1360 						u_int32_t *ippp;
1361 
1362 						oper->m_len =
1363 						    sizeof(struct sctp_paramhdr) +
1364 						    sizeof(*ippp);
1365 						ph = mtod(oper,
1366 						    struct sctp_paramhdr *);
1367 						ph->param_type =
1368 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1369 						ph->param_length =
1370 						    htons(oper->m_len);
1371 						ippp = (u_int32_t *)(ph + 1);
1372 						*ippp = htonl(0x10000007);
1373 					}
1374 
1375 					sctp_abort_an_association(stcb->sctp_ep,
1376 					    stcb, SCTP_PEER_FAULTY, oper);
1377 
1378 					*abort_flag = 1;
1379 					return;
1380 				}
1381 			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1382 			    SCTP_DATA_LAST_FRAG) {
1383 				/* Insert chk MUST be a FIRST */
1384 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1385 				    SCTP_DATA_FIRST_FRAG) {
1386 #ifdef SCTP_DEBUG
1387 					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1388 						printf("Prev check - Gak, evil plot, its not FIRST and it must be!\n");
1389 					}
1390 #endif
1391 					MGET(oper, M_DONTWAIT, MT_DATA);
1392 					if (oper) {
1393 						struct sctp_paramhdr *ph;
1394 						u_int32_t *ippp;
1395 
1396 						oper->m_len =
1397 						    sizeof(struct sctp_paramhdr) +
1398 						    sizeof(*ippp);
1399 						ph = mtod(oper,
1400 						    struct sctp_paramhdr *);
1401 						ph->param_type =
1402 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1403 						ph->param_length =
1404 						    htons(oper->m_len);
1405 						ippp = (u_int32_t *)(ph + 1);
1406 						*ippp = htonl(0x10000008);
1407 					}
1408 
1409 					sctp_abort_an_association(stcb->sctp_ep,
1410 					    stcb, SCTP_PEER_FAULTY, oper);
1411 
1412 					*abort_flag = 1;
1413 					return;
1414 				}
1415 			}
1416 		}
1417 	}
1418 
1419 	if (next) {
1420 		post_tsn = chk->rec.data.TSN_seq + 1;
1421 		if (post_tsn == next->rec.data.TSN_seq) {
1422 			/*
1423 			 * Ok the one I am inserting ahead of
1424 			 * is my NEXT one. A bit of validation here.
1425 			 */
1426 			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1427 				/* Insert chk MUST be a last fragment */
1428 				if ((chk->rec.data.rcv_flags&SCTP_DATA_FRAG_MASK)
1429 				   != SCTP_DATA_LAST_FRAG) {
1430 #ifdef SCTP_DEBUG
1431 					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1432 						printf("Next chk - Next is FIRST, we must be LAST\n");
1433 						printf("Gak, Evil plot, its not a last!\n");
1434 					}
1435 #endif
1436 					MGET(oper, M_DONTWAIT, MT_DATA);
1437 					if (oper) {
1438 						struct sctp_paramhdr *ph;
1439 						u_int32_t *ippp;
1440 
1441 						oper->m_len =
1442 						    sizeof(struct sctp_paramhdr) +
1443 						    sizeof(*ippp);
1444 						ph = mtod(oper,
1445 						    struct sctp_paramhdr *);
1446 						ph->param_type =
1447 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1448 						ph->param_length =
1449 						    htons(oper->m_len);
1450 						ippp = (u_int32_t *)(ph + 1);
1451 						*ippp = htonl(0x10000009);
1452 					}
1453 
1454 					sctp_abort_an_association(stcb->sctp_ep,
1455 					    stcb, SCTP_PEER_FAULTY, oper);
1456 
1457 					*abort_flag = 1;
1458 					return;
1459 				}
1460 			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1461 			    SCTP_DATA_MIDDLE_FRAG ||
1462 			    (next->rec.data.rcv_flags&SCTP_DATA_FRAG_MASK) ==
1463 			    SCTP_DATA_LAST_FRAG) {
1464 				/* Insert chk CAN be MIDDLE or FIRST NOT LAST */
1465 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1466 				    SCTP_DATA_LAST_FRAG) {
1467 #ifdef SCTP_DEBUG
1468 					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1469 						printf("Next chk - Next is a MIDDLE/LAST\n");
1470 						printf("Gak, Evil plot, new prev chunk is a LAST\n");
1471 					}
1472 #endif
1473 					MGET(oper, M_DONTWAIT, MT_DATA);
1474 					if (oper) {
1475 						struct sctp_paramhdr *ph;
1476 						u_int32_t *ippp;
1477 
1478 						oper->m_len =
1479 						    sizeof(struct sctp_paramhdr) +
1480 						    sizeof(*ippp);
1481 						ph = mtod(oper,
1482 						    struct sctp_paramhdr *);
1483 						ph->param_type =
1484 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1485 						ph->param_length =
1486 						    htons(oper->m_len);
1487 						ippp = (u_int32_t *)(ph + 1);
1488 						*ippp = htonl(0x1000000a);
1489 					}
1490 					sctp_abort_an_association(stcb->sctp_ep,
1491 					    stcb, SCTP_PEER_FAULTY, oper);
1492 
1493 					*abort_flag = 1;
1494 					return;
1495 				}
1496 				if (chk->rec.data.stream_number !=
1497 				    next->rec.data.stream_number) {
1498 					/*
1499 					 * Huh, need the correct STR here, they
1500 					 * must be the same.
1501 					 */
1502 #ifdef SCTP_DEBUG
1503 					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1504 						printf("Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1505 						    chk->rec.data.stream_number,
1506 						    next->rec.data.stream_number);
1507 					}
1508 #endif
1509 					MGET(oper, M_DONTWAIT, MT_DATA);
1510 					if (oper) {
1511 						struct sctp_paramhdr *ph;
1512 						u_int32_t *ippp;
1513 
1514 						oper->m_len =
1515 						    sizeof(struct sctp_paramhdr) +
1516 						    sizeof(*ippp);
1517 						ph = mtod(oper,
1518 						    struct sctp_paramhdr *);
1519 						ph->param_type =
1520 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1521 						ph->param_length =
1522 						    htons(oper->m_len);
1523 						ippp = (u_int32_t *)(ph + 1);
1524 						*ippp = htonl(0x1000000b);
1525 					}
1526 
1527 					sctp_abort_an_association(stcb->sctp_ep,
1528 					    stcb, SCTP_PEER_FAULTY, oper);
1529 
1530 					*abort_flag = 1;
1531 					return;
1532 				}
1533 				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1534 				    chk->rec.data.stream_seq !=
1535 				    next->rec.data.stream_seq) {
1536 					/*
1537 					 * Huh, need the correct SSEQ here, they
1538 					 * must be the same.
1539 					 */
1540 #ifdef SCTP_DEBUG
1541 					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1542 						printf("Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1543 						    chk->rec.data.stream_seq,
1544 						    next->rec.data.stream_seq);
1545 					}
1546 #endif
1547 					MGET(oper, M_DONTWAIT, MT_DATA);
1548 					if (oper) {
1549 						struct sctp_paramhdr *ph;
1550 						u_int32_t *ippp;
1551 
1552 						oper->m_len =
1553 						    sizeof(struct sctp_paramhdr) +
1554 						    sizeof(*ippp);
1555 						ph = mtod(oper,
1556 						    struct sctp_paramhdr *);
1557 						ph->param_type =
1558 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1559 						ph->param_length =
1560 						    htons(oper->m_len);
1561 						ippp = (u_int32_t *)(ph + 1);
1562 						*ippp = htonl(0x1000000c);
1563 					}
1564 
1565 					sctp_abort_an_association(stcb->sctp_ep,
1566 					    stcb, SCTP_PEER_FAULTY, oper);
1567 
1568 					*abort_flag = 1;
1569 					return;
1570 
1571 				}
1572 			}
1573 		}
1574 	}
1575 	/*
1576 	 * now that we have everything in its place we must check a number of
1577 	 * things to see if we can send data to the ULP.
1578 	 */
1579 	/* we need to do some delivery, if we can */
1580 	chk = TAILQ_FIRST(&asoc->reasmqueue);
1581 	if (chk == NULL) {
1582 		/* Huh? */
1583 		asoc->size_on_reasm_queue = 0;
1584 		asoc->cnt_on_reasm_queue = 0;
1585 		return;
1586 	}
1587 	if (asoc->fragmented_delivery_inprogress == 0) {
1588 		nxt_todel =
1589 		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
1590 		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
1591 		    (nxt_todel == chk->rec.data.stream_seq ||
1592 		     (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
1593 			/*
1594 			 * Yep the first one is here and it's
1595 			 * ok to deliver, but should we?
1596 			 */
1597 			if (TAILQ_EMPTY(&asoc->delivery_queue) &&
1598 			    (sctp_is_all_msg_on_reasm(asoc, &tsize) ||
1599 			     (asoc->size_on_reasm_queue >=
1600 			      (stcb->sctp_socket->so_rcv.sb_hiwat >> 2) &&
1601 			      tsize))) {
1602 				/*
1603 				 * Yes, we set up to
1604 				 * start reception by backing down the TSN,
1605 				 * just in case we can't deliver.
1606 				 */
1607 				asoc->fragmented_delivery_inprogress = 1;
1608 				asoc->tsn_last_delivered =
1609 				    chk->rec.data.TSN_seq - 1;
1610 				asoc->str_of_pdapi =
1611 				    chk->rec.data.stream_number;
1612 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
1613 				asoc->fragment_flags = chk->rec.data.rcv_flags;
1614 				sctp_service_reassembly(stcb, asoc, 0);
1615 			}
1616 		}
1617 	} else {
1618 		sctp_service_reassembly(stcb, asoc, 0);
1619 	}
1620 }
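/*
 * The delivery trigger above is worth spelling out: a partial-delivery
 * (PD-API) session starts only when the first fragment is present, it is
 * deliverable in stream order (or unordered), the delivery queue is
 * empty, and either the whole message is on the reassembly queue or at
 * least a quarter of so_rcv's high-water mark (sb_hiwat >> 2) worth of
 * deliverable data has accumulated.  tsn_last_delivered is backed down to
 * TSN_seq - 1 so that sctp_service_reassembly() starts at this fragment.
 */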
1621 
1622 /*
1623  * This is an unfortunate routine. It checks to make sure an evil guy is not
1624  * stuffing us full of bad packet fragments. A broken peer could also do this,
1625  * but that is doubtful. It is too bad I must worry about evil crackers; sigh,
1626  * :< more cycles.
1627  */
1628 static int
1629 sctp_does_chk_belong_to_reasm(struct sctp_association *asoc,
1630     struct sctp_tmit_chunk *chk)
1631 {
1632 	struct sctp_tmit_chunk *at;
1633 	u_int32_t tsn_est;
1634 
1635 	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1636 		if (compare_with_wrap(chk->rec.data.TSN_seq,
1637 		    at->rec.data.TSN_seq, MAX_TSN)) {
1638 			/* is it one bigger? */
1639 			tsn_est = at->rec.data.TSN_seq + 1;
1640 			if (tsn_est == chk->rec.data.TSN_seq) {
1641 				/* yep. It better be a last then */
1642 				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1643 				    SCTP_DATA_LAST_FRAG) {
1644 					/*
1645 					 * Ok this guy belongs next to a guy
1646 					 * that is NOT last, it should be a
1647 					 * middle/last, not a complete chunk.
1648 					 */
1649 					return (1);
1650 				} else {
1651 					/*
1652 					 * This guy is ok since its a LAST and
1653 					 * the new chunk is a fully self-
1654 					 * contained one.
1655 					 */
1656 					return (0);
1657 				}
1658 			}
1659 		} else if (chk->rec.data.TSN_seq == at->rec.data.TSN_seq) {
1660 			/* Software error since I have a dup? */
1661 			return (1);
1662 		} else {
1663 			/*
1664 			 * Ok, 'at' is larger than new chunk but does it
1665 			 * need to be right before it?
1666 			 */
1667 			tsn_est = chk->rec.data.TSN_seq + 1;
1668 			if (tsn_est == at->rec.data.TSN_seq) {
1669 				/* Yep, It better be a first */
1670 				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1671 				    SCTP_DATA_FIRST_FRAG) {
1672 					return (1);
1673 				} else {
1674 					return (0);
1675 				}
1676 			}
1677 		}
1678 	}
1679 	return (0);
1680 }
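/*
 * Example (illustrative): if the queue holds a fragment with TSN 100
 * flagged MIDDLE and a complete (FIRST|LAST) chunk arrives with TSN 101,
 * the first branch above reports 1 -- the slot directly after a non-LAST
 * fragment must belong to the same message, so a self-contained chunk
 * there marks the peer as misbehaving.
 */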
1681 
1682 extern unsigned int sctp_max_chunks_on_queue;
1683 static int
1684 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1685     struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1686     struct sctp_nets *net, u_int32_t *high_tsn, int *abort_flag,
1687     int *break_flag, int last_chunk)
1688 {
1689 	/* Process a data chunk */
1690 	/*  struct sctp_tmit_chunk *chk;*/
1691 	struct sctp_tmit_chunk *chk;
1692 	u_int32_t tsn, gap;
1693 	struct mbuf *dmbuf;
1694 	int the_len;
1695 	u_int16_t strmno, strmseq;
1696 	struct mbuf *oper;
1697 
1698 	chk = NULL;
1699 	tsn = ntohl(ch->dp.tsn);
1700 #ifdef SCTP_MAP_LOGGING
1701 	sctp_log_map(0, tsn, asoc->cumulative_tsn, SCTP_MAP_PREPARE_SLIDE);
1702 #endif
1703 	if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
1704 	    asoc->cumulative_tsn == tsn) {
1705 		/* It is a duplicate */
1706 		sctp_pegs[SCTP_DUPTSN_RECVD]++;
1707 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1708 			/* Record a dup for the next outbound sack */
1709 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1710 			asoc->numduptsns++;
1711 		}
1712 		return (0);
1713 	}
1714 	/* Calculate the number of TSNs between the base and this TSN */
1715 	if (tsn >= asoc->mapping_array_base_tsn) {
1716 		gap = tsn - asoc->mapping_array_base_tsn;
1717 	} else {
1718 		gap = (MAX_TSN - asoc->mapping_array_base_tsn) + tsn + 1;
1719 	}
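	/*
	 * Example of the wrapped branch: with mapping_array_base_tsn
	 * 0xfffffffe and tsn 1, gap = (MAX_TSN - 0xfffffffe) + 1 + 1 = 3,
	 * i.e. this TSN sits three slots past the base in the mapping array.
	 */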
1720 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1721 		/* Can't hold the bit in the mapping array at its max size, toss it */
1722 		return (0);
1723 	}
1724 	if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1725 		if (sctp_expand_mapping_array(asoc)) {
1726 			/* Can't expand, drop it */
1727 			return (0);
1728 		}
1729 	}
1730 	if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
1731 		*high_tsn = tsn;
1732 	}
1733 	/* See if we have received this one already */
1734 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
1735 		sctp_pegs[SCTP_DUPTSN_RECVD]++;
1736 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1737 			/* Record a dup for the next outbound sack */
1738 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1739 			asoc->numduptsns++;
1740 		}
1741 		if (!callout_pending(&asoc->dack_timer.timer)) {
1742 			/*
1743 			 * By starting the timer we assure that we
1744 			 * WILL sack at the end of the packet
1745 			 * when sctp_sack_check gets called.
1746 			 */
1747 			sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep,
1748 			    stcb, NULL);
1749 		}
1750 		return (0);
1751 	}
1752 	/*
1753 	 * Check the GONE flag; duplicates would have caused
1754 	 * a sack to be sent up above.
1755 	 */
1756 	if (stcb && (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
1757 		/*
1758 		 * wait a minute, this guy is gone, there is no
1759 		 * longer a receiver. Send peer an ABORT!
1760 		 */
1761 		struct mbuf *op_err;
1762 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1763 		sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err);
1764 		*abort_flag = 1;
1765 		return (0);
1766 	}
1767 	/*
1768 	 * Now before going further we see if there is room. If NOT then
1769 	 * we MAY let one through only IF this TSN is the one we are
1770 	 * waiting for on a partial delivery API.
1771 	 */
1772 
1773 	/* now do the tests */
1774 	if (((asoc->cnt_on_all_streams +
1775 	    asoc->cnt_on_delivery_queue +
1776 	    asoc->cnt_on_reasm_queue +
1777 	    asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) ||
1778 	    (((int)asoc->my_rwnd) <= 0)) {
1779 		/*
1780 		 * When we have NO room in the rwnd we check
1781 		 * to make sure the reader is doing its job...
1782 		 */
1783 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1784 			/* some to read, wake-up */
1785 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1786 		}
1787 		/* now is it in the mapping array of what we have accepted? */
1788 		if (compare_with_wrap(tsn,
1789 		    asoc->highest_tsn_inside_map, MAX_TSN)) {
1790 
1791 			/* Nope, not in the valid range; dump it */
1792 #ifdef SCTP_DEBUG
1793 			if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1794 				printf("My rwnd overrun1:tsn:%lx rwnd %lu sbspace:%ld delq:%d!\n",
1795 				    (u_long)tsn, (u_long)asoc->my_rwnd,
1796 				    sctp_sbspace(&stcb->sctp_socket->so_rcv),
1797 				    stcb->asoc.cnt_on_delivery_queue);
1798 			}
1799 #endif
1800 			sctp_set_rwnd(stcb, asoc);
1801 			if ((asoc->cnt_on_all_streams +
1802 			    asoc->cnt_on_delivery_queue +
1803 			    asoc->cnt_on_reasm_queue +
1804 			    asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) {
1805 				sctp_pegs[SCTP_MSGC_DROP]++;
1806 			} else {
1807 				sctp_pegs[SCTP_RWND_DROPS]++;
1808 			}
1809 			*break_flag = 1;
1810 			return (0);
1811 		}
1812 	}
1813 	strmno = ntohs(ch->dp.stream_id);
1814 	if (strmno >= asoc->streamincnt) {
1815 		struct sctp_paramhdr *phdr;
1816 		struct mbuf *mb;
1817 
1818 		MGETHDR(mb, M_DONTWAIT, MT_DATA);
1819 		if (mb != NULL) {
1820 			/* add some space up front so prepend will work well */
1821 			mb->m_data += sizeof(struct sctp_chunkhdr);
1822 			phdr = mtod(mb, struct sctp_paramhdr *);
1823 			/*
1824 			 * Error causes are just params, and this one has
1825 			 * two back-to-back phdrs: one with the error type
1826 			 * and size, the other with the stream id and a rsvd.
1827 			 */
1828 			mb->m_pkthdr.len = mb->m_len =
1829 			    (sizeof(struct sctp_paramhdr) * 2);
1830 			phdr->param_type = htons(SCTP_CAUSE_INV_STRM);
1831 			phdr->param_length =
1832 			    htons(sizeof(struct sctp_paramhdr) * 2);
1833 			phdr++;
1834 			/* We insert the stream in the type field */
1835 			phdr->param_type = ch->dp.stream_id;
1836 			/* And set the length to 0 for the rsvd field */
1837 			phdr->param_length = 0;
1838 			sctp_queue_op_err(stcb, mb);
1839 		}
1840 		sctp_pegs[SCTP_BAD_STRMNO]++;
1841 		return (0);
1842 	}
1843 	/*
1844 	 * Before we continue, let's validate that we are not
1845 	 * being fooled by an evil attacker. We can only
1846 	 * have 4k chunks outstanding based on the TSN spread
1847 	 * allowed by the mapping array (512 * 8 bits), so there
1848 	 * is no way our stream sequence numbers could have wrapped.
1849 	 * We of course only validate the FIRST fragment so the
1850 	 * bit must be set.
1851 	 */
1852 	strmseq = ntohs(ch->dp.stream_sequence);
1853 	if ((ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1854 	    (ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1855 	    (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
1856 	     strmseq, MAX_SEQ) ||
1857 	     asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
1858 		/* The incoming sseq is behind where we last delivered? */
1859 #ifdef SCTP_DEBUG
1860 		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1861 			printf("EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1862 			    strmseq,
1863 			    asoc->strmin[strmno].last_sequence_delivered);
1864 		}
1865 #endif
1866 		/*
1867 		 * throw it in the stream so it gets cleaned up in
1868 		 * association destruction
1869 		 */
1870 		MGET(oper, M_DONTWAIT, MT_DATA);
1871 		if (oper) {
1872 			struct sctp_paramhdr *ph;
1873 			u_int32_t *ippp;
1874 
1875 			oper->m_len = sizeof(struct sctp_paramhdr) +
1876 			    sizeof(*ippp);
1877 			ph = mtod(oper, struct sctp_paramhdr *);
1878 			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1879 			ph->param_length = htons(oper->m_len);
1880 			ippp = (u_int32_t *)(ph + 1);
1881 			*ippp = htonl(0x20000001);
1882 		}
1883 		sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY,
1884 		    oper);
1885 		sctp_pegs[SCTP_BAD_SSN_WRAP]++;
1886 		*abort_flag = 1;
1887 		return (0);
1888 	}
1889 
1890 	the_len = (chk_length-sizeof(struct sctp_data_chunk));
1891 	if (last_chunk == 0) {
1892 		dmbuf = sctp_m_copym(*m,
1893 		    (offset + sizeof(struct sctp_data_chunk)),
1894 		    the_len, M_DONTWAIT);
1895 	} else {
1896 		/* We can steal the last chunk */
1897 		dmbuf = *m;
1898 		/* lop off the top part */
1899 		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1900 		if (dmbuf->m_pkthdr.len > the_len) {
1901 			/* Trim the trailing bytes off the end too */
1902 			m_adj(dmbuf, -(dmbuf->m_pkthdr.len-the_len));
1903 		}
1904 		sctp_pegs[SCTP_NO_COPY_IN]++;
1905 	}
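	/*
	 * Either way dmbuf now holds exactly the_len bytes of user data;
	 * m_adj() with a positive count trims from the head of the chain,
	 * a negative count trims from the tail.
	 */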
1906 	if (dmbuf == NULL) {
1907 		sctp_pegs[SCTP_DROP_NOMEMORY]++;
1908 		return (0);
1909 	}
1910 	if ((ch->ch.chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1911 	    asoc->fragmented_delivery_inprogress == 0 &&
1912 	    TAILQ_EMPTY(&asoc->delivery_queue) &&
1913 	    ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) ||
1914 	     ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1915 	      TAILQ_EMPTY(&asoc->strmin[strmno].inqueue))) &&
1916 	    ((long)(stcb->sctp_socket->so_rcv.sb_hiwat -
1917 	            stcb->sctp_socket->so_rcv.sb_cc) >= (long)the_len)) {
1918 		/* Candidate for express delivery */
1919 		/*
1920 		 * It's not fragmented,
1921 		 * No PD-API is up,
1922 		 * Nothing is in the delivery queue,
1923 		 * It's un-ordered OR ordered and the next to deliver AND
1924 		 * nothing else is stuck on the stream queue,
1925 		 * And there is room for it in the socket buffer.
1926 		 * Let's just stuff it up the buffer....
1927 		 */
1928 
1929 		struct mbuf *control, *mmm;
1930 		struct sockaddr_in6 sin6;
1931 		struct sockaddr_in6 lsa6;
1932 		const struct sockaddr *to;
1933 
1934 		/* It would be nice to avoid this copy if we could :< */
1935 		control = sctp_build_ctl_nchunk(stcb, tsn,
1936 		    ch->dp.protocol_id, 0, strmno, strmseq,
1937 		    ch->ch.chunk_flags);
1938 		/* XXX need to append PKTHDR to the socket buffer first */
1939 
1940 		if ((dmbuf->m_flags & M_PKTHDR) == 0) {
1941 			struct mbuf *tmp;
1942 			MGETHDR(tmp, M_DONTWAIT, MT_DATA);
1943 			if (tmp == NULL) {
1944 
1945 				/* no room! */
1946 				if (control) {
1947 					sctp_m_freem(control);
1948 					stcb->asoc.my_rwnd_control_len -=
1949 					    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
1950 				}
1951 
1952 				goto failed_express_del;
1953 			}
1954 			tmp->m_pkthdr.len = the_len;
1955 			tmp->m_len = 0;
1956 			tmp->m_next = dmbuf;
1957 			dmbuf = tmp;
1958 		}
1959 		to = rtcache_getdst(&net->ro);
1960 		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
1961 		    to->sa_family == AF_INET) {
1962 			const struct sockaddr_in *sin;
1963 
1964 			sin = satocsin(to);
1965 			memset(&sin6, 0, sizeof(sin6));
1966 			sin6.sin6_family = AF_INET6;
1967 			sin6.sin6_len = sizeof(struct sockaddr_in6);
1968 			sin6.sin6_addr.s6_addr16[5] = 0xffff;
1969 			bcopy(&sin->sin_addr,
1970 			    &sin6.sin6_addr.s6_addr32[3],
1971 			    sizeof(sin6.sin6_addr.s6_addr32[3]));
1972 			sin6.sin6_port = sin->sin_port;
1973 			to = (struct sockaddr *)&sin6;
1974 		}
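		/*
		 * sin6 now carries the peer's address in IPv4-mapped form
		 * (::ffff:a.b.c.d), which is what an AF_INET6 socket that
		 * asked for mapped addresses expects to see.
		 */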
1975 
1976 		/* check and strip embedded scope junk */
1977 		to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
1978 		    &lsa6);
1979 		if (((const struct sockaddr_in *)to)->sin_port == 0) {
1980 			printf("Huh c, port is %d not net:%p %d?\n",
1981 			       ((const struct sockaddr_in *)to)->sin_port,
1982 			       net,
1983 			       (int)(ntohs(stcb->rport)));
1984 			/*((struct sockaddr_in *)to)->sin_port = stcb->rport;*/
1985 			/* XXX */
1986 		}
1987 
1988 		mmm = dmbuf;
1989 		/* Mark the EOR */
1990 		while (mmm->m_next != NULL) {
1991 			mmm = mmm->m_next;
1992 		}
1993 		mmm->m_flags |= M_EOR;
1994 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
1995 			/* we have a new high score */
1996 			asoc->highest_tsn_inside_map = tsn;
1997 #ifdef SCTP_MAP_LOGGING
1998 			sctp_log_map(0, 1, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
1999 #endif
2000 		}
2001 		SCTP_TCB_UNLOCK(stcb);
2002 		SCTP_INP_WLOCK(stcb->sctp_ep);
2003 		SCTP_TCB_LOCK(stcb);
2004 		if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to, dmbuf,
2005 		    control, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2006 			if (control) {
2007 				sctp_m_freem(control);
2008 				stcb->asoc.my_rwnd_control_len -=
2009 				    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
2010 			}
2011 			sctp_m_freem(dmbuf);
2012 			goto failed_express_del;
2013 		}
2014 		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) {
2015 			if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2016 				stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2017 			}
2018 		} else {
2019 			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2020 		}
2021 		SCTP_INP_WUNLOCK(stcb->sctp_ep);
2022 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2023 		if ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0) {
2024 
2025 			/* for ordered, bump what we delivered */
2026 			asoc->strmin[strmno].last_sequence_delivered++;
2027 		}
2028 		sctp_pegs[SCTP_EXPRESS_ROUTE]++;
2029 #ifdef SCTP_STR_LOGGING
2030 		sctp_log_strm_del_alt(tsn, strmseq,
2031 		    SCTP_STR_LOG_FROM_EXPRS_DEL);
2032 #endif
2033 #ifdef SCTP_DEBUG
2034 		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
2035 			printf("Express Delivery succeeds\n");
2036 		}
2037 #endif
2038 		goto finish_express_del;
2039 	}
2040 
2041  failed_express_del:
2042 	/* If we reach here this is a new chunk */
2043 	chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
2044 	if (chk == NULL) {
2045 		/* No memory so we drop the chunk */
2046 		sctp_pegs[SCTP_DROP_NOMEMORY]++;
2047 		if (last_chunk == 0) {
2048 			/* we copied it, free the copy */
2049 			sctp_m_freem(dmbuf);
2050 		}
2051 		return (0);
2052 	}
2053 	sctppcbinfo.ipi_count_chunk++;
2054 	sctppcbinfo.ipi_gencnt_chunk++;
2055 	chk->rec.data.TSN_seq = tsn;
2056 	chk->rec.data.stream_seq = strmseq;
2057 	chk->rec.data.stream_number = strmno;
2058 	chk->rec.data.payloadtype = ch->dp.protocol_id;
2059 	chk->rec.data.context = 0;
2060 	chk->rec.data.doing_fast_retransmit = 0;
2061 	chk->rec.data.rcv_flags = ch->ch.chunk_flags;
2062 	chk->asoc = asoc;
2063 	chk->send_size = the_len;
2064 	chk->whoTo = net;
2065 	net->ref_count++;
2066 	chk->data = dmbuf;
2067 
2068 
2069 	/* Mark it as received */
2070 	/* Now queue it where it belongs */
2071 	if ((chk->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
2072 	    SCTP_DATA_NOT_FRAG) {
2073 		/* First a sanity check */
2074 		if (asoc->fragmented_delivery_inprogress) {
2075 			/*
2076 			 * Ok, we have a fragmented delivery in progress;
2077 			 * if this chunk is next to deliver OR belongs in
2078 			 * the reassembly queue in our view, the peer is
2079 			 * evil or broken.
2080 			 */
2081 			u_int32_t estimate_tsn;
2082 			estimate_tsn = asoc->tsn_last_delivered + 1;
2083 			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
2084 			    (estimate_tsn == chk->rec.data.TSN_seq)) {
2085 				/* Evil/Broken peer */
2086 				MGET(oper, M_DONTWAIT, MT_DATA);
2087 				if (oper) {
2088 					struct sctp_paramhdr *ph;
2089 					u_int32_t *ippp;
2090 
2091 					oper->m_len =
2092 					    sizeof(struct sctp_paramhdr) +
2093 					    sizeof(*ippp);
2094 					ph = mtod(oper, struct sctp_paramhdr *);
2095 					ph->param_type =
2096 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2097 					ph->param_length = htons(oper->m_len);
2098 					ippp = (u_int32_t *)(ph + 1);
2099 					*ippp = htonl(0x20000002);
2100 				}
2101 				sctp_abort_an_association(stcb->sctp_ep, stcb,
2102 				    SCTP_PEER_FAULTY, oper);
2103 
2104 				*abort_flag = 1;
2105 				sctp_pegs[SCTP_DROP_FRAG]++;
2106 				return (0);
2107 			} else {
2108 				if (sctp_does_chk_belong_to_reasm(asoc, chk)) {
2109 					MGET(oper, M_DONTWAIT, MT_DATA);
2110 					if (oper) {
2111 						struct sctp_paramhdr *ph;
2112 						u_int32_t *ippp;
2113 
2114 						oper->m_len =
2115 						    sizeof(struct sctp_paramhdr) +
2116 						    sizeof(*ippp);
2117 						ph = mtod(oper,
2118 						    struct sctp_paramhdr *);
2119 						ph->param_type =
2120 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2121 						ph->param_length =
2122 						    htons(oper->m_len);
2123 						ippp = (u_int32_t *)(ph + 1);
2124 						*ippp = htonl(0x20000003);
2125 					}
2126 					sctp_abort_an_association(stcb->sctp_ep,
2127 					    stcb, SCTP_PEER_FAULTY, oper);
2128 
2129 					*abort_flag = 1;
2130 					sctp_pegs[SCTP_DROP_FRAG]++;
2131 					return (0);
2132 				}
2133 			}
2134 		} else {
2135 			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
2136 				/*
2137 				 * Reassembly queue is NOT empty:
2138 				 * validate that this chk does not need to
2139 				 * be in the reassembly queue. If it does,
2140 				 * then our peer is broken or evil.
2141 				 */
2142 				if (sctp_does_chk_belong_to_reasm(asoc, chk)) {
2143 					MGET(oper, M_DONTWAIT, MT_DATA);
2144 					if (oper) {
2145 						struct sctp_paramhdr *ph;
2146 						u_int32_t *ippp;
2147 
2148 						oper->m_len =
2149 						    sizeof(struct sctp_paramhdr) +
2150 						    sizeof(*ippp);
2151 						ph = mtod(oper,
2152 						    struct sctp_paramhdr *);
2153 						ph->param_type =
2154 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2155 						ph->param_length =
2156 						    htons(oper->m_len);
2157 						ippp = (u_int32_t *)(ph + 1);
2158 						*ippp = htonl(0x20000004);
2159 					}
2160 					sctp_abort_an_association(stcb->sctp_ep,
2161 					    stcb, SCTP_PEER_FAULTY, oper);
2162 
2163 					*abort_flag = 1;
2164 					sctp_pegs[SCTP_DROP_FRAG]++;
2165 					return (0);
2166 				}
2167 			}
2168 		}
2169 		if (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
2170 			/* queue directly into socket buffer */
2171 			sctp_deliver_data(stcb, asoc, chk, 0);
2172 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2173 		} else {
2174 			/* Special check for when streams are resetting.
2175 			 * We could be more smart about this and check the
2176 			 * actual stream to see if it is not being reset.. that
2177 			 * way we would not create a HOLB when amongst streams
2178 			 * being reset and those not being reset.
2179 			 *
2180 			 * We take complete messages that have a stream reset
2181 			 * intervening (aka the TSN is after where our cum-ack needs
2182 			 * to be) off and put them on a pending_reply_queue. The
2183 			 * reassembly ones we do not have to worry about since
2184 			 * they are all sorted and proceessed by TSN order. It
2185 			 * is only the singletons I must worry about.
2186 			 */
2187 			if ((asoc->pending_reply) &&
2188 			   ((compare_with_wrap(tsn, ntohl(asoc->pending_reply->reset_at_tsn), MAX_TSN)) ||
2189 			    (tsn == ntohl(asoc->pending_reply->reset_at_tsn)))
2190 				) {
2191 				/* yep, it's past where we need to reset...
2192 				 * go ahead and queue it.
2193 				 */
2194 				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, chk, sctp_next);
2195 			} else {
2196 				sctp_queue_data_to_stream(stcb, asoc, chk, abort_flag);
2197 			}
2198 		}
2199 	} else {
2200 		/* Into the re-assembly queue */
2201 		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2202 		if (*abort_flag) {
2203 			sctp_pegs[SCTP_DROP_FRAG]++;
2204 			return (0);
2205 		}
2206 	}
2207 	if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
2208 		/* we have a new high score */
2209 		asoc->highest_tsn_inside_map = tsn;
2210 #ifdef SCTP_MAP_LOGGING
2211 		sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2212 #endif
2213 	}
2214  finish_express_del:
2215 	if (last_chunk) {
2216 		*m = NULL;
2217 	}
2218 	sctp_pegs[SCTP_PEG_TSNS_RCVD]++;
2219 	/* Set it present please */
2220 #ifdef SCTP_STR_LOGGING
2221 	sctp_log_strm_del_alt(tsn, strmseq, SCTP_STR_LOG_FROM_MARK_TSN);
2222 #endif
2223 #ifdef SCTP_MAP_LOGGING
2224 	sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2225 		     asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2226 #endif
2227 	SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2228 	return (1);
2229 }
2230 
2231 void
2232 sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort_flag)
2233 {
2234 	/*
2235 	 * Now we also need to check the mapping array in a couple of ways.
2236 	 * 1) Did we move the cum-ack point?
2237 	 */
2238 	struct sctp_association *asoc;
2239 	int i, at;
2240 	int m_size, all_ones;
2241 	int slide_from, slide_end, lgap, distance;
2242 #ifdef SCTP_MAP_LOGGING
2243 	uint32_t old_cumack, old_base, old_highest;
2244 	unsigned char aux_array[64];
2245 #endif
2246 
2247 	asoc = &stcb->asoc;
2248 	at = 0;
2249 
2250 #ifdef SCTP_MAP_LOGGING
2251 	old_cumack = asoc->cumulative_tsn;
2252 	old_base = asoc->mapping_array_base_tsn;
2253 	old_highest = asoc->highest_tsn_inside_map;
2254 	if (asoc->mapping_array_size < 64)
2255 		memcpy(aux_array, asoc->mapping_array,
2256 		    asoc->mapping_array_size);
2257 	else
2258 		memcpy(aux_array, asoc->mapping_array, 64);
2259 #endif
2260 
2261 	/*
2262 	 * We could probably improve this a small bit by calculating the
2263 	 * offset of the current cum-ack as the starting point.
2264 	 */
2265 	all_ones = 1;
2266 	m_size = stcb->asoc.mapping_array_size << 3;
2267 	for (i = 0; i < m_size; i++) {
2268 		if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i)) {
2269 			/*
2270 			 * Ok we found the first place that we are
2271 			 * missing a TSN.
2272 			 */
2273 			at = i;
2274 			all_ones = 0;
2275 			asoc->cumulative_tsn = asoc->mapping_array_base_tsn +
2276 			    (i - 1);
2277 			break;
2278 		}
2279 	}
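	/*
	 * Note: if bit 0 is the first hole (i == 0), the assignment above
	 * leaves cumulative_tsn at mapping_array_base_tsn - 1, which is
	 * consistent since the base is kept one ahead of the cum-ack.
	 */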
2280 	if (compare_with_wrap(asoc->cumulative_tsn,
2281 			      asoc->highest_tsn_inside_map,
2282 			      MAX_TSN)) {
2283 		panic("huh, cumack greater than high-tsn in map");
2284 	}
2285 	if (all_ones ||
2286 	    (asoc->cumulative_tsn == asoc->highest_tsn_inside_map && at >= 8)) {
2287 		/* The complete array was filled in by a single FR */
2288 		/* highest becomes the cum-ack */
2289 		int clr;
2290 		asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
2291 		/* clear the array */
2292 		if (all_ones)
2293 			clr = asoc->mapping_array_size;
2294 		else {
2295 			clr = (at >> 3) + 1;
2296 			/*
2297 			 * this should be the all-ones case
2298 			 * but just in case :>
2299 			 */
2300 			if (clr > asoc->mapping_array_size)
2301 				clr = asoc->mapping_array_size;
2302 		}
2303 		memset(asoc->mapping_array, 0, clr);
2304 		/* base becomes one ahead of the cum-ack */
2305 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2306 #ifdef SCTP_MAP_LOGGING
2307 		sctp_log_map(old_base, old_cumack, old_highest,
2308 		    SCTP_MAP_PREPARE_SLIDE);
2309 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2310 		    asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_CLEARED);
2311 #endif
2312 	} else if (at >= 8) {
2313 		/* we can slide the mapping array down */
2314 		/* Calculate the new byte position we can move down */
2315 		slide_from = at >> 3;
2316 		/* now calculate the ceiling of the move using our highest TSN value */
2317 		if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) {
2318 			lgap = asoc->highest_tsn_inside_map -
2319 			    asoc->mapping_array_base_tsn;
2320 		} else {
2321 			lgap = (MAX_TSN - asoc->mapping_array_base_tsn) +
2322 			    asoc->highest_tsn_inside_map + 1;
2323 		}
2324 		slide_end = lgap >> 3;
2325 		if (slide_end < slide_from) {
2326 			panic("impossible slide");
2327 		}
2328 		distance = (slide_end-slide_from) + 1;
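		/*
		 * Example: at = 24 gives slide_from = 3; if lgap = 40 then
		 * slide_end = 5 and distance = 3, so map bytes 3..5 move
		 * down to 0..2 and the base TSN advances by 3 * 8 = 24.
		 */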
2329 #ifdef SCTP_MAP_LOGGING
2330 		sctp_log_map(old_base, old_cumack, old_highest,
2331 		    SCTP_MAP_PREPARE_SLIDE);
2332 		sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2333 		    (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2334 #endif
2335 		if (distance + slide_from > asoc->mapping_array_size ||
2336 		    distance < 0) {
2337 #ifdef SCTP_DEBUG
2338 			if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
2339 				printf("Ugh bad addition.. you can't hrumpp!\n");
2340 			}
2341 #endif
2342 			/*
2343 			 * Here we do NOT slide forward the array so that
2344 			 * hopefully when more data comes in to fill it up
2345 			 * we will be able to slide it forward. Really
2346 			 * I don't think this should happen :-0
2347 			 */
2348 
2349 #ifdef SCTP_MAP_LOGGING
2350 			sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2351 			    (uint32_t)asoc->mapping_array_size,
2352 			    SCTP_MAP_SLIDE_NONE);
2353 #endif
2354 		} else {
2355 			int ii;
2356 			for (ii = 0; ii < distance; ii++) {
2357 				asoc->mapping_array[ii] =
2358 				    asoc->mapping_array[slide_from + ii];
2359 			}
2360 			for (ii = distance; ii <= slide_end; ii++) {
2361 				asoc->mapping_array[ii] = 0;
2362 			}
2363 			asoc->mapping_array_base_tsn += (slide_from << 3);
2364 #ifdef SCTP_MAP_LOGGING
2365 			sctp_log_map(asoc->mapping_array_base_tsn,
2366 			    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2367 			    SCTP_MAP_SLIDE_RESULT);
2368 #endif
2369 		}
2370 	}
2371 
2372 	/* check the special flag for stream resets */
2373 	if ((asoc->pending_reply) &&
2374 	   ((compare_with_wrap((asoc->cumulative_tsn+1), ntohl(asoc->pending_reply->reset_at_tsn), MAX_TSN)) ||
2375 	    ((asoc->cumulative_tsn+1) ==  ntohl(asoc->pending_reply->reset_at_tsn)))
2376 		) {
2377 		/* we have finished working through the backlogged TSNs;
2378 		 * now it is time to reset streams.
2379 		 * 1: call reset function.
2380 		 * 2: free pending_reply space
2381 		 * 3: distribute any chunks in pending_reply_queue.
2382 		 */
2383 		struct sctp_tmit_chunk *chk;
2384 		sctp_handle_stream_reset_response(stcb, asoc->pending_reply);
2385 		free(asoc->pending_reply, M_PCB);
2386 		asoc->pending_reply = NULL;
2387 		chk = TAILQ_FIRST(&asoc->pending_reply_queue);
2388 		while (chk) {
2389 			TAILQ_REMOVE(&asoc->pending_reply_queue, chk, sctp_next);
2390 			sctp_queue_data_to_stream(stcb, asoc, chk, abort_flag);
2391 			if (*abort_flag) {
2392 				return;
2393 			}
2394 			chk = TAILQ_FIRST(&asoc->pending_reply_queue);
2395 		}
2396 	}
2397 	/*
2398 	 * Now we need to see if we need to queue a sack or just start
2399 	 * the timer (if allowed).
2400 	 */
2401 	if (ok_to_sack) {
2402 		if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2403 			/*
2404 			 * Ok, special case: in the SHUTDOWN-SENT state
2405 			 * we make sure the SACK timer is off and
2406 			 * instead send a SHUTDOWN and a SACK.
2407 			 */
2408 			if (callout_pending(&stcb->asoc.dack_timer.timer)) {
2409 				sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2410 				    stcb->sctp_ep, stcb, NULL);
2411 			}
2412 #ifdef SCTP_DEBUG
2413 			if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
2414 				printf("%s:%d sends a shutdown\n",
2415 				       __FILE__,
2416 				       __LINE__
2417 				       );
2418 			}
2419 #endif
2420 			sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
2421 			sctp_send_sack(stcb);
2422 		} else {
2423 			int is_a_gap;
2424 			/* is there a gap now? */
2425 			is_a_gap = compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2426 			    stcb->asoc.cumulative_tsn, MAX_TSN);
2427 			if ((stcb->asoc.first_ack_sent == 0) ||	/* First time we send a sack */
2428 			    ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no longer is one */
2429 			    (stcb->asoc.numduptsns) ||		/* we have dup's */
2430 			    (is_a_gap) ||			/* is still a gap */
2431 			    (callout_pending(&stcb->asoc.dack_timer.timer)) /* timer was up . second packet */
2432 				) {
2433 				/*
2434 				 * Ok, we must build a SACK since the timer
2435 				 * is pending, we got our first packet, OR
2436 				 * there are gaps or duplicates.
2437 				 */
2438 				stcb->asoc.first_ack_sent = 1;
2439 				sctp_send_sack(stcb);
2440 				/* The sending will stop the timer */
2441 			} else {
2442 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2443 				    stcb->sctp_ep, stcb, NULL);
2444 			}
2445 		}
2446 	}
2447 }
2448 
2449 void
2450 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc, int hold_locks)
2451 {
2452 	struct sctp_tmit_chunk *chk;
2453 	int tsize, cntDel;
2454 	u_int16_t nxt_todel;
2455 
2456 	cntDel = 0;
2457 	if (asoc->fragmented_delivery_inprogress) {
2458 		sctp_service_reassembly(stcb, asoc, hold_locks);
2459 	}
2460 	/* Can we proceed further, i.e. is the PD-API complete? */
2461 	if (asoc->fragmented_delivery_inprogress) {
2462 		/* no */
2463 		return;
2464 	}
2465 
2466 	/*
2467 	 * Yes, reassembly delivery is no longer in progress; see if we
2468 	 * have some on the sb hold queue.
2469 	 */
2470 	do {
2471 		if (stcb->sctp_socket->so_rcv.sb_cc >= stcb->sctp_socket->so_rcv.sb_hiwat) {
2472 			if (cntDel == 0)
2473 				sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2474 			break;
2475 		}
2476 		/* If deliver_data says no we must stop */
2477 		if (sctp_deliver_data(stcb, asoc, (struct sctp_tmit_chunk *)NULL, hold_locks) == 0)
2478 			break;
2479 		cntDel++;
2480 		chk = TAILQ_FIRST(&asoc->delivery_queue);
2481 	} while (chk);
2482 	if (cntDel) {
2483 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2484 	}
2485 	/*
2486 	 * Now, is there some other chunk I can deliver
2487 	 * from the reassembly queue?
2488 	 */
2489 	chk = TAILQ_FIRST(&asoc->reasmqueue);
2490 	if (chk == NULL) {
2491 		asoc->size_on_reasm_queue = 0;
2492 		asoc->cnt_on_reasm_queue = 0;
2493 		return;
2494 	}
2495 	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2496 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2497 	    ((nxt_todel == chk->rec.data.stream_seq) ||
2498 	     (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2499 		/*
2500 		 * Yep, the first one is here. We set up to
2501 		 * start reception by backing down the TSN
2502 		 * just in case we can't deliver.
2503 		 */
2504 
2505 		/*
2506 		 * Before we start, though, the delivery queue must be
2507 		 * empty and either all of the message should be here
2508 		 * or at least 1/4 of the socket buffer max worth of it
2509 		 * should be queued, so something can be delivered.
2510 		 */
2511 		if (TAILQ_EMPTY(&asoc->delivery_queue) &&
2512 		    (sctp_is_all_msg_on_reasm(asoc, &tsize) ||
2513 		     (asoc->size_on_reasm_queue >=
2514 		      (stcb->sctp_socket->so_rcv.sb_hiwat >> 2) && tsize))) {
2515 			asoc->fragmented_delivery_inprogress = 1;
2516 			asoc->tsn_last_delivered = chk->rec.data.TSN_seq-1;
2517 			asoc->str_of_pdapi = chk->rec.data.stream_number;
2518 			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2519 			asoc->fragment_flags = chk->rec.data.rcv_flags;
2520 			sctp_service_reassembly(stcb, asoc, hold_locks);
2521 		}
2522 	}
2523 }
2524 
2525 int
2526 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2527     struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2528     struct sctp_nets *net, u_int32_t *high_tsn)
2529 {
2530 	struct sctp_data_chunk *ch, chunk_buf;
2531 	struct sctp_association *asoc;
2532 	int num_chunks = 0;	/* number of DATA chunks processed */
2533 	int chk_length, break_flag, last_chunk;
2534 	int abort_flag = 0, was_a_gap = 0;
2535 	struct mbuf *m;
2536 
2537 	/* set the rwnd */
2538 	sctp_set_rwnd(stcb, &stcb->asoc);
2539 
2540 	m = *mm;
2541 	asoc = &stcb->asoc;
2542 	if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2543 	    stcb->asoc.cumulative_tsn, MAX_TSN)) {
2544 		/* there was a gap before this data was processed */
2545 		was_a_gap = 1;
2546 	}
2547 	/*
2548 	 * setup where we got the last DATA packet from for
2549 	 * any SACK that may need to go out. Don't bump
2550 	 * the net. This is done ONLY when a chunk
2551 	 * is assigned.
2552 	 */
2553 	asoc->last_data_chunk_from = net;
2554 
2555 	/*
2556 	 * Now before we proceed we must figure out if this
2557 	 * is a wasted cluster... i.e. it is a small packet
2558 	 * sent in and yet the driver underneath allocated a
2559 	 * full cluster for it. If so we must copy it to a
2560 	 * smaller mbuf and free up the cluster mbuf. This
2561 	 * will help with cluster starvation.
2562 	 */
2563 	if (m->m_len < (long)MHLEN && m->m_next == NULL) {
2564 		/* we only handle mbufs that are singletons.. not chains */
2565 		MGET(m, M_DONTWAIT, MT_DATA);
2566 		if (m) {
2567 			/* ok, let's see if we can copy the data up */
2568 			vaddr_t *from, *to;
2569 
2570 			if ((*mm)->m_flags & M_PKTHDR) {
2571 				/* got to copy the header first */
2572 #ifdef __APPLE__
2573 				M_COPY_PKTHDR(m, (*mm));
2574 #else
2575 				M_MOVE_PKTHDR(m, (*mm));
2576 #endif
2577 			}
2578 			/* get the pointers and copy */
2579 			to = mtod(m, vaddr_t *);
2580 			from = mtod((*mm), vaddr_t *);
2581 			memcpy(to, from, (*mm)->m_len);
2582 			/* copy the length and free up the old */
2583 			m->m_len = (*mm)->m_len;
2584 			sctp_m_freem(*mm);
2585 			/* success, hand the new mbuf back */
2586 			*mm = m;
2587 		} else {
2588 			/* We are in trouble in the mbuf world .. yikes */
2589 			m = *mm;
2590 		}
2591 	}
2592 	/* get pointer to the first chunk header */
2593 	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2594 	    sizeof(chunk_buf), (u_int8_t *)&chunk_buf);
2595 	if (ch == NULL) {
2596 		printf(" ... it's short\n");
2597 		return (1);
2598 	}
2599 	/*
2600 	 * process all DATA chunks...
2601 	 */
2602 
2603 #ifdef SCTP_DEBUG
2604 	if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
2605 		printf("In process data off:%d length:%d iphlen:%d ch->type:%d\n",
2606 		    *offset, length, iphlen, (int)ch->ch.chunk_type);
2607 	}
2608 #endif
2609 
2610 	*high_tsn = asoc->cumulative_tsn;
2611 	break_flag = 0;
2612 	while (ch->ch.chunk_type == SCTP_DATA) {
2613 		/* validate chunk length */
2614 		chk_length = ntohs(ch->ch.chunk_length);
2615 		if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1 ||
2616 		    length - *offset < chk_length) {
2617 			/*
2618 			 * Need to send an abort since we had an invalid
2619 			 * data chunk.
2620 			 */
2621 			struct mbuf *op_err;
2622 			MGET(op_err, M_DONTWAIT, MT_DATA);
2623 			if (op_err) {
2624 				struct sctp_paramhdr *ph;
2625 				u_int32_t *ippp;
2626 
2627 				op_err->m_len = sizeof(struct sctp_paramhdr) +
2628 				    sizeof(*ippp);
2629 				ph = mtod(op_err, struct sctp_paramhdr *);
2630 				ph->param_type =
2631 				    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2632 				ph->param_length = htons(op_err->m_len);
2633 				ippp = (u_int32_t *)(ph + 1);
2634 				*ippp = htonl(0x30000001);
2635 			}
2636 			sctp_abort_association(inp, stcb, m, iphlen, sh,
2637 			    op_err);
2638 			return (2);
2639 		}
2640 #ifdef SCTP_DEBUG
2641 		if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
2642 			printf("A chunk of len:%d to process (tot:%d)\n",
2643 			    chk_length, length - *offset);
2644 		}
2645 #endif
2646 
2647 #ifdef SCTP_AUDITING_ENABLED
2648 		sctp_audit_log(0xB1, 0);
2649 #endif
2650 		if (SCTP_SIZE32(chk_length) == length - *offset) {
2651 			last_chunk = 1;
2652 		} else {
2653 			last_chunk = 0;
2654 		}
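		/*
		 * When the DATA chunk is the final thing in this packet,
		 * sctp_process_a_data_chunk() may consume *mm directly
		 * instead of copying the payload out (the last_chunk path).
		 */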
2655 		if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2656 		    chk_length, net, high_tsn, &abort_flag, &break_flag,
2657 		    last_chunk)) {
2658 			num_chunks++;
2659 #ifdef SCTP_DEBUG
2660 			if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
2661 				printf("Now incr num_chunks to %d\n",
2662 				    num_chunks);
2663 			}
2664 #endif
2665 		}
2666 		if (abort_flag)
2667 			return (2);
2668 
2669 		if (break_flag) {
2670 			/*
2671 			 * Set because we ran out of rwnd space and have no
2672 			 * drop report space left.
2673 			 */
2674 			break;
2675 		}
2676 
2677 		*offset += SCTP_SIZE32(chk_length);
2678 		if (*offset >= length) {
2679 			/* no more data left in the mbuf chain */
2680 			break;
2681 		}
2682 		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2683 		    sizeof(chunk_buf), (u_int8_t *)&chunk_buf);
2684 		if (ch == NULL) {
2685 			*offset = length;
2686 			break;
2687 		}
2688 	} /* while */
2689 	if (break_flag) {
2690 		/*
2691 		 * we need to report rwnd overrun drops.
2692 		 */
2693 		sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
2694 	}
2695 	if (num_chunks) {
2696 		/*
2697 		 * Did we get data, if so update the time for
2698 		 * auto-close and give peer credit for being
2699 		 * alive.
2700 		 */
2701 		sctp_pegs[SCTP_DATA_DG_RECV]++;
2702 		stcb->asoc.overall_error_count = 0;
2703 		SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2704 	}
2705 	/* now service all of the reassm queue and delivery queue */
2706 	sctp_service_queues(stcb, asoc, 0);
2707 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2708 		/*
2709 		 * Assure that we ack right away by making
2710 		 * sure that a d-ack timer is running. So the
2711 		 * sack_check will send a sack.
2712 		 */
2713 		sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb,
2714 		    net);
2715 	}
2716 	/* Start a sack timer or QUEUE a SACK for sending */
2717 	sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
2718 	if (abort_flag)
2719 		return (2);
2720 
2721 	return (0);
2722 }
2723 
2724 static void
2725 sctp_handle_segments(struct sctp_tcb *stcb, struct sctp_association *asoc,
2726     struct sctp_sack_chunk *ch, u_long last_tsn, u_long *biggest_tsn_acked,
2727     u_long *biggest_newly_acked_tsn, int num_seg, int *ecn_seg_sums)
2728 {
2729 	/************************************************/
2730 	/* process fragments and update sendqueue        */
2731 	/************************************************/
2732 	struct sctp_sack *sack;
2733 	struct sctp_gap_ack_block *frag;
2734 	struct sctp_tmit_chunk *tp1;
2735 	int i;
2736 	unsigned int j;
2737 #ifdef SCTP_FR_LOGGING
2738 	int num_frs=0;
2739 #endif
2740 	uint16_t frag_strt, frag_end, primary_flag_set;
2741 	u_long last_frag_high;
2742 
2743 	if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
2744 		primary_flag_set = 1;
2745 	} else {
2746 		primary_flag_set = 0;
2747 	}
2748 
2749 	sack = &ch->sack;
2750 	frag = (struct sctp_gap_ack_block *)((vaddr_t)sack +
2751 	    sizeof(struct sctp_sack));
2752 	tp1 = NULL;
2753 	last_frag_high = 0;
2754 	for (i = 0; i < num_seg; i++) {
2755 		frag_strt = ntohs(frag->start);
2756 		frag_end = ntohs(frag->end);
2757 		/* some sanity checks on the fragment offsets */
2758 		if (frag_strt > frag_end) {
2759 			/* this one is malformed, skip */
2760 			frag++;
2761 			continue;
2762 		}
2763 		if (compare_with_wrap((frag_end+last_tsn), *biggest_tsn_acked,
2764 		    MAX_TSN))
2765 			*biggest_tsn_acked = frag_end+last_tsn;
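		/*
		 * Gap Ack Block offsets are relative to the SACK's own
		 * cum-ack: e.g. with last_tsn 100, a block with start 2
		 * and end 3 acknowledges TSNs 102 and 103.
		 */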
2766 
2767 		/* mark acked dgs and find out the highest TSN being acked */
2768 		if (tp1 == NULL) {
2769 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
2770 
2771 			/* save the locations of the last frags */
2772 			last_frag_high = frag_end + last_tsn;
2773 		} else {
2774 			/*
2775 			 * now let's see if we need to reset the queue
2776 			 * due to an out-of-order SACK fragment
2777 			 */
2778 			if (compare_with_wrap(frag_strt+last_tsn,
2779 			    last_frag_high, MAX_TSN)) {
2780 				/*
2781 				 * if the new frag starts after the last TSN
2782 				 * frag covered, we are ok
2783 				 * and this one is beyond the last one
2784 				 */
2785 				;
2786 			} else {
2787 				/*
2788 				 * ok, they have reset us, so we need to reset
2789 				 * the queue; this will cause extra hunting, but
2790 				 * hey, they chose the performance
2791 				 * hit when they failed to order their gaps.
2792 				 */
2793 				tp1 = TAILQ_FIRST(&asoc->sent_queue);
2794 			}
2795 			last_frag_high = frag_end + last_tsn;
2796 		}
2797 		for (j = frag_strt + last_tsn; j <= frag_end + last_tsn; j++) {
2798 			while (tp1) {
2799 #ifdef SCTP_FR_LOGGING
2800 				if (tp1->rec.data.doing_fast_retransmit)
2801 					num_frs++;
2802 #endif
2803 
2804 				if (tp1->rec.data.TSN_seq == j) {
2805 					if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2806 						/* must be held until cum-ack passes */
2807 						/* ECN Nonce: Add the nonce value to the sender's nonce sum */
2808 						if (tp1->sent < SCTP_DATAGRAM_ACKED) {
2809 							/*
2810 							 * If it is less than
2811 							 * ACKED, it is now
2812 							 * no-longer in flight.
2813 							 * Higher values may
2814 							 * already be set via
2815 							 * previous Gap Ack
2816 							 * Blocks...
2817 							 * i.e. ACKED or MARKED.
2818 							 */
2819 							if (compare_with_wrap(tp1->rec.data.TSN_seq,
2820 							    *biggest_newly_acked_tsn,
2821 							    MAX_TSN)) {
2822 								*biggest_newly_acked_tsn =
2823 								    tp1->rec.data.TSN_seq;
2824 							}
2825 							sctp_flight_size_decrease(tp1);
2826 
2827 							sctp_total_flight_decrease(stcb, tp1);
2828 
2829 							if (tp1->snd_count < 2) {
2830 								/* True non-retransmitted chunk */
2831 								tp1->whoTo->net_ack2 +=
2832 								    tp1->send_size;
2833 
2834 								/* update RTO too? */
2835 								if (tp1->do_rtt) {
2836 									tp1->whoTo->RTO =
2837 									    sctp_calculate_rto(stcb,
2838 									    asoc,
2839 									    tp1->whoTo,
2840 									    &tp1->sent_rcv_time);
2841 									tp1->whoTo->rto_pending = 0;
2842 									tp1->do_rtt = 0;
2843 								}
2844 							}
2845 						}
2846 						if (tp1->sent <= SCTP_DATAGRAM_RESEND &&
2847 						    tp1->sent != SCTP_DATAGRAM_UNSENT &&
2848 						    compare_with_wrap(tp1->rec.data.TSN_seq,
2849 						    asoc->this_sack_highest_gap,
2850 						    MAX_TSN)) {
2851 							asoc->this_sack_highest_gap =
2852 							    tp1->rec.data.TSN_seq;
2853 							if (primary_flag_set) {
2854 								tp1->whoTo->cacc_saw_newack = 1;
2855 							}
2856 						}
2857 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2858 #ifdef SCTP_DEBUG
2859 							if (sctp_debug_on &
2860 							    SCTP_DEBUG_INDATA3) {
2861 								printf("Hmm. one that is in RESEND that is now ACKED\n");
2862 							}
2863 #endif
2864 							sctp_ucount_decr(asoc->sent_queue_retran_cnt);
2865 #ifdef SCTP_AUDITING_ENABLED
2866 							sctp_audit_log(0xB2,
2867 							    (asoc->sent_queue_retran_cnt & 0x000000ff));
2868 #endif
2869 
2870 						}
2871 						(*ecn_seg_sums) += tp1->rec.data.ect_nonce;
2872 						(*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
2873 						tp1->sent = SCTP_DATAGRAM_MARKED;
2874 					}
2875 					break;
2876 				} /* if (tp1->TSN_seq == j) */
2877 				if (compare_with_wrap(tp1->rec.data.TSN_seq, j,
2878 				    MAX_TSN))
2879 					break;
2880 				tp1 = TAILQ_NEXT(tp1, sctp_next);
2881 			}/* end while (tp1) */
2882 		}  /* end for (j = fragStart */
2883 		frag++; /* next one */
2884 	}
2885 #ifdef SCTP_FR_LOGGING
2886 	if (num_frs)
2887 		sctp_log_fr(*biggest_tsn_acked, *biggest_newly_acked_tsn,
2888 		    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
2889 #endif
2890 }
2891 
2892 static void
2893 sctp_check_for_revoked(struct sctp_association *asoc, u_long cum_ack,
2894     u_long biggest_tsn_acked)
2895 {
2896 	struct sctp_tmit_chunk *tp1;
2897 	int tot_revoked=0;
2898 
2899 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
2900 	while (tp1) {
2901 		if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
2902 		    MAX_TSN)) {
2903 			/*
2904 			 * ok, this guy is either ACKED or MARKED. If it is
2905 			 * ACKED, it has been previously acked but not this
2906 			 * time, i.e. revoked. If it is MARKED, it was ACK'ed again.
2907 			 */
2908 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
2909 				/* it has been revoked */
2910 				/*
2911 				 * We do NOT add back to flight size here since
2912 				 * it is really NOT in flight. Resend (when/if
2913 				 * it occurs) will add to flight size.
2914 				 */
2915 				tp1->sent = SCTP_DATAGRAM_SENT;
2916 				tot_revoked++;
2917 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
2918 				/* it has been re-acked in this SACK */
2919 				tp1->sent = SCTP_DATAGRAM_ACKED;
2920 			}
2921 		}
2922 		if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
2923 		    MAX_TSN)) {
2924 			/* above the sack */
2925 			break;
2926 		}
2927 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
2928 			break;
2929 		tp1 = TAILQ_NEXT(tp1, sctp_next);
2930 	}
2931 	if (tot_revoked > 0) {
2932 		/* Setup the ecn nonce re-sync point. We
2933 		 * do this since once data is revoked
2934 		 * we begin to retransmit things, which
2935 		 * do NOT have the ECN bits set. This means
2936 		 * we are now out of sync and must wait until
2937 		 * we get back in sync with the peer to
2938 		 * check ECN bits.
2939 		 */
2940 		tp1 = TAILQ_FIRST(&asoc->send_queue);
2941 		if (tp1 == NULL) {
2942 			asoc->nonce_resync_tsn = asoc->sending_seq;
2943 		} else {
2944 			asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq;
2945 		}
2946 		asoc->nonce_wait_for_ecne = 0;
2947 		asoc->nonce_sum_check = 0;
2948 	}
2949 
2950 }
2951 
2952 extern int sctp_peer_chunk_oh;
2953 
2954 static void
2955 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
2956     u_long biggest_tsn_acked, int strike_enabled,
2957     u_long biggest_tsn_newly_acked, int accum_moved)
2958 {
2959 	struct sctp_tmit_chunk *tp1;
2960 	int strike_flag=0;
2961 	struct timeval now;
2962 	int tot_retrans=0;
2963 	u_int32_t sending_seq;
2964 	int primary_switch_active = 0;
2965 	int double_switch_active = 0;
2966 
2967 	/* select the sending_seq, this is
2968 	 * either the next thing ready to
2969 	 * be sent but not transmitted, OR,
2970 	 * the next seq we assign.
2971 	 */
2972 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
2973 	if (tp1 == NULL) {
2974 		sending_seq = asoc->sending_seq;
2975 	} else {
2976 		sending_seq = tp1->rec.data.TSN_seq;
2977 	}
2978 
2979 	if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
2980 		primary_switch_active = 1;
2981 	}
2982 	if (asoc->primary_destination->dest_state & SCTP_ADDR_DOUBLE_SWITCH) {
2983 		double_switch_active = 1;
2984 	}
2985 	if (stcb->asoc.peer_supports_prsctp ) {
2986 		SCTP_GETTIME_TIMEVAL(&now);
2987 	}
2988 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
2989 	while (tp1) {
2990 		strike_flag=0;
2991 		if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
2992 		    MAX_TSN) ||
2993 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
2994 			/* done */
2995 			break;
2996 		}
2997 		if ((tp1->flags & (SCTP_PR_SCTP_ENABLED|SCTP_PR_SCTP_BUFFER)) ==
2998 		    SCTP_PR_SCTP_ENABLED &&
2999 		    tp1->sent < SCTP_DATAGRAM_ACKED) {
3000 			/* Is it expired? */
3001 #ifndef __FreeBSD__
3002 			if (timercmp(&now, &tp1->rec.data.timetodrop, >))
3003 #else
3004 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >))
3005 #endif
3006 			{
3007 				/* Yes so drop it */
3008 				if (tp1->data != NULL) {
3009 					sctp_release_pr_sctp_chunk(stcb, tp1,
3010 					    (SCTP_RESPONSE_TO_USER_REQ|SCTP_NOTIFY_DATAGRAM_SENT),
3011 					    &asoc->sent_queue);
3012 				}
3013 				tp1 = TAILQ_NEXT(tp1, sctp_next);
3014 				continue;
3015 			}
3016 		}
3017 
3018 		if (compare_with_wrap(tp1->rec.data.TSN_seq,
3019 		    asoc->this_sack_highest_gap, MAX_TSN)) {
3020 			/* we are beyond the tsn in the sack  */
3021  			break;
3022 		}
3023 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3024 			/* either a RESEND, ACKED, or MARKED */
3025 			/* skip */
3026 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3027 			continue;
3028 		}
3029 		if (primary_switch_active && (strike_enabled == 0)) {
3030 			if (tp1->whoTo != asoc->primary_destination) {
3031 				/*
3032 				 * We can only strike things on the primary if
3033 				 * the strike_enabled flag is clear
3034 				 */
3035 				tp1 = TAILQ_NEXT(tp1, sctp_next);
3036 				continue;
3037 			}
3038 		} else if (primary_switch_active) {
3039 			if (tp1->whoTo->cacc_saw_newack == 0) {
3040 				/*
3041 				 * Only one was received but it was NOT
3042 				 * this one.
3043 				 */
3044 				tp1 = TAILQ_NEXT(tp1, sctp_next);
3045 				continue;
3046 			}
3047 		}
3048 		if (double_switch_active &&
3049 		    (compare_with_wrap(asoc->primary_destination->next_tsn_at_change,
3050 		    tp1->rec.data.TSN_seq, MAX_TSN))) {
3051 			/*
3052 			 * With a double switch we do NOT mark unless we
3053 			 * are beyond the switch point.
3054 			 */
3055 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3056 			continue;
3057 		}
3058 		/*
3059 		 * Here we check to see if we have already done a FR
3060 		 * and if so we see if the biggest TSN we saw in the sack is
3061 		 * smaller than the recovery point. If so we don't strike the
3062 		 * tsn... otherwise we CAN strike the TSN.
3063 		 */
3064 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3065 		 	/*
3066 		 	 * Strike the TSN if in fast-recovery and
3067 			 * cum-ack moved.
3068 			 */
3069 			tp1->sent++;
3070 		} else if (tp1->rec.data.doing_fast_retransmit) {
3071 			/*
3072 			 * For those that have done a FR we must
3073 			 * take special consideration if we strike. I.e.,
3074 			 * the biggest_newly_acked must be higher
3075 			 * than the sending_seq at the time we did
3076 			 * the FR.
3077 			 */
3078 #ifdef SCTP_FR_TO_ALTERNATE
3079 			/*
3080 			 * If FR's go to new networks, then we
3081 			 * must only do this for singly homed asoc's. However
3082 			 * if the FR's go to the same network (Armando's work)
3083 			 * then it's ok to FR multiple times.
3084 			 */
3085 			if (asoc->numnets < 2)
3086 #else
3087 			if (1)
3088 #endif
3089 			{
3090 				if ((compare_with_wrap(biggest_tsn_newly_acked,
3091 				    tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||
3092 				    (biggest_tsn_newly_acked ==
3093 				     tp1->rec.data.fast_retran_tsn)) {
3094 					/*
3095 					 * Strike the TSN, since this ack is
3096 					 * beyond where things were when we did
3097 					 * a FR.
3098 					 */
3099 #ifdef SCTP_FR_LOGGING
3100 					sctp_log_fr(biggest_tsn_newly_acked,
3101 					    tp1->rec.data.TSN_seq,
3102 					    tp1->rec.data.fast_retran_tsn,
3103 					    SCTP_FR_LOG_STRIKE_CHUNK);
3104 #endif
3105 					tp1->sent++;
3106 					strike_flag=1;
3107 				}
3108 			}
3109  		} else if (compare_with_wrap(tp1->rec.data.TSN_seq,
3110  		    biggest_tsn_newly_acked, MAX_TSN)) {
3111 			/*
3112 			 * We don't strike these:
3113 			 * This is the HTNA algorithm, i.e. we don't strike
3114 			 * if our TSN is larger than the Highest TSN Newly
3115 			 * Acked.
3116 			 */
3117 			;
3118 	 	} else {
3119 		 	/* Strike the TSN */
3120 			tp1->sent++;
3121 		}
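		/*
		 * Each strike above bumps tp1->sent; once it reaches
		 * SCTP_DATAGRAM_RESEND the chunk is queued for fast
		 * retransmission below.
		 */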
3122 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3123 			/* Increment the count to resend */
3124 			struct sctp_nets *alt;
3125 
3126 #ifdef SCTP_FR_LOGGING
3127 			sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3128 			    0, SCTP_FR_MARKED);
3129 #endif
3130 			if (strike_flag) {
3131 				/* This is a subsequent FR */
3132 				sctp_pegs[SCTP_DUP_FR]++;
3133 			}
3134 			asoc->sent_queue_retran_cnt++;
3135 #ifdef SCTP_FR_TO_ALTERNATE
3136 			/* Can we find an alternate? */
3137 			alt = sctp_find_alternate_net(stcb, tp1->whoTo);
3138 #else
3139 			/*
3140 			 * default behavior is to NOT retransmit FR's
3141 			 * to an alternate. Armando Caro's paper details
3142 			 * why.
3143 			 */
3144 			alt = tp1->whoTo;
3145 #endif
3146 			tp1->rec.data.doing_fast_retransmit = 1;
3147 			tot_retrans++;
3148 			/* mark the sending seq for possible subsequent FR's */
3149 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3150 				/*
3151 				 * If the send queue is empty then it is the
3152 				 * next sequence number that will be assigned, so
3153 				 * we subtract one from this to get the one we
3154 				 * last sent.
3155 				 */
3156  				tp1->rec.data.fast_retran_tsn = sending_seq - 1;
3157 			} else {
3158 				/*
3159 				 * If there are chunks on the send queue
3160 				 * (unsent data that has made it from the
3161 				 * stream queues but not out the door), we take
3162 				 * the first one (which will have the lowest
3163 				 * TSN) and subtract one to get the one we last
3164 				 * sent.
3165 				 */
3166 				struct sctp_tmit_chunk *ttt;
3167 				ttt = TAILQ_FIRST(&asoc->send_queue);
3168 				tp1->rec.data.fast_retran_tsn =
3169 				    ttt->rec.data.TSN_seq - 1;
3170 			}
3171 			if (tp1->do_rtt) {
3172 				/*
3173 				 * this guy had an RTO calculation pending on it,
3174 				 * cancel it
3175 				 */
3176 				tp1->whoTo->rto_pending = 0;
3177 				tp1->do_rtt = 0;
3178 			}
3179 			/* fix counts and things */
3180 
3181 			tp1->whoTo->net_ack++;
3182 			sctp_flight_size_decrease(tp1);
3183 #ifdef SCTP_LOG_RWND
3184 			sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3185 				      asoc->peers_rwnd , tp1->send_size, sctp_peer_chunk_oh);
3186 #endif
3187 			/* add back to the rwnd */
3188 			asoc->peers_rwnd += (tp1->send_size + sctp_peer_chunk_oh);
3189 
3190 			/* remove from the total flight */
3191 			sctp_total_flight_decrease(stcb, tp1);
3192 			if (alt != tp1->whoTo) {
3193 				/* yes, there is an alternate. */
3194 				sctp_free_remote_addr(tp1->whoTo);
3195 				tp1->whoTo = alt;
3196 				alt->ref_count++;
3197 			}
3198 		}
3199 		tp1 = TAILQ_NEXT(tp1, sctp_next);
3200 	} /* while (tp1) */
3201 
3202 	if (tot_retrans > 0) {
3203 		/* Setup the ecn nonce re-sync point. We
3204 		 * do this since once we FR something,
3205 		 * we introduce a Karn's rule scenario and
3206 		 * won't know the totals for the ECN bits.
3207 		 */
3208 		asoc->nonce_resync_tsn = sending_seq;
3209 		asoc->nonce_wait_for_ecne = 0;
3210 		asoc->nonce_sum_check = 0;
3211 	}
3212 
3213 }
3214 
3215 struct sctp_tmit_chunk *
3216 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3217     struct sctp_association *asoc)
3218 {
3219 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv=NULL;
3220 	struct timeval now;
3221 	int now_filled=0;
3222 
3223 	if (asoc->peer_supports_prsctp == 0) {
3224 		return (NULL);
3225 	}
3226 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3227 	while (tp1) {
3228 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3229 		    tp1->sent != SCTP_DATAGRAM_RESEND) {
3230 			/* no chance to advance, out of here */
3231 			break;
3232 		}
3233 		if ((tp1->flags & SCTP_PR_SCTP_ENABLED) == 0) {
3234 			/*
3235 			 * We can't fwd-tsn past any that are reliable
3236 			 * aka retransmitted until the asoc fails.
3237 			 */
3238 			break;
3239 		}
3240 		if (!now_filled) {
3241 			SCTP_GETTIME_TIMEVAL(&now);
3242 			now_filled = 1;
3243 		}
3244 		tp2 = TAILQ_NEXT(tp1, sctp_next);
3245 		/*
3246 		 * now we have a chunk which is marked for another
3247 		 * retransmission to a PR-stream but has maybe run
3248 		 * out of its chances already OR has been
3249 		 * marked to skip now. Can we skip it if it's a
3250 		 * resend?
3251 		 */
3252 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3253 		    (tp1->flags & SCTP_PR_SCTP_BUFFER) == 0) {
3254 			/*
3255 			 * Now is this one marked for resend and its time
3256 			 * is now up?
3257 			 */
3258 #ifndef __FreeBSD__
3259 			if (timercmp(&now, &tp1->rec.data.timetodrop, >))
3260 #else
3261 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >))
3262 #endif
3263 			{
3264 				/* Yes so drop it */
3265 				if (tp1->data) {
3266 					sctp_release_pr_sctp_chunk(stcb, tp1,
3267 					    (SCTP_RESPONSE_TO_USER_REQ|SCTP_NOTIFY_DATAGRAM_SENT),
3268 					    &asoc->sent_queue);
3269 				}
3270 			} else {
3271 				/*
3272 				 * No, we are done when we hit one marked for
3273 				 * resend whose time has not expired.
3274 				 */
3275 				break;
3276 			}
3277 		}
3278 		/*
3279 		 * Ok now if this chunk is marked to drop it
3280 		 * we can clean up the chunk, advance our peer ack point
3281 		 * and we can check the next chunk.
3282 		 */
3283 		if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3284 			/* advance PeerAckPoint goes forward */
3285 			asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3286 			a_adv = tp1;
3287 			/*
3288 			 * we don't want to de-queue it here. Just wait for the
3289 			 * next peer SACK to come with a new cumTSN and then
3290 			 * the chunk will be droped in the normal fashion.
3291 			 */
3292 			if (tp1->data) {
3293 				sctp_free_bufspace(stcb, asoc, tp1);
3294 #ifdef SCTP_DEBUG
3295 				if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
3296 					printf("--total out:%lu total_mbuf_out:%lu\n",
3297 					    (u_long)asoc->total_output_queue_size,
3298 					    (u_long)asoc->total_output_mbuf_queue_size);
3299 				}
3300 #endif
3301 				/*
3302 				 * Maybe there should be another notification
3303 				 * type
3304 				 */
3305 				sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3306 				    (SCTP_RESPONSE_TO_USER_REQ|SCTP_NOTIFY_DATAGRAM_SENT),
3307 				    tp1);
3308 				sctp_m_freem(tp1->data);
3309 				tp1->data = NULL;
3310 				sctp_sowwakeup(stcb->sctp_ep,
3311 				    stcb->sctp_socket);
3312 			}
3313 		} else {
3314 			/* If it is still in RESEND we can advance no further */
3315 			break;
3316 		}
3317 		/*
3318 		 * If we hit here we just dumped tp1, move to next
3319 		 * tsn on sent queue.
3320 		 */
3321 		tp1 = tp2;
3322 	}
3323 	return (a_adv);
3324 }
3325 
3326 #ifdef SCTP_HIGH_SPEED
3327 struct sctp_hs_raise_drop {
3328 	int32_t cwnd;
3329 	int32_t increase;
3330 	int32_t drop_percent;
3331 };
3332 
3333 #define SCTP_HS_TABLE_SIZE 73
3334 
3335 struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = {
3336 	{38,1,50},	/* 0   */
3337 	{118,2,44},	/* 1   */
3338 	{221,3,41},	/* 2   */
3339 	{347,4,38},	/* 3   */
3340 	{495,5,37},	/* 4   */
3341 	{663,6,35},	/* 5   */
3342 	{851,7,34},	/* 6   */
3343 	{1058,8,33},	/* 7   */
3344 	{1284,9,32},	/* 8   */
3345 	{1529,10,31},	/* 9   */
3346 	{1793,11,30},	/* 10  */
3347 	{2076,12,29},	/* 11  */
3348 	{2378,13,28},	/* 12  */
3349 	{2699,14,28},	/* 13  */
3350 	{3039,15,27},	/* 14  */
3351 	{3399,16,27},	/* 15  */
3352 	{3778,17,26},	/* 16  */
3353 	{4177,18,26},	/* 17  */
3354 	{4596,19,25},	/* 18  */
3355 	{5036,20,25},	/* 19  */
3356 	{5497,21,24},	/* 20  */
3357 	{5979,22,24},	/* 21  */
3358 	{6483,23,23},	/* 22  */
3359 	{7009,24,23},	/* 23  */
3360 	{7558,25,22},	/* 24  */
3361 	{8130,26,22},	/* 25  */
3362 	{8726,27,22},	/* 26  */
3363 	{9346,28,21},	/* 27  */
3364 	{9991,29,21},	/* 28  */
3365 	{10661,30,21},  /* 29  */
3366 	{11358,31,20},  /* 30  */
3367 	{12082,32,20},  /* 31  */
3368 	{12834,33,20},  /* 32  */
3369 	{13614,34,19},  /* 33  */
3370 	{14424,35,19},  /* 34  */
3371 	{15265,36,19},  /* 35  */
3372 	{16137,37,19},  /* 36  */
3373 	{17042,38,18},  /* 37  */
3374 	{17981,39,18},  /* 38  */
3375 	{18955,40,18},  /* 39  */
3376 	{19965,41,17},  /* 40  */
3377 	{21013,42,17},  /* 41  */
3378 	{22101,43,17},  /* 42  */
3379 	{23230,44,17},  /* 43  */
3380 	{24402,45,16},  /* 44  */
3381 	{25618,46,16},  /* 45  */
3382 	{26881,47,16},  /* 46  */
3383 	{28193,48,16},  /* 47  */
3384 	{29557,49,15},  /* 48  */
3385 	{30975,50,15},  /* 49  */
3386 	{32450,51,15},  /* 50  */
3387 	{33986,52,15},  /* 51  */
3388 	{35586,53,14},  /* 52  */
3389 	{37253,54,14},  /* 53  */
3390 	{38992,55,14},  /* 54  */
3391 	{40808,56,14},  /* 55  */
3392 	{42707,57,13},  /* 56  */
3393 	{44694,58,13},  /* 57  */
3394 	{46776,59,13},  /* 58  */
3395 	{48961,60,13},  /* 59  */
3396 	{51258,61,13},  /* 60  */
3397 	{53677,62,12},  /* 61  */
3398 	{56230,63,12},  /* 62  */
3399 	{58932,64,12},  /* 63  */
3400 	{61799,65,12},  /* 64  */
3401 	{64851,66,11},  /* 65  */
3402 	{68113,67,11},  /* 66  */
3403 	{71617,68,11},  /* 67  */
3404 	{75401,69,10},  /* 68  */
3405 	{79517,70,10},  /* 69  */
3406 	{84035,71,10},  /* 70  */
3407 	{89053,72,10},  /* 71  */
3408 	{94717,73,9}    /* 72  */
3409 };
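
/*
 * Worked example of the table above (illustrative only): rows are
 * indexed by cwnd expressed in units of 1024 bytes (cwnd >> 10).
 * A 1MB cwnd gives cur_val = 1024; the first row with a larger
 * cwnd bound is row 7 {1058, 8, 33}, so a full window of newly
 * acked data grows cwnd by 8 << 10 = 8192 bytes, while a loss
 * event would cut it by 33%.  A minimal sketch of the lookup
 * (hypothetical helper, not compiled in; the real code starts the
 * scan at net->last_hs_used rather than at row 0):
 */
#if 0
static int
sctp_hs_table_indx(int cwnd_bytes)
{
	int cur_val = cwnd_bytes >> 10;
	int i;

	/* find the first row whose cwnd bound exceeds the current cwnd */
	for (i = 0; i < SCTP_HS_TABLE_SIZE; i++) {
		if (cur_val < sctp_cwnd_adjust[i].cwnd)
			return (i);
	}
	/* larger than every bound: clamp to the last row */
	return (SCTP_HS_TABLE_SIZE - 1);
}
#endif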
3410 
3411 static void
3412 sctp_hs_cwnd_increase(struct sctp_nets *net)
3413 {
3414 	int cur_val, i, indx, incr;
3415 
3416 	cur_val = net->cwnd >> 10;
3417 	indx = SCTP_HS_TABLE_SIZE - 1;
3418 
3419 	if (cur_val < sctp_cwnd_adjust[0].cwnd) {
3420 		/* normal mode */
3421 		if (net->net_ack > net->mtu) {
3422 			net->cwnd += net->mtu;
3423 #ifdef SCTP_CWND_LOGGING
3424 			sctp_log_cwnd(net, net->mtu, SCTP_CWND_LOG_FROM_SS);
3425 #endif
3426 		} else {
3427 			net->cwnd += net->net_ack;
3428 #ifdef SCTP_CWND_LOGGING
3429 			sctp_log_cwnd(net, net->net_ack, SCTP_CWND_LOG_FROM_SS);
3430 #endif
3431 		}
3432 	} else {
3433 		for (i=net->last_hs_used; i<SCTP_HS_TABLE_SIZE; i++) {
3434 			if (cur_val < sctp_cwnd_adjust[i].cwnd) {
3435 				indx = i;
3436 				break;
3437 			}
3438 		}
3439 		net->last_hs_used = indx;
3440 		incr = ((sctp_cwnd_adjust[indx].increase) << 10);
3441 		net->cwnd += incr;
3442 #ifdef SCTP_CWND_LOGGING
3443 		sctp_log_cwnd(net, incr, SCTP_CWND_LOG_FROM_SS);
3444 #endif
3445 	}
3446 }
3447 
3448 static void
3449 sctp_hs_cwnd_decrease(struct sctp_nets *net)
3450 {
3451 	int cur_val, i, indx;
3452 #ifdef SCTP_CWND_LOGGING
3453 	int old_cwnd = net->cwnd;
3454 #endif
3455 
3456 	cur_val = net->cwnd >> 10;
3457 	indx = net->last_hs_used;
3458 	if (cur_val < sctp_cwnd_adjust[0].cwnd) {
3459 		/* normal mode */
3460 		net->ssthresh = net->cwnd / 2;
3461 		if (net->ssthresh < (net->mtu*2)) {
3462 			net->ssthresh = 2 * net->mtu;
3463 		}
3464 		net->cwnd = net->ssthresh;
3465 #ifdef SCTP_CWND_LOGGING
3466 		sctp_log_cwnd(net, (net->cwnd-old_cwnd), SCTP_CWND_LOG_FROM_FR);
3467 #endif
3468 	} else {
3469 		/* drop by the proper amount */
3470 		net->ssthresh = net->cwnd - (int)((net->cwnd / 100) *
3471 		    sctp_cwnd_adjust[net->last_hs_used].drop_percent);
3472 		net->cwnd = net->ssthresh;
3473 		/* now where are we */
3474 		indx = net->last_hs_used;
3475 		cur_val = net->cwnd >> 10;
3476 		/* reset where we are in the table */
3477 		if (cur_val < sctp_cwnd_adjust[0].cwnd) {
3478 			/* fell out of HS */
3479 			net->last_hs_used = 0;
3480 		} else {
3481 			for (i = indx; i >= 1; i--) {
3482 				if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) {
3483 					break;
3484 				}
3485 			}
3486 			net->last_hs_used = i;
3487 		}
3488 	}
3489 }
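
/*
 * Worked example of the decrease path (illustrative only): with
 * last_hs_used = 11 the row {2076, 12, 29} applies, so a 2MB cwnd
 * (cur_val = 2048) is cut by 29% to 1488993 bytes (cur_val = 1454),
 * and the loop above then walks the table index back down (to row 9
 * here) so that later increases match the smaller window.
 */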
3490 #endif
3491 
3492 void
3493 sctp_handle_sack(struct sctp_sack_chunk *ch, struct sctp_tcb *stcb,
3494     struct sctp_nets *net_from, int *abort_now)
3495 {
3496 	struct sctp_association *asoc;
3497 	struct sctp_sack *sack;
3498 	struct sctp_tmit_chunk *tp1, *tp2;
3499 	u_long cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked;
3500 	uint16_t num_seg;
3501 	unsigned int sack_length;
3502 	uint32_t send_s;
3503 	int some_on_streamwheel;
3504 	int strike_enabled = 0, cnt_of_cacc = 0;
3505 	int accum_moved = 0;
3506 	int marking_allowed = 1;
3507 	int will_exit_fast_recovery=0;
3508 	u_int32_t a_rwnd;
3509 	struct sctp_nets *net = NULL;
3510 	int nonce_sum_flag, ecn_seg_sums=0;
3511 	asoc = &stcb->asoc;
3512 
3513 	/*
3514 	 * Handle the incoming sack on data I have been sending.
3515 	 */
3516 
3517 	/*
3518 	 * we take any chance we can to service our queues since we
3519 	 * cannot get awoken when the socket is read from :<
3520 	 */
3521 	asoc->overall_error_count = 0;
3522 
3523 	if (asoc->sent_queue_retran_cnt) {
3524 #ifdef SCTP_DEBUG
3525 		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3526 			printf("Handling SACK for asoc:%p retran:%d\n",
3527 			       asoc, asoc->sent_queue_retran_cnt);
3528 		}
3529 #endif
3530 	}
3531 
3532 	sctp_service_queues(stcb, asoc, 0);
3533 
3534 	/*
3535 	 * Now perform the actual SACK handling:
3536 	 * 1) Verify that it is not an old sack, if so discard.
3537 	 * 2) If there is nothing left in the send queue (cum-ack is equal
3538 	 *    to last acked) then you have a duplicate too; update any rwnd
3539 	 *    change and verify no timers are running, then return.
3540 	 * 3) Process any new consecutive data, i.e. cum-ack moved;
3541 	 *    process these first and note that it moved.
3542 	 * 4) Process any sack blocks.
3543 	 * 5) Drop any acked from the queue.
3544 	 * 6) Check for any revoked blocks and mark.
3545 	 * 7) Update the cwnd.
3546 	 * 8) Nothing left, sync up flightsizes and things, stop all timers
3547 	 *    and also check for shutdown_pending state. If so then go ahead
3548 	 *    and send off the shutdown. If in shutdown recv, send off the
3549 	 *    shutdown-ack and start that timer, Ret.
3550 	 *    shutdown-ack and start that timer, then return.
3551 	 *    sure to set the FR flag.
3552 	 * 10) Do pr-sctp procedures.
3553 	 * 11) Apply any FR penalties.
3554 	 * 12) Assure we will SACK if in shutdown_recv state.
3555 	 */
3556 
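	/*
	 * For reference, the SACK chunk layout parsed below (RFC 2960,
	 * section 3.3.4); the length checks must cover the fixed header
	 * plus num_seg gap ack blocks:
	 *
	 *	type=3 | flags |       chunk length
	 *	          cumulative TSN ack
	 *	       advertised rwnd (a_rwnd)
	 *	num gap ack blocks | num duplicate TSNs
	 *	gap block 1 start  | gap block 1 end
	 *	                  ...
	 *	          duplicate TSN 1 ...
	 */
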
3557 	sack_length = ntohs(ch->ch.chunk_length);
3558 	if (sack_length < sizeof(struct sctp_sack_chunk)) {
3559 #ifdef SCTP_DEBUG
3560 		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3561 			printf("Bad size on sack chunk, too small\n");
3562 		}
3563 #endif
3564 		return;
3565 	}
3566 	/* ECN Nonce */
3567 	nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
3568 	sack = &ch->sack;
3569 	cum_ack = last_tsn = ntohl(sack->cum_tsn_ack);
3570 	num_seg = ntohs(sack->num_gap_ack_blks);
3571 
3572 	/* reality check */
3573 	if (TAILQ_EMPTY(&asoc->send_queue)) {
3574 		send_s = asoc->sending_seq;
3575 	} else {
3576 		tp1 = TAILQ_FIRST(&asoc->send_queue);
3577 		send_s = tp1->rec.data.TSN_seq;
3578 	}
3579 
3580 	if (sctp_strict_sacks) {
3581 		if (cum_ack == send_s ||
3582 		    compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
3583 			struct mbuf *oper;
3584 			/*
3585 			 * no way, we have not even sent this TSN out yet.
3586 			 * Peer is hopelessly messed up with us.
3587 			 */
3588 		hopeless_peer:
3589 			*abort_now = 1;
3590 			/* XXX */
3591 			MGET(oper, M_DONTWAIT, MT_DATA);
3592 			if (oper) {
3593 				struct sctp_paramhdr *ph;
3594 				u_int32_t *ippp;
3595 
3596 				oper->m_len = sizeof(struct sctp_paramhdr) +
3597 					sizeof(*ippp);
3598 				ph = mtod(oper, struct sctp_paramhdr *);
3599 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3600 				ph->param_length = htons(oper->m_len);
3601 				ippp = (u_int32_t *)(ph + 1);
3602 				*ippp = htonl(0x30000002);
3603 			}
3604 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper);
3605 			return;
3606 		}
3607 	}
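	/*
	 * The operational error built above carries a single error cause
	 * TLV: a sctp_paramhdr with type SCTP_CAUSE_PROTOCOL_VIOLATION and
	 * length 8, followed by the 32-bit value 0x30000002, a marker
	 * identifying this particular abort site when debugging.
	 */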
3608 	/* update the Rwnd of the peer */
3609 	a_rwnd = (u_int32_t)ntohl(sack->a_rwnd);
3610 	if (asoc->sent_queue_retran_cnt) {
3611 #ifdef SCTP_DEBUG
3612 		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3613 			printf("cum_ack:%lx num_seg:%u last_acked_seq:%x\n",
3614 			       cum_ack, (u_int)num_seg, asoc->last_acked_seq);
3615 		}
3616 #endif
3617 	}
3618 	if (compare_with_wrap(asoc->t3timeout_highest_marked, cum_ack, MAX_TSN)) {
3619 		/* we are not allowed to mark for FR */
3620 		marking_allowed = 0;
3621 	}
3622 	/**********************/
3623 	/* 1) check the range */
3624 	/**********************/
3625 	if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
3626 		/* acking something behind */
3627 		if (asoc->sent_queue_retran_cnt) {
3628 #ifdef SCTP_DEBUG
3629 			if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3630 				printf("The cum-ack is behind us\n");
3631 			}
3632 #endif
3633 		}
3634 		return;
3635 	}
3636 
3637 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
3638 		/* nothing left on sendqueue.. consider done */
3639 #ifdef SCTP_LOG_RWND
3640 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
3641 				  asoc->peers_rwnd, 0, 0, a_rwnd);
3642 #endif
3643 		asoc->peers_rwnd = a_rwnd;
3644 		if (asoc->sent_queue_retran_cnt) {
3645 #ifdef SCTP_DEBUG
3646 			if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3647 				printf("Huh? retran set but none on queue\n");
3648 			}
3649 #endif
3650 			asoc->sent_queue_retran_cnt = 0;
3651 		}
3652 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3653 			/* SWS sender side engages */
3654 			asoc->peers_rwnd = 0;
3655 		}
3656 		/* stop any timers */
3657 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3658 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3659 					stcb, net);
3660 			net->partial_bytes_acked = 0;
3661 			net->flight_size = 0;
3662 		}
3663 		asoc->total_flight = 0;
3664 		asoc->total_flight_count = 0;
3665 		return;
3666 	}
3667 	/*
3668 	 * We init net_ack and net_ack2 to 0. These are used to track two
3669 	 * things. The total byte count acked is tracked in net_ack, AND
3670 	 * net_ack2 is used to track the total bytes acked that are un-
3671 	 * ambiguous and were never retransmitted. We track these on a
3672 	 * per destination address basis.
3673 	 */
3674 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3675 		net->prev_cwnd = net->cwnd;
3676 		net->net_ack = 0;
3677 		net->net_ack2 = 0;
3678 	}
3679 	/* process the new consecutive TSN first */
3680 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3681 	while (tp1) {
3682 		if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
3683 				      MAX_TSN) ||
3684 		    last_tsn == tp1->rec.data.TSN_seq) {
3685 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
3686 				/* ECN Nonce: Add the nonce to the sender's nonce sum */
3687 				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
3688 				accum_moved = 1;
3689 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3690 					/*
3691 					 * If it is less than ACKED, it is now
3692 					 * no longer in flight. Higher values
3693 					 * may occur during marking
3694 					 */
3695 					if ((tp1->whoTo->dest_state &
3696 					     SCTP_ADDR_UNCONFIRMED) &&
3697 					    (tp1->snd_count < 2) ) {
3698 						/*
3699 						 * If there was no retran and
3700 						 * the address is un-confirmed
3701 						 * and we sent there and it is
3702 						 * now sacked, it's confirmed;
3703 						 * mark it so.
3704 						 */
3705 						tp1->whoTo->dest_state &=
3706 							~SCTP_ADDR_UNCONFIRMED;
3707 					}
3708 					sctp_flight_size_decrease(tp1);
3709 					sctp_total_flight_decrease(stcb, tp1);
3710 					tp1->whoTo->net_ack += tp1->send_size;
3711 					if (tp1->snd_count < 2) {
3712 						/* True non-retransmitted chunk */
3713 						tp1->whoTo->net_ack2 +=
3714 							tp1->send_size;
3715 						/* update RTO too? */
3716 						if (tp1->do_rtt) {
3717 							tp1->whoTo->RTO =
3718 								sctp_calculate_rto(stcb,
3719 										   asoc, tp1->whoTo,
3720 										   &tp1->sent_rcv_time);
3721 							tp1->whoTo->rto_pending = 0;
3722 							tp1->do_rtt = 0;
3723 						}
3724 					}
3725 				}
3726 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3727 #ifdef SCTP_DEBUG
3728 					if (sctp_debug_on & SCTP_DEBUG_INDATA3) {
3729 						printf("Hmm. one that is in RESEND that is now ACKED\n");
3730 					}
3731 #endif
3732 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3733 #ifdef SCTP_AUDITING_ENABLED
3734 					sctp_audit_log(0xB3,
3735 						       (asoc->sent_queue_retran_cnt & 0x000000ff));
3736 #endif
3737 
3738 				}
3739 				tp1->sent = SCTP_DATAGRAM_ACKED;
3740 			}
3741 		} else {
3742 			break;
3743 		}
3744 		tp1 = TAILQ_NEXT(tp1, sctp_next);
3745 	}
3746 	/*******************************************/
3747 	/* cancel ALL T3-send timer if accum moved */
3748 	/*******************************************/
3749 	if (accum_moved) {
3750 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3751 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3752 					stcb, net);
3753 		}
3754 	}
3755 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
3756 	/* always set this up to cum-ack */
3757 	asoc->this_sack_highest_gap = last_tsn;
3758 
3759 	if (num_seg * sizeof(struct sctp_gap_ack_block) + sizeof(struct sctp_sack_chunk) > sack_length) {
3760 		/* skip corrupt segments */
3761 		strike_enabled = 0;
3762 		goto skip_segments;
3763 	}
3764 
3765 	if (num_seg > 0) {
3766 		if (asoc->primary_destination->dest_state &
3767 		    SCTP_ADDR_SWITCH_PRIMARY) {
3768 			/* clear the nets CACC flags */
3769 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3770 				net->cacc_saw_newack = 0;
3771 			}
3772 		}
3773 		/*
3774 		 * this_sack_highest_gap will increase while handling NEW segments
3775 		 */
3776 
3777 		sctp_handle_segments(stcb, asoc, ch, last_tsn,
3778 		    &biggest_tsn_acked, &biggest_tsn_newly_acked,
3779 		    num_seg, &ecn_seg_sums);
3780 
3781 		if (sctp_strict_sacks) {
3782 			/* validate the biggest_tsn_acked in the gap acks
3783 			 * if strict adherence is wanted.
3784 			 */
3785 			if ((biggest_tsn_acked == send_s) ||
3786 			    (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
3787 				/*
3788 				 * peer is either confused or we are under
3789 				 * attack. We must abort.
3790 				 */
3791 				goto hopeless_peer;
3792 			}
3793 		}
3794 
3795 		if (asoc->primary_destination->dest_state &
3796 		    SCTP_ADDR_SWITCH_PRIMARY) {
3797 			/* clear the nets CACC flags */
3798 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3799 				if (net->cacc_saw_newack) {
3800 					cnt_of_cacc++;
3801 				}
3802 			}
3803 		}
3804 
3805 	}
3806 
3807 	if (cnt_of_cacc < 2) {
3808 		strike_enabled = 1;
3809 	} else {
3810 		strike_enabled = 0;
3811 	}
3812  skip_segments:
3813 	/********************************************/
3814 	/* drop the acked chunks from the sendqueue */
3815 	/********************************************/
3816 	asoc->last_acked_seq = cum_ack;
3817 	if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
3818 		if ((cum_ack == asoc->primary_destination->next_tsn_at_change) ||
3819 		    (compare_with_wrap(cum_ack,
3820 				       asoc->primary_destination->next_tsn_at_change, MAX_TSN))) {
3821 			struct sctp_nets *lnet;
3822 			/* Turn off the switch flag for ALL addresses */
3823 			TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
3824 				lnet->dest_state &=
3825 					~(SCTP_ADDR_SWITCH_PRIMARY|SCTP_ADDR_DOUBLE_SWITCH);
3826 			}
3827 		}
3828 	}
3829 	/* Drag along the t3 timeout point so we don't have a problem at wrap */
3830 	if (marking_allowed) {
3831 		asoc->t3timeout_highest_marked = cum_ack;
3832 	}
3833 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3834 	do {
3835 		if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
3836 				      MAX_TSN)) {
3837 			break;
3838 		}
3839 		if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3840 			/* no more sent on list */
3841 			break;
3842 		}
3843 		tp2 = TAILQ_NEXT(tp1, sctp_next);
3844 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3845 		if (tp1->data) {
3846 			sctp_free_bufspace(stcb, asoc, tp1);
3847 #ifdef SCTP_DEBUG
3848 			if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
3849 				printf("--total out:%lu total_mbuf_out:%lu\n",
3850 				       (u_long)asoc->total_output_queue_size,
3851 				       (u_long)asoc->total_output_mbuf_queue_size);
3852 			}
3853 #endif
3854 
3855 			sctp_m_freem(tp1->data);
3856 			if (tp1->flags & SCTP_PR_SCTP_BUFFER) {
3857 				asoc->sent_queue_cnt_removeable--;
3858 			}
3859 
3860 		}
3861 		tp1->data = NULL;
3862 		asoc->sent_queue_cnt--;
3863 		sctp_free_remote_addr(tp1->whoTo);
3864 		sctppcbinfo.ipi_count_chunk--;
3865 		asoc->chunks_on_out_queue--;
3866 
3867 		if ((int)sctppcbinfo.ipi_count_chunk < 0) {
3868 			panic("Chunk count is going negative");
3869 		}
3870 		SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, tp1);
3871 		sctppcbinfo.ipi_gencnt_chunk++;
3872 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
3873 		tp1 = tp2;
3874 	} while (tp1 != NULL);
3875 
3876 
3877 	if (asoc->fast_retran_loss_recovery && accum_moved) {
3878 		if (compare_with_wrap(asoc->last_acked_seq,
3879 				      asoc->fast_recovery_tsn, MAX_TSN) ||
3880 		    asoc->last_acked_seq == asoc->fast_recovery_tsn) {
3881 			/* Setup so we will exit RFC2582 fast recovery */
3882 			will_exit_fast_recovery = 1;
3883 		}
3884 	}
3885 
3886 	/* Check for revoked fragments if we had
3887 	 * fragments in a previous SACK. If we
3888 	 * had no previous fragments we cannot have
3889 	 * a revoke issue.
3890 	 */
3891 	if (asoc->saw_sack_with_frags)
3892 		sctp_check_for_revoked(asoc, cum_ack, biggest_tsn_acked);
3893 
3894 	if (num_seg)
3895 		asoc->saw_sack_with_frags = 1;
3896 	else
3897 		asoc->saw_sack_with_frags = 0;
3898 
3899 	/******************************/
3900 	/* update cwnd                */
3901 	/******************************/
3902 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3903 		/* if nothing was acked on this destination skip it */
3904 		if (net->net_ack == 0)
3905 			continue;
3906 
3907 		if (net->net_ack2 > 0) {
3908 			/*
3909 			 * Karn's rule applies to clearing error count,
3910 			 * this is optional.
3911 			 */
3912 			net->error_count = 0;
3913 			if ((net->dest_state&SCTP_ADDR_NOT_REACHABLE) ==
3914 			    SCTP_ADDR_NOT_REACHABLE) {
3915 				/* addr came good */
3916 				net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
3917 				net->dest_state |= SCTP_ADDR_REACHABLE;
3918 				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
3919 						SCTP_RECEIVED_SACK, (void *)net);
3920 				/* now was it the primary? if so restore */
3921 				if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
3922 					sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net);
3923 				}
3924 			}
3925 		}
3926 
3927 		if (asoc->fast_retran_loss_recovery &&
3928 		    will_exit_fast_recovery == 0) {
3929 			/* If we are in loss recovery we skip any cwnd update */
3930 			sctp_pegs[SCTP_CWND_SKIP]++;
3931 			goto skip_cwnd_update;
3932 		}
3933 		if (accum_moved) {
3934 			/* If the cumulative ack moved we can proceed */
3935 			if (net->cwnd <= net->ssthresh) {
3936 				/* We are in slow start */
3937 				if (net->flight_size + net->net_ack >=
3938 				    net->cwnd ) {
3939 #ifdef SCTP_HIGH_SPEED
3940 					sctp_hs_cwnd_increase(net);
3941 #else
3942 					if (net->net_ack > net->mtu) {
3943 						net->cwnd += net->mtu;
3944 #ifdef SCTP_CWND_LOGGING
3945 						sctp_log_cwnd(net, net->mtu,
3946 							      SCTP_CWND_LOG_FROM_SS);
3947 #endif
3948 
3949 					} else {
3950 						net->cwnd += net->net_ack;
3951 #ifdef SCTP_CWND_LOGGING
3952 						sctp_log_cwnd(net, net->net_ack,
3953 							      SCTP_CWND_LOG_FROM_SS);
3954 #endif
3955 
3956 					}
3957 #endif
3958 					sctp_pegs[SCTP_CWND_SS]++;
3959 				} else {
3960 					unsigned int dif;
3961 					sctp_pegs[SCTP_CWND_NOUSE_SS]++;
3962 					dif = net->cwnd - (net->flight_size +
3963 							   net->net_ack);
3964 #ifdef SCTP_CWND_LOGGING
3965 /*					sctp_log_cwnd(net, net->net_ack,
3966 					SCTP_CWND_LOG_NOADV_SS);*/
3967 #endif
3968 					if (dif > sctp_pegs[SCTP_CWND_DIFF_SA]) {
3969 						sctp_pegs[SCTP_CWND_DIFF_SA] =
3970 							dif;
3971 						sctp_pegs[SCTP_OQS_AT_SS] =
3972 							asoc->total_output_queue_size;
3973 						sctp_pegs[SCTP_SQQ_AT_SS] =
3974 							asoc->sent_queue_cnt;
3975 						sctp_pegs[SCTP_SQC_AT_SS] =
3976 							asoc->send_queue_cnt;
3977 					}
3978 				}
3979 			} else {
3980 				/* We are in congestion avoidance */
3981 				if (net->flight_size + net->net_ack >=
3982 				    net->cwnd) {
3983 					/*
3984 					 * add to pba only if we had a cwnd's
3985 					 * worth (or so) in flight OR the
3986 					 * burst limit was applied.
3987 					 */
3988 					net->partial_bytes_acked +=
3989 						net->net_ack;
3990 
3991 					/*
3992 					 * Do we need to increase
3993 					 * (if pba is > cwnd)?
3994 					 */
3995 					if (net->partial_bytes_acked >=
3996 					    net->cwnd) {
3997 						if (net->cwnd <
3998 						    net->partial_bytes_acked) {
3999 							net->partial_bytes_acked -=
4000 								net->cwnd;
4001 						} else {
4002 							net->partial_bytes_acked =
4003 								0;
4004 						}
4005 						net->cwnd += net->mtu;
4006 #ifdef SCTP_CWND_LOGGING
4007 						sctp_log_cwnd(net, net->mtu,
4008 							      SCTP_CWND_LOG_FROM_CA);
4009 #endif
4010 						sctp_pegs[SCTP_CWND_CA]++;
4011 					}
4012 				} else {
4013 					unsigned int dif;
4014 					sctp_pegs[SCTP_CWND_NOUSE_CA]++;
4015 #ifdef SCTP_CWND_LOGGING
4016 /*					sctp_log_cwnd(net, net->net_ack,
4017 					SCTP_CWND_LOG_NOADV_CA);
4018 */
4019 #endif
4020 					dif = net->cwnd - (net->flight_size +
4021 							   net->net_ack);
4022 					if (dif > sctp_pegs[SCTP_CWND_DIFF_CA]) {
4023 						sctp_pegs[SCTP_CWND_DIFF_CA] =
4024 							dif;
4025 						sctp_pegs[SCTP_OQS_AT_CA] =
4026 							asoc->total_output_queue_size;
4027 						sctp_pegs[SCTP_SQQ_AT_CA] =
4028 							asoc->sent_queue_cnt;
4029 						sctp_pegs[SCTP_SQC_AT_CA] =
4030 							asoc->send_queue_cnt;
4031 
4032 					}
4033 
4034 				}
4035 			}
4036 		} else {
4037 			sctp_pegs[SCTP_CWND_NOCUM]++;
4038 		}
4039 	skip_cwnd_update:
4040 		/*
4041 		 * NOW, according to Karn's rule do we need to restore the
4042 		 * RTO timer back? Check our net_ack2. If not set then we
4043 		 * have an ambiguity, i.e. all data ack'd was sent to more
4044 		 * than one place.
4045 		 */
4046 
4047 		if (net->net_ack2) {
4048 			/* restore any doubled timers */
4049 			net->RTO = ((net->lastsa >> 2) + net->lastsv) >> 1;
4050 			if (net->RTO < stcb->asoc.minrto) {
4051 				net->RTO = stcb->asoc.minrto;
4052 			}
4053 			if (net->RTO > stcb->asoc.maxrto) {
4054 				net->RTO = stcb->asoc.maxrto;
4055 			}
4056 		}
4057 		if (net->cwnd > sctp_pegs[SCTP_MAX_CWND]) {
4058 			sctp_pegs[SCTP_MAX_CWND] = net->cwnd;
4059 		}
4060 	}
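	/*
	 * Worked example of the update above (illustrative only): in slow
	 * start with mtu = 1500, cwnd = 4500 and a full window in flight,
	 * a SACK newly acking 3000 bytes (net_ack > mtu) grows cwnd by one
	 * mtu to 6000.  In congestion avoidance the same 3000 bytes are
	 * instead added to partial_bytes_acked, and only once that counter
	 * reaches cwnd is the window opened by a single mtu.
	 */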
4061 	/**********************************/
4062 	/* Now what about shutdown issues */
4063 	/**********************************/
4064 	some_on_streamwheel = 0;
4065 	if (!TAILQ_EMPTY(&asoc->out_wheel)) {
4066 		/* Check to see if some data is queued */
4067 		struct sctp_stream_out *outs;
4068 		TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
4069 			if (!TAILQ_EMPTY(&outs->outqueue)) {
4070 				some_on_streamwheel = 1;
4071 				break;
4072 			}
4073 		}
4074 	}
4075 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue) &&
4076 	    some_on_streamwheel == 0) {
4077 		/* nothing left on sendqueue.. consider done */
4078 		/* stop all timers */
4079 #ifdef SCTP_LOG_RWND
4080 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4081 				  asoc->peers_rwnd, 0, 0,  a_rwnd);
4082 #endif
4083 		asoc->peers_rwnd = a_rwnd;
4084 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4085 			/* SWS sender side engages */
4086 			asoc->peers_rwnd = 0;
4087 		}
4088 		/* stop any timers */
4089 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4090 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4091 					stcb, net);
4092 			net->flight_size = 0;
4093 			net->partial_bytes_acked = 0;
4094 		}
4095 		asoc->total_flight = 0;
4096 		asoc->total_flight_count = 0;
4097 		/* clean up */
4098 		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
4099 			asoc->state = SCTP_STATE_SHUTDOWN_SENT;
4100 #ifdef SCTP_DEBUG
4101 			if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
4102 				printf("%s:%d sends a shutdown\n",
4103 				       __FILE__,
4104 				       __LINE__
4105 				       );
4106 			}
4107 #endif
4108 			sctp_send_shutdown(stcb,
4109 					   stcb->asoc.primary_destination);
4110 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4111 					 stcb->sctp_ep, stcb, asoc->primary_destination);
4112 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4113 					 stcb->sctp_ep, stcb, asoc->primary_destination);
4114 		} else if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) {
4115 			asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT;
4116 
4117 			sctp_send_shutdown_ack(stcb,
4118 					       stcb->asoc.primary_destination);
4119 
4120 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4121 					 stcb->sctp_ep, stcb, asoc->primary_destination);
4122 		}
4123 		return;
4124 	}
4125 	/*
4126 	 * Now here we are going to recycle net_ack for a different
4127 	 * use... HEADS UP.
4128 	 */
4129 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4130 		net->net_ack = 0;
4131 	}
4132 	if ((num_seg > 0) && marking_allowed) {
4133 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4134 					   strike_enabled, biggest_tsn_newly_acked, accum_moved);
4135 	}
4136 
4137 	/*********************************************/
4138 	/* Here we perform PR-SCTP procedures        */
4139 	/* (section 4.2)                             */
4140 	/*********************************************/
4141 	/* C1. update advancedPeerAckPoint */
4142 	if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
4143 		asoc->advanced_peer_ack_point = cum_ack;
4144 	}
4145 	/* C2. try to further move advancedPeerAckPoint ahead */
4146 	if (asoc->peer_supports_prsctp) {
4147 		struct sctp_tmit_chunk *lchk;
4148 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4149 		/* C3. See if we need to send a Fwd-TSN */
4150 		if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
4151 				      MAX_TSN)) {
4152 			/*
4153 			 * ISSUE with ECN, see FWD-TSN processing for notes
4154 			 * on issues that will occur when the ECN NONCE stuff
4155 			 * is put into SCTP for cross checking.
4156 			 */
4157 			send_forward_tsn(stcb, asoc);
4158 
4159 			/* ECN Nonce: Disable Nonce Sum check when FWD TSN is sent and store resync tsn */
4160 			asoc->nonce_sum_check = 0;
4161 			asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
4162 			if (lchk) {
4163 				/* Assure a timer is up */
4164 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4165 						 stcb->sctp_ep, stcb, lchk->whoTo);
4166 			}
4167 		}
4168 	}
4169 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4170 		if (asoc->fast_retran_loss_recovery == 0) {
4171 			/* out of a RFC2582 Fast recovery window? */
4172 			if (net->net_ack > 0) {
4173 				/*
4174 				 * per section 7.2.3, check whether
4175 				 * any destinations had a fast
4176 				 * retransmit sent to them. If so we
4177 				 * need to adjust ssthresh and
4178 				 * cwnd.
4179 				 */
4180 				struct sctp_tmit_chunk *lchk;
4181 #ifdef  SCTP_HIGH_SPEED
4182 				sctp_hs_cwnd_decrease(net);
4183 #else
4184 #ifdef SCTP_CWND_LOGGING
4185 				int old_cwnd = net->cwnd;
4186 #endif
4187 				net->ssthresh = net->cwnd / 2;
4188 				if (net->ssthresh < (net->mtu*2)) {
4189 					net->ssthresh = 2 * net->mtu;
4190 				}
4191 				net->cwnd = net->ssthresh;
4192 #ifdef SCTP_CWND_LOGGING
4193 				sctp_log_cwnd(net, (net->cwnd-old_cwnd),
4194 					      SCTP_CWND_LOG_FROM_FR);
4195 #endif
4196 #endif
4197 
4198 				lchk = TAILQ_FIRST(&asoc->send_queue);
4199 
4200 				net->partial_bytes_acked = 0;
4201 				/* Turn on fast recovery window */
4202 				asoc->fast_retran_loss_recovery = 1;
4203 				if (lchk == NULL) {
4204 					/* Mark end of the window */
4205 					asoc->fast_recovery_tsn = asoc->sending_seq - 1;
4206 				} else {
4207 					asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
4208 				}
4209 
4210 
4211 				/* Disable Nonce Sum Checking and store the resync tsn */
4212 				asoc->nonce_sum_check = 0;
4213 				asoc->nonce_resync_tsn = asoc->fast_recovery_tsn + 1;
4214 
4215 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
4216 						stcb->sctp_ep, stcb, net);
4217 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4218 						 stcb->sctp_ep, stcb, net);
4219 			}
4220 		} else if (net->net_ack > 0) {
4221 			/*
4222 			 * Mark a peg that we WOULD have done a cwnd reduction
4223 			 * but RFC2582 prevented this action.
4224 			 */
4225 			sctp_pegs[SCTP_FR_INAWINDOW]++;
4226 		}
4227 	}
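	/*
	 * E.g. in the non-SCTP_HIGH_SPEED path above (illustrative only):
	 * with mtu = 1500 and cwnd = 40000, entering fast recovery sets
	 * ssthresh = cwnd / 2 = 20000 and cwnd = 20000; had cwnd been
	 * under 6000 the 2 * mtu = 3000 floor would apply instead.
	 */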
4228 
4229 
4230 	/******************************************************************
4231 	 *  Here we do the stuff with ECN Nonce checking.
4232 	 *  We basically check to see if the nonce sum flag was incorrect
4233 	 *  or if resynchronization needs to be done. Also if we catch a
4234 	 *  misbehaving receiver we give him the kick.
4235 	 ******************************************************************/
4236 
4237 	if (asoc->ecn_nonce_allowed) {
4238 		if (asoc->nonce_sum_check) {
4239 			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
4240 				if (asoc->nonce_wait_for_ecne == 0) {
4241 					struct sctp_tmit_chunk *lchk;
4242 					lchk = TAILQ_FIRST(&asoc->send_queue);
4243 					asoc->nonce_wait_for_ecne = 1;
4244 					if (lchk) {
4245 						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
4246 					} else {
4247 						asoc->nonce_wait_tsn = asoc->sending_seq;
4248 					}
4249 				} else {
4250 					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
4251 					   (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
4252 						/* Misbehaving peer. We need to react to this guy */
4253 						printf("Mis-behaving peer detected\n");
4254 						asoc->ecn_allowed = 0;
4255 						asoc->ecn_nonce_allowed = 0;
4256 					}
4257 				}
4258 			}
4259 		} else {
4260 			/* See if Resynchronization Possible */
4261 			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
4262 				asoc->nonce_sum_check = 1;
4263 				/* now we must calculate what the base
4264 				 * is. We do this based on two things: we know
4265 				 * the total for all the segments gap-acked
4266 				 * in the SACK, it's stored in ecn_seg_sums.
4267 				 * We also know the SACK's nonce sum, it's
4268 				 * in nonce_sum_flag. So we can build a truth
4269 				 * table to back-calculate the new value of asoc->nonce_sum_expect_base:
4270 				 *
4271 				 *   SACK-flag-Value         Seg-Sums              Base
4272 				 *         0                    0                   0
4273 				 *         1                    0                   1
4274 				 *         0                    1                   1
4275 				 *         1                    1                   0
4276 				 */
4277 				asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
4278 			}
4279 		}
4280 	}
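	/*
	 * Worked nonce example (illustrative only): with
	 * nonce_sum_expect_base = 1 and the gap-acked segment nonces
	 * summing to ecn_seg_sums = 1, the expected reflected sum is
	 * (1 + 1) & SCTP_SACK_NONCE_SUM = 0 (the mask selects the single
	 * nonce bit).  A SACK carrying nonce_sum_flag = 1 therefore
	 * mismatches; the code above first waits one window
	 * (nonce_wait_tsn) for a pending ECNE to explain the skew, and
	 * only then declares the peer misbehaving.
	 */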
4281 	/* Now are we exiting loss recovery ? */
4282 	if (will_exit_fast_recovery) {
4283 		/* Ok, we must exit fast recovery */
4284 		asoc->fast_retran_loss_recovery = 0;
4285 	}
4286 	if ((asoc->sat_t3_loss_recovery) &&
4287 	    ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
4288 				MAX_TSN) ||
4289 	      (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
4290 		/* end satellite t3 loss recovery */
4291 		asoc->sat_t3_loss_recovery = 0;
4292 	}
4293 	/* Adjust and set the new rwnd value */
4294 #ifdef SCTP_LOG_RWND
4295 	sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4296 			  asoc->peers_rwnd,  asoc->total_flight, (asoc->sent_queue_cnt * sctp_peer_chunk_oh), a_rwnd);
4297 #endif
4298 
4299 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
4300 					    (u_int32_t)(asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh)));
4301 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4302 		/* SWS sender side engages */
4303 		asoc->peers_rwnd = 0;
4304 	}
4305 	/*
4306 	 * Now we must make sure a timer is running for anyone with
4307 	 * outstanding data.
4308 	 */
4309 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4310 		struct sctp_tmit_chunk *chk;
4311 		TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
4312 			if (chk->whoTo == net &&
4313 			    (chk->sent < SCTP_DATAGRAM_ACKED ||
4314 			     chk->sent == SCTP_FORWARD_TSN_SKIP)) {
4315 				/*
4316 				 * Not ack'ed and still outstanding to this
4317 				 * destination or marked and must be
4318 				 * sacked after fwd-tsn sent.
4319 				 */
4320 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4321 						 stcb->sctp_ep, stcb, net);
4322 				break;
4323 			}
4324 		}
4325 	}
4326 }
4327 
4328 void
4329 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
4330     struct sctp_nets *netp, int *abort_flag)
4331 {
4332 	/* Mutate a shutdown into a SACK */
4333 	struct sctp_sack_chunk sack;
4334 
4335 	/* Copy cum-ack */
4336 	sack.sack.cum_tsn_ack = cp->cumulative_tsn_ack;
4337 	/* Arrange so a_rwnd does NOT change */
4338 	sack.ch.chunk_type = SCTP_SELECTIVE_ACK;
4339 	sack.ch.chunk_flags = 0;
4340 	sack.ch.chunk_length = htons(sizeof(struct sctp_sack_chunk));
4341 	sack.sack.a_rwnd =
4342 	    htonl(stcb->asoc.peers_rwnd + stcb->asoc.total_flight);
4343 	/*
4344 	 * no gaps in this one. This may give a temporary appearance of
4345 	 * reneging, but hopefully the second chunk is a true SACK in the
4346 	 * packet and will correct this view. One will come soon after, no
4347 	 * matter what, to fix this.
4348 	 */
4349 	sack.sack.num_gap_ack_blks = 0;
4350 	sack.sack.num_dup_tsns = 0;
4351 	/* Now call the SACK processor */
4352 	sctp_handle_sack(&sack, stcb, netp, abort_flag);
4353 }
4354 
4355 static void
4356 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
4357     struct sctp_stream_in *strmin)
4358 {
4359 	struct sctp_tmit_chunk *chk, *nchk;
4360 	struct sctp_association *asoc;
4361 	int tt;
4362 
4363 	asoc = &stcb->asoc;
4364 	tt = strmin->last_sequence_delivered;
4365 	/*
4366 	 * First deliver anything prior to and including the stream
4367 	 * sequence number that came in.
4368 	 */
4369 	chk = TAILQ_FIRST(&strmin->inqueue);
4370 	while (chk) {
4371 		nchk = TAILQ_NEXT(chk, sctp_next);
4372 		if (compare_with_wrap(tt, chk->rec.data.stream_seq, MAX_SEQ) ||
4373 		    (tt == chk->rec.data.stream_seq)) {
4374 			/* this is deliverable now */
4375 			TAILQ_REMOVE(&strmin->inqueue, chk, sctp_next);
4376 			/* subtract pending on streams */
4377 			asoc->size_on_all_streams -= chk->send_size;
4378 			asoc->cnt_on_all_streams--;
4379 			/* deliver it to at least the delivery-q */
4380 			sctp_deliver_data(stcb, &stcb->asoc, chk, 0);
4381 		} else {
4382 			/* no more delivery now. */
4383 			break;
4384 		}
4385 		chk = nchk;
4386 	}
4387 	/*
4388 	 * now we must deliver things in the queue the normal way
4389 	 * if any are now ready.
4390 	 */
4391 	tt = strmin->last_sequence_delivered + 1;
4392 	chk = TAILQ_FIRST(&strmin->inqueue);
4393 	while (chk) {
4394 		nchk = TAILQ_NEXT(chk, sctp_next);
4395 		if (tt == chk->rec.data.stream_seq) {
4396 			/* this is deliverable now */
4397 			TAILQ_REMOVE(&strmin->inqueue, chk, sctp_next);
4398 			/* subtract pending on streams */
4399 			asoc->size_on_all_streams -= chk->send_size;
4400 			asoc->cnt_on_all_streams--;
4401 			/* deliver it to at least the delivery-q */
4402 			strmin->last_sequence_delivered =
4403 			    chk->rec.data.stream_seq;
4404 			sctp_deliver_data(stcb, &stcb->asoc, chk, 0);
4405 			tt = strmin->last_sequence_delivered + 1;
4406 		} else {
4407 			break;
4408 		}
4409 		chk = nchk;
4410 	}
4411 
4412 }
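
/*
 * E.g. (illustrative only): if the FWD-TSN moved a stream's
 * last_sequence_delivered to 5 and its inqueue holds sequence
 * numbers 4, 6, 7 and 9, the first pass above delivers 4 (it is
 * <= 5) and the second pass then delivers 6 and 7 in order,
 * stopping at the hole before 9.
 */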
4413 
4414 void
4415 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
4416     struct sctp_forward_tsn_chunk *fwd, int *abort_flag)
4417 {
4418 	/*
4419 	 * ISSUES that MUST be fixed for ECN! When we are the
4420 	 * sender of the forward TSN and the SACK comes back
4421 	 * that acknowledges the FWD-TSN, we must reset the
4422 	 * NONCE sum to match correctly. This will get quite
4423 	 * tricky since we may have sent intervening data and
4424 	 * must carefully account for what the SACK says on the
4425 	 * nonce and any gaps that are reported. This work
4426 	 * will NOT be done here, but I note it here since
4427 	 * it is really related to PR-SCTP and FWD-TSN's
4428 	 */
4429 
4430 	/* The pr-sctp fwd tsn */
4431 	/*
4432 	 * here we will perform all the data receiver side steps for
4433 	 * processing FwdTSN, as required by the pr-sctp draft:
4434 	 *
4435 	 * Assume we get FwdTSN(x):
4436 	 *
4437 	 * 1) update local cumTSN to x
4438 	 * 2) try to further advance cumTSN to x + others we have
4439 	 * 3) examine and update re-ordering queue on pr-in-streams
4440 	 * 4) clean up re-assembly queue
4441 	 * 5) Send a sack to report where we are.
4442 	 */
4443 	struct sctp_strseq *stseq;
4444 	struct sctp_association *asoc;
4445 	u_int32_t new_cum_tsn, gap, back_out_htsn;
4446 	unsigned int i, cnt_gone, fwd_sz, cumack_set_flag, m_size;
4447 	struct sctp_stream_in *strm;
4448 	struct sctp_tmit_chunk *chk, *at;
4449 
4450 	cumack_set_flag = 0;
4451 	asoc = &stcb->asoc;
4452 	cnt_gone = 0;
4453 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
4454 #ifdef SCTP_DEBUG
4455 		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
4456 			printf("Bad size on fwd-tsn chunk, too small\n");
4457 		}
4458 #endif
4459 		return;
4460 	}
4461 	m_size = (stcb->asoc.mapping_array_size << 3);
4462 	/*************************************************************/
4463 	/* 1. Here we update local cumTSN and shift the bitmap array */
4464 	/*************************************************************/
4465 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
4466 
4467 	if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) ||
4468 	    asoc->cumulative_tsn == new_cum_tsn) {
4469 		/* Already got there ... */
4470 		return;
4471 	}
4472 
4473 	back_out_htsn = asoc->highest_tsn_inside_map;
4474 	if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map,
4475 	    MAX_TSN)) {
4476 		asoc->highest_tsn_inside_map = new_cum_tsn;
4477 #ifdef SCTP_MAP_LOGGING
4478 		sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
4479 #endif
4480 	}
4481 	/*
4482 	 * now we know the new TSN is more advanced, let's find the
4483 	 * actual gap
4484 	 */
4485 	if ((compare_with_wrap(new_cum_tsn, asoc->mapping_array_base_tsn,
4486 			       MAX_TSN)) ||
4487 	     (new_cum_tsn == asoc->mapping_array_base_tsn)) {
4488 		gap = new_cum_tsn - asoc->mapping_array_base_tsn;
4489 	} else {
4490 		/* try to prevent underflow here */
4491 		gap = new_cum_tsn + (MAX_TSN - asoc->mapping_array_base_tsn) + 1;
4492 	}
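	/*
	 * Wrap example (illustrative only): with mapping_array_base_tsn =
	 * 0xfffffff0 and new_cum_tsn = 0x00000005, the else branch above
	 * computes gap = 5 + (0xffffffff - 0xfffffff0) + 1 = 21, i.e. the
	 * 22 TSNs from the base through the new cumulative TSN inclusive
	 * occupy bits 0 through 21 of the mapping array.
	 */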
4493 
4494 	if (gap >= m_size) {
4495 		asoc->highest_tsn_inside_map = back_out_htsn;
4496 		if ((long)gap > sctp_sbspace(&stcb->sctp_socket->so_rcv)) {
4497 			/*
4498 			 * out of range (of single byte chunks in the rwnd I
4499 			 * give out); too questionable, better to drop it
4500 			 * silently
4501 			 */
4502 			return;
4503 		}
4504 		if (asoc->highest_tsn_inside_map >
4505 		    asoc->mapping_array_base_tsn) {
4506 			gap = asoc->highest_tsn_inside_map -
4507 			    asoc->mapping_array_base_tsn;
4508 		} else {
4509 			gap = asoc->highest_tsn_inside_map +
4510 			    (MAX_TSN - asoc->mapping_array_base_tsn) + 1;
4511 		}
4512 		cumack_set_flag = 1;
4513 	}
4514 	for (i = 0; i <= gap; i++) {
4515 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, i);
4516 	}
4517 	/*
4518 	 * Now after marking all, slide things forward but no
4519 	 * sack please.
4520 	 */
4521 	sctp_sack_check(stcb, 0, 0, abort_flag);
4522 	if (*abort_flag)
4523 		return;
4524 
4525 	if (cumack_set_flag) {
4526 		/*
4527 		 * fwd-tsn went outside my gap array - not a
4528 		 * common occurrence. Do the same thing we
4529 		 * do when a cookie-echo arrives.
4530 		 */
4531 		asoc->highest_tsn_inside_map = new_cum_tsn - 1;
4532 		asoc->mapping_array_base_tsn = new_cum_tsn;
4533 		asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
4534 #ifdef SCTP_MAP_LOGGING
4535 		sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
4536 #endif
4537 		asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
4538 	}
4539 	/*************************************************************/
4540 	/* 2. Clear up re-assembly queue                             */
4541 	/*************************************************************/
4542 
4543 	/*
4544 	 * First service it if pd-api is up, just in case we can
4545 	 * progress it forward
4546 	 */
4547 	if (asoc->fragmented_delivery_inprogress) {
4548 		sctp_service_reassembly(stcb, asoc, 0);
4549 	}
4550 	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
4551 		/* For each one on here see if we need to toss it */
4552 		/*
4553 		 * For now large messages held on the reasmqueue that are
4554 		 * complete will be tossed too. We could in theory do more
4555 		 * work to spin through and stop after dumping one msg,
4556 		 * i.e. on seeing the start of a new msg at the head, and call
4557 		 * the delivery function... to see if it can be delivered...
4558 		 * But for now we just dump everything on the queue.
4559 		 */
4560 		chk = TAILQ_FIRST(&asoc->reasmqueue);
4561 		while (chk) {
4562 			at = TAILQ_NEXT(chk, sctp_next);
4563 			if (compare_with_wrap(asoc->cumulative_tsn,
4564 			    chk->rec.data.TSN_seq, MAX_TSN) ||
4565 			    asoc->cumulative_tsn == chk->rec.data.TSN_seq) {
4566 				/* It needs to be tossed */
4567 				TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
4568 				if (compare_with_wrap(chk->rec.data.TSN_seq,
4569 				    asoc->tsn_last_delivered, MAX_TSN)) {
4570 					asoc->tsn_last_delivered =
4571 					    chk->rec.data.TSN_seq;
4572 					asoc->str_of_pdapi =
4573 					    chk->rec.data.stream_number;
4574 					asoc->ssn_of_pdapi =
4575 					    chk->rec.data.stream_seq;
4576 					asoc->fragment_flags =
4577 					    chk->rec.data.rcv_flags;
4578 				}
4579 				asoc->size_on_reasm_queue -= chk->send_size;
4580 				asoc->cnt_on_reasm_queue--;
4581 				cnt_gone++;
4582 
4583 				/* Clear up any stream problem */
4584 				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
4585 				    SCTP_DATA_UNORDERED &&
4586 				    (compare_with_wrap(chk->rec.data.stream_seq,
4587 				    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
4588 				    MAX_SEQ))) {
4589 					 * We must dump forward this stream's
4590 					 * sequence number if the chunk being
4591 					 * skipped is not unordered.
4592 					 * unordered that is being skipped.
4593 					 * There is a chance that if the peer
4594 					 * does not include the last fragment
4595 					 * in its FWD-TSN we WILL have a problem
4596 					 * here since you would have a partial
4597 					 * chunk in queue that may not be
4598 					 * deliverable.
4599 					 * Also if a partial delivery API has
4600 					 * started, the user may get a partial
4601 					 * chunk. The next read returning a new
4602 					 * chunk... really ugly but I see no way
4603 					 * around it! Maybe a notify??
4604 					 */
4605 					asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
4606 					    chk->rec.data.stream_seq;
4607 				}
4608 				if (chk->data) {
4609 					sctp_m_freem(chk->data);
4610 					chk->data = NULL;
4611 				}
4612 				sctp_free_remote_addr(chk->whoTo);
4613 				SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
4614 				sctppcbinfo.ipi_count_chunk--;
4615 				if ((int)sctppcbinfo.ipi_count_chunk < 0) {
4616 					panic("Chunk count is negative");
4617 				}
4618 				sctppcbinfo.ipi_gencnt_chunk++;
4619 			} else {
4620 				/*
4621 				 * Ok we have gone beyond the end of the
4622 				 * fwd-tsn's mark. Some checks...
4623 				 */
4624 				if ((asoc->fragmented_delivery_inprogress) &&
4625 				    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4626 					/* Special case: PD-API is up and what we fwd-tsn'd
4627 					 * over includes one that had the LAST_FRAG. We
4628 					 * no longer need to do the PD-API.
4629 					 */
4630 					asoc->fragmented_delivery_inprogress = 0;
4631 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
4632 					    stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)NULL);
4633 
4634 				}
4635 				break;
4636 			}
4637 			chk = at;
4638 		}
4639 	}
4640 	if (asoc->fragmented_delivery_inprogress) {
4641 		/*
4642 		 * Ok we removed cnt_gone chunks in the PD-API queue that
4643 		 * were being delivered. So now we must turn off the
4644 		 * flag.
4645 		 */
4646 		sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
4647 		    stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)NULL);
4648 		asoc->fragmented_delivery_inprogress = 0;
4649 	}
4650 	/*************************************************************/
4651 	/* 3. Update the PR-stream re-ordering queues                */
4652 	/*************************************************************/
4653 	stseq = (struct sctp_strseq *)((vaddr_t)fwd + sizeof(*fwd));
4654 	fwd_sz -= sizeof(*fwd);
4655 	{
4656 		/* New method. */
4657 		int num_str;
4658 		num_str = fwd_sz/sizeof(struct sctp_strseq);
4659 #ifdef SCTP_DEBUG
4660 		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
4661 			printf("Using NEW method, %d strseq's reported in FWD-TSN\n",
4662 			    num_str);
4663 		}
4664 #endif
4665 		for (i = 0; i < num_str; i++) {
4666 			u_int16_t st;
4672 			st = ntohs(stseq[i].stream);
4673 			stseq[i].stream = st;
4674 			st = ntohs(stseq[i].sequence);
4675 			stseq[i].sequence = st;
4676 			/* now process */
4677 			if (stseq[i].stream >= asoc->streamincnt) {
4678 #ifdef SCTP_DEBUG
4679 				if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
4680 					printf("Bogus stream number %d "
4681 					    "streamincnt is %d\n",
4682 					    stseq[i].stream, asoc->streamincnt);
4683 				}
4684 #endif
4685 				/*
4686 				 * It is arguable if we should continue. Since
4687 				 * the peer sent bogus stream info we may be in
4688 				 * deep trouble..
4689 				 * a return may be a better choice?
4690 				 */
4691 				continue;
4692 			}
4693 			strm = &asoc->strmin[stseq[i].stream];
4694 			if (compare_with_wrap(stseq[i].sequence,
4695 			    strm->last_sequence_delivered, MAX_SEQ)) {
4696 				/* Update the sequence number */
4697 				strm->last_sequence_delivered =
4698 				    stseq[i].sequence;
4699 			}
4700 			/* now kick the stream the new way */
4701 			sctp_kick_prsctp_reorder_queue(stcb, strm);
4702 		}
4703 	}
4704 }
4705