xref: /freebsd-src/sys/netinet/sctp_indata.c (revision 1c05a6ea6b849ff95e539c31adea887c644a6a01)
/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <sys/proc.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_crc32.h>
#include <netinet/sctp_lock_bsd.h>
/*
 * NOTES: On the outbound side of things I need to check the SACK timer to
 * see if I should generate a SACK into the chunk queue (if I have data to
 * send, that is, and will be sending it) -- for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read
 * from. This will cause sctp_service_queues() to get called on the top
 * entry in the list.
 */
static uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int lock_held);

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a one-to-many socket.
	 * Since sb_cc is the count that everyone has put up, when we
	 * re-write sctp_soreceive we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL) {
		return (calc);
	}
	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		KASSERT(asoc->cnt_on_reasm_queue == 0, ("cnt_on_reasm_queue is %u", asoc->cnt_on_reasm_queue));
		KASSERT(asoc->cnt_on_all_streams == 0, ("cnt_on_all_streams is %u", asoc->cnt_on_all_streams));
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
	/*
	 * Take out what has NOT been put on the socket queue and which we
	 * still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
	    asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
	    asoc->cnt_on_all_streams * MSIZE));
	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
	 * even if it is 0. SWS avoidance engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
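
/*
 * In effect, the computation above is
 *
 *   rwnd = sbspace(so_rcv)
 *        - (size_on_reasm_queue + cnt_on_reasm_queue * MSIZE)
 *        - (size_on_all_streams + cnt_on_all_streams * MSIZE)
 *        - my_rwnd_control_len
 *
 * with every subtraction clamped at zero by sctp_sbspace_sub(). The window
 * can legitimately reach 0 while the queues are full, but once only
 * control overhead would close it, it is pinned at 1 so the peer never
 * sees the window collapse for bookkeeping reasons alone (silly window
 * syndrome avoidance).
 */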

/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t sid,
    uint32_t mid, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
	read_queue_e->sinfo_stream = sid;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = context;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->mid = mid;
	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
	TAILQ_INIT(&read_queue_e->reasm);
	read_queue_e->whoFrom = net;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
failed_build:
	return (read_queue_e);
}

struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}
	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}

	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	/*
	 * Make sure that there is no un-initialized padding between the
	 * cmsg header and cmsg data and after the cmsg data.
	 */
	memset(cmh, 0, len);
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->serinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}
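
/*
 * Layout of the mbuf returned above when all three report types are
 * enabled (each element is padded to CMSG_SPACE() so the application can
 * walk the chain with CMSG_NXTHDR(); the memset guarantees the padding
 * is zeroed):
 *
 *   [cmsghdr|sctp_rcvinfo][cmsghdr|sctp_nxtinfo][cmsghdr|sctp_sndrcvinfo]
 */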

static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;
	int in_r = 0, in_nr = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * This TSN is behind the cum-ack and thus we don't need to
		 * worry about it being moved from one map to the other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if ((in_r == 0) && (in_nr == 0)) {
#ifdef INVARIANTS
		panic("Things are really messed up now");
#else
		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#endif
	}
	if (in_nr == 0)
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if (in_r)
		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}
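
/*
 * Summary of the helper above: once a TSN has been handed up toward the
 * application it can no longer be revoked, so its bit moves from
 * mapping_array (revokable gap acks) to nr_mapping_array (non-revokable
 * gap acks), and highest_tsn_inside_map is walked back if that TSN was
 * the current maximum of the revokable map.
 */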

static int
sctp_place_control_in_stream(struct sctp_stream_in *strm,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control)
{
	struct sctp_queued_to_read *at;
	struct sctp_readhead *q;
	uint8_t flags, unordered;

	flags = (control->sinfo_flags >> 8);
	unordered = flags & SCTP_DATA_UNORDERED;
	if (unordered) {
		q = &strm->uno_inqueue;
		if (asoc->idata_supported == 0) {
			if (!TAILQ_EMPTY(q)) {
				/*
				 * Only one control can be held here in
				 * old style -- abort
				 */
				return (-1);
			}
			TAILQ_INSERT_TAIL(q, control, next_instrm);
			control->on_strm_q = SCTP_ON_UNORDERED;
			return (0);
		}
	} else {
		q = &strm->inqueue;
	}
	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
		control->end_added = 1;
		control->first_frag_seen = 1;
		control->last_frag_seen = 1;
	}
	if (TAILQ_EMPTY(q)) {
		/* Empty queue */
		TAILQ_INSERT_HEAD(q, control, next_instrm);
		if (unordered) {
			control->on_strm_q = SCTP_ON_UNORDERED;
		} else {
			control->on_strm_q = SCTP_ON_ORDERED;
		}
		return (0);
	} else {
		TAILQ_FOREACH(at, q, next_instrm) {
			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * The one in the queue is bigger than the
				 * new one, insert the new one before it.
				 */
				TAILQ_INSERT_BEFORE(at, control, next_instrm);
				if (unordered) {
					control->on_strm_q = SCTP_ON_UNORDERED;
				} else {
					control->on_strm_q = SCTP_ON_ORDERED;
				}
				break;
			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * Gak, he sent me a duplicate msg id
				 * number?! Return -1 to abort.
				 */
				return (-1);
			} else {
				if (TAILQ_NEXT(at, next_instrm) == NULL) {
					/*
					 * We are at the end, insert it
					 * after this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_TL);
					}
					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
					if (unordered) {
						control->on_strm_q = SCTP_ON_UNORDERED;
					} else {
						control->on_strm_q = SCTP_ON_ORDERED;
					}
					break;
				}
			}
		}
	}
	return (0);
}
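
/*
 * Note: the SCTP_MID_GT()/SCTP_MID_EQ() comparisons take idata_supported
 * so that serial-number arithmetic is done in the full 32-bit MID space
 * when I-DATA is negotiated and in the 16-bit SSN space for plain DATA;
 * that is why the same sorted insert above serves both chunk formats.
 */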

static void
sctp_abort_in_reasm(struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag, int opspot)
{
	char msg[SCTP_DIAG_INFO_LEN];
	struct mbuf *oper;

	if (stcb->asoc.idata_supported) {
		snprintf(msg, sizeof(msg),
		    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.tsn,
		    chk->rec.data.sid,
		    chk->rec.data.fsn, chk->rec.data.mid);
	} else {
		snprintf(msg, sizeof(msg),
		    "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.tsn,
		    chk->rec.data.sid,
		    chk->rec.data.fsn,
		    (uint16_t)chk->rec.data.mid);
	}
	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
	sctp_m_freem(chk->data);
	chk->data = NULL;
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
	*abort_flag = 1;
}

static void
sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
{
	/*
	 * The control could not be placed and must be cleaned.
	 */
	struct sctp_tmit_chunk *chk, *nchk;

	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
		if (chk->data)
			sctp_m_freem(chk->data);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	}
	sctp_free_a_readq(stcb, control);
}

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue.  If we do
 * append to the so_buf, keep doing so until we are out of order, as long
 * as the controls entered are non-fragmented.
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 */
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint32_t nxt_todel;
	struct mbuf *op_err;
	struct sctp_stream_in *strm;
	char msg[SCTP_DIAG_INFO_LEN];

	strm = &asoc->strmin[control->sinfo_stream];
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
		    strm->last_mid_delivered, control->mid);
		/*
		 * Throw it in the stream so it gets cleaned up in
		 * association destruction.
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
		if (asoc->idata_supported) {
			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
			    strm->last_mid_delivered, control->sinfo_tsn,
			    control->sinfo_stream, control->mid);
		} else {
			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
			    (uint16_t)strm->last_mid_delivered,
			    control->sinfo_tsn,
			    control->sinfo_stream,
			    (uint16_t)control->mid);
		}
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;
	}
	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	nxt_todel = strm->last_mid_delivered + 1;
	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY it won't be queued if it could be delivered directly */
		queue_needed = 0;
		if (asoc->size_on_all_streams >= control->length) {
			asoc->size_on_all_streams -= control->length;
		} else {
#ifdef INVARIANTS
			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
			asoc->size_on_all_streams = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_mid_delivered++;
		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
			/* all delivered */
			nxt_todel = strm->last_mid_delivered + 1;
			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
				if (control->on_strm_q == SCTP_ON_ORDERED) {
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
#ifdef INVARIANTS
				} else {
					panic("Huh control: %p is on_strm_q: %d",
					    control, control->on_strm_q);
#endif
				}
				control->on_strm_q = 0;
				strm->last_mid_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_LOCKED);
				continue;
			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
				*need_reasm = 1;
			}
			break;
		}
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			snprintf(msg, sizeof(msg),
			    "Queue to str MID: %u duplicate",
			    control->mid);
			sctp_clean_up_control(stcb, control);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
			*abort_flag = 1;
		}
	}
}

static void
sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
{
	struct mbuf *m, *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	control->held_length = 0;
	control->length = 0;
	m = control->data;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		if (control->on_read_q) {
			/*
			 * On the read queue, so we must increment the SB
			 * accounting; we assume the caller has taken any
			 * required socket buffer locks.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}

static void
sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
{
	struct mbuf *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	if (stcb == NULL) {
#ifdef INVARIANTS
		panic("Control broken");
#else
		return;
#endif
	}
	if (control->tail_mbuf == NULL) {
		/* TSNH */
		control->data = m;
		sctp_setup_tail_pointer(control);
		return;
	}
	control->tail_mbuf->m_next = m;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->tail_mbuf->m_next = sctp_m_free(m);
				m = control->tail_mbuf->m_next;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (control->on_read_q) {
			/*
			 * On the read queue, so we must increment the SB
			 * accounting; we assume the caller has taken any
			 * required socket buffer locks.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		*added += SCTP_BUF_LEN(m);
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}

static void
sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
{
	memset(nc, 0, sizeof(struct sctp_queued_to_read));
	nc->sinfo_stream = control->sinfo_stream;
	nc->mid = control->mid;
	TAILQ_INIT(&nc->reasm);
	nc->top_fsn = control->top_fsn;
	nc->sinfo_flags = control->sinfo_flags;
	nc->sinfo_ppid = control->sinfo_ppid;
	nc->sinfo_context = control->sinfo_context;
	nc->fsn_included = 0xffffffff;
	nc->sinfo_tsn = control->sinfo_tsn;
	nc->sinfo_cumtsn = control->sinfo_cumtsn;
	nc->sinfo_assoc_id = control->sinfo_assoc_id;
	nc->whoFrom = control->whoFrom;
	atomic_add_int(&nc->whoFrom->ref_count, 1);
	nc->stcb = control->stcb;
	nc->port_from = control->port_from;
}

static void
sctp_reset_a_control(struct sctp_queued_to_read *control,
    struct sctp_inpcb *inp, uint32_t tsn)
{
	control->fsn_included = tsn;
	if (control->on_read_q) {
		/*
		 * We have to purge it from there, hopefully this will work
		 * :-)
		 */
		TAILQ_REMOVE(&inp->read_queue, control, next);
		control->on_read_q = 0;
	}
}

static int
sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control,
    uint32_t pd_point,
    int inp_read_lock_held)
{
	/*
	 * Special handling for the old un-ordered data chunk. All the
	 * chunks/TSNs go to mid 0. So we have to do the old style watching
	 * to see if we have it all. If you return one, no other control
	 * entries on the un-ordered queue will be looked at. In theory
	 * there should be no other entries in reality, unless the guy is
	 * sending both unordered NDATA and unordered DATA...
	 */
	struct sctp_tmit_chunk *chk, *lchk, *tchk;
	uint32_t fsn;
	struct sctp_queued_to_read *nc;
	int cnt_added;

	if (control->first_frag_seen == 0) {
		/* Nothing we can do, we have not seen the first piece yet */
		return (1);
	}
	/* Collapse any we can */
	cnt_added = 0;
restart:
	fsn = control->fsn_included + 1;
	/* Now what can we add? */
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
		if (chk->rec.data.fsn == fsn) {
			/* Ok lets add it */
			sctp_alloc_a_readq(stcb, nc);
			if (nc == NULL) {
				break;
			}
			memset(nc, 0, sizeof(struct sctp_queued_to_read));
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
			fsn++;
			cnt_added++;
			chk = NULL;
			if (control->end_added) {
				/* We are done */
				if (!TAILQ_EMPTY(&control->reasm)) {
					/*
					 * Ok we have to move anything left
					 * on the control queue to a new
					 * control.
					 */
					sctp_build_readq_entry_from_ctl(nc, control);
					tchk = TAILQ_FIRST(&control->reasm);
					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						if (asoc->size_on_reasm_queue >= tchk->send_size) {
							asoc->size_on_reasm_queue -= tchk->send_size;
						} else {
#ifdef INVARIANTS
							panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
#else
							asoc->size_on_reasm_queue = 0;
#endif
						}
						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
						nc->first_frag_seen = 1;
						nc->fsn_included = tchk->rec.data.fsn;
						nc->data = tchk->data;
						nc->sinfo_ppid = tchk->rec.data.ppid;
						nc->sinfo_tsn = tchk->rec.data.tsn;
						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
						tchk->data = NULL;
						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
						sctp_setup_tail_pointer(nc);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/* Spin the rest onto the queue */
					while (tchk) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/*
					 * Now lets add it to the queue
					 * after removing control
					 */
					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
					nc->on_strm_q = SCTP_ON_UNORDERED;
					if (control->on_strm_q) {
						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
						control->on_strm_q = 0;
					}
				}
				if (control->pdapi_started) {
					strm->pd_api_started = 0;
					control->pdapi_started = 0;
				}
				if (control->on_strm_q) {
					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
					control->on_strm_q = 0;
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				}
				if (control->on_read_q == 0) {
					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
					    &stcb->sctp_socket->so_rcv, control->end_added,
					    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
				}
				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
					/*
					 * Switch to the new guy and
					 * continue
					 */
					control = nc;
					goto restart;
				} else {
					if (nc->on_strm_q == 0) {
						sctp_free_a_readq(stcb, nc);
					}
				}
				return (1);
			} else {
				sctp_free_a_readq(stcb, nc);
			}
		} else {
			/* Can't add more */
			break;
		}
	}
	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
		strm->pd_api_started = 1;
		control->pdapi_started = 1;
		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
		    &stcb->sctp_socket->so_rcv, control->end_added,
		    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (0);
	} else {
		return (1);
	}
}
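
/*
 * Note: pd_point above is the partial-delivery threshold. Once a
 * still-incomplete message grows past it, the message is pushed to the
 * read queue with pdapi_started set, so the application can begin
 * consuming it before the final fragment has arrived.
 */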

static void
sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag)
{
	struct sctp_tmit_chunk *at;
	int inserted;

	/*
	 * Here we need to place the chunk into the control structure sorted
	 * in the correct order.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* It's the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a first fsn: %u becomes fsn_included\n",
		    chk->rec.data.fsn);
		if (control->first_frag_seen) {
			/*
			 * In old un-ordered we can reassemble multiple
			 * messages on one control, as long as the next
			 * FIRST is greater than the old first (TSN, i.e.
			 * FSN, wise).
			 */
			struct mbuf *tdata;
			uint32_t tmp;

			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
				/*
				 * Easy way: the start of a new guy beyond
				 * the lowest
				 */
				goto place_chunk;
			}
			if ((chk->rec.data.fsn == control->fsn_included) ||
			    (control->pdapi_started)) {
				/*
				 * Ok this should not happen, if it does we
				 * started the pd-api on the higher TSN
				 * (since the equals part is a TSN failure
				 * it must be that).
				 *
				 * We are completely hosed in that case since
				 * I have no way to recover. This really
				 * will only happen if we can get more TSNs
				 * higher before the pd-api-point.
				 */
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
				return;
			}
			/*
			 * Ok, we have two firsts and the one we just got is
			 * smaller than the one we previously placed... yuck!
			 * We must swap them out.
			 */
			/* swap the mbufs */
			tdata = control->data;
			control->data = chk->data;
			chk->data = tdata;
			/* Save the lengths */
			chk->send_size = control->length;
			/* Recompute length of control and tail pointer */
			sctp_setup_tail_pointer(control);
			/* Fix the FSN included */
			tmp = control->fsn_included;
			control->fsn_included = chk->rec.data.fsn;
			chk->rec.data.fsn = tmp;
			/* Fix the TSN included */
			tmp = control->sinfo_tsn;
			control->sinfo_tsn = chk->rec.data.tsn;
			chk->rec.data.tsn = tmp;
			/* Fix the PPID included */
			tmp = control->sinfo_ppid;
			control->sinfo_ppid = chk->rec.data.ppid;
			chk->rec.data.ppid = tmp;
			/* Fix tail pointer */
			goto place_chunk;
		}
		control->first_frag_seen = 1;
		control->fsn_included = chk->rec.data.fsn;
		control->top_fsn = chk->rec.data.fsn;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		return;
	}
place_chunk:
	inserted = 0;
	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
			/*
			 * This one in queue is bigger than the new one,
			 * insert the new one before at.
			 */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			inserted = 1;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
			/*
			 * They sent a duplicate fsn number. This really
			 * should not happen since the FSN is a TSN and it
			 * should have been dropped earlier.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
			return;
		}
	}
	if (inserted == 0) {
		/* It's at the end */
		asoc->size_on_reasm_queue += chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		control->top_fsn = chk->rec.data.fsn;
		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
	}
}
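
/*
 * Note: for old-style DATA the FSN is simply the TSN, which is why the
 * reassembly list above is kept sorted with SCTP_TSN_GT() on rec.data.fsn
 * and why a duplicate FSN is treated as a protocol violation -- the
 * duplicate TSN should have been dropped long before reaching this point.
 */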

static int
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_in *strm, int inp_read_lock_held)
{
	/*
	 * Given a stream, strm, see if any of the fragmented SSNs on it
	 * are ready to deliver. If so, go ahead and place them on the read
	 * queue. In so placing, if we have hit the end, then we need to
	 * remove them from the stream's queue.
	 */
	struct sctp_queued_to_read *control, *nctl = NULL;
	uint32_t next_to_del;
	uint32_t pd_point;
	int ret = 0;

	if (stcb->sctp_socket) {
		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
		    stcb->sctp_ep->partial_delivery_point);
	} else {
		pd_point = stcb->sctp_ep->partial_delivery_point;
	}
	control = TAILQ_FIRST(&strm->uno_inqueue);

	if ((control != NULL) &&
	    (asoc->idata_supported == 0)) {
		/* Special handling needed for "old" data format */
		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
			goto done_un;
		}
	}
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	while (control) {
		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (control->end_added) {
			/* We just put the last bit on */
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_UNORDERED) {
					panic("Huh control: %p on_q: %d -- not unordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
		} else {
			/* Can we do a PD-API for this un-ordered guy? */
			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
				strm->pd_api_started = 1;
				control->pdapi_started = 1;
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);

				break;
			}
		}
		control = nctl;
	}
done_un:
	control = TAILQ_FIRST(&strm->inqueue);
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	if (control == NULL) {
		return (ret);
	}
	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
		/*
		 * Ok, the guy at the top, which was being partially
		 * delivered, has completed, so we remove it. Note the
		 * pd_api flag was taken off when the chunk was merged on in
		 * sctp_queue_data_for_reasm below.
		 */
		nctl = TAILQ_NEXT(control, next_instrm);
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
		    control, control->end_added, control->mid,
		    control->top_fsn, control->fsn_included,
		    strm->last_mid_delivered);
		if (control->end_added) {
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_ORDERED) {
					panic("Huh control: %p on_q: %d -- not ordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
			}
			if (strm->pd_api_started && control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			control = nctl;
		}
	}
	if (strm->pd_api_started) {
		/*
		 * Can't add more; we must have gotten an un-ordered one
		 * above that is being partially delivered.
		 */
		return (0);
	}
deliver_more:
	next_to_del = strm->last_mid_delivered + 1;
	if (control) {
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
		    next_to_del);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
		    (control->first_frag_seen)) {
			int done;

			/* Ok we can deliver it onto the stream. */
			if (control->end_added) {
				/* We are done with it afterwards */
				if (control->on_strm_q) {
#ifdef INVARIANTS
					if (control->on_strm_q != SCTP_ON_ORDERED) {
						panic("Huh control: %p on_q: %d -- not ordered?",
						    control, control->on_strm_q);
					}
#endif
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					control->on_strm_q = 0;
				}
				ret++;
			}
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/*
				 * A singleton now slipping through - mark
				 * it non-revokable too
				 */
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
			} else if (control->end_added == 0) {
				/*
				 * Check if we can defer adding until it's
				 * all there
				 */
				if ((control->length < pd_point) || (strm->pd_api_started)) {
					/*
					 * Don't need it or cannot add more
					 * (one being delivered that way)
					 */
					goto out;
				}
			}
			done = (control->end_added) && (control->last_frag_seen);
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			strm->last_mid_delivered = next_to_del;
			if (done) {
				control = nctl;
				goto deliver_more;
			} else {
				/* We are now doing PD API */
				strm->pd_api_started = 1;
				control->pdapi_started = 1;
			}
		}
	}
out:
	return (ret);
}

uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_rlock)
{
	/*
	 * Given a control and a chunk, merge the data from the chk onto the
	 * control and free up the chunk resources.
	 */
	uint32_t added = 0;
	int i_locked = 0;

	if (control->on_read_q && (hold_rlock == 0)) {
		/*
		 * It's being pd-api'd, so we must take the read lock.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		i_locked = 1;
	}
	if (control->data == NULL) {
		control->data = chk->data;
		sctp_setup_tail_pointer(control);
	} else {
		sctp_add_to_tail_pointer(control, chk->data, &added);
	}
	control->fsn_included = chk->rec.data.fsn;
	asoc->size_on_reasm_queue -= chk->send_size;
	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
	chk->data = NULL;
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		control->first_frag_seen = 1;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
	}
	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
		/* It's complete */
		if ((control->on_strm_q) && (control->on_read_q)) {
			if (control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_strm_q == SCTP_ON_UNORDERED) {
				/* Unordered */
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
				/* Ordered */
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
#ifdef INVARIANTS
			} else if (control->on_strm_q) {
				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
				    control->on_strm_q);
#endif
			}
		}
		control->end_added = 1;
		control->last_frag_seen = 1;
	}
	if (i_locked) {
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	return (added);
}
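
/*
 * Accounting note on the merge above: the chunk's bytes always leave the
 * reassembly queue accounting (size_on_reasm_queue/cnt_on_reasm_queue)
 * here. When the control is already on the read queue, the tail-pointer
 * helpers instead charge the socket buffer via sctp_sballoc(), which is
 * why the INP read lock is taken locally when hold_rlock == 0.
 */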
1346 
1347 /*
1348  * Dump onto the re-assembly queue, in its proper place. After dumping on the
1349  * queue, see if anthing can be delivered. If so pull it off (or as much as
1350  * we can. If we run out of space then we must dump what we can and set the
1351  * appropriate flag to say we queued what we could.
1352  */
1353 static void
1354 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1355     struct sctp_queued_to_read *control,
1356     struct sctp_tmit_chunk *chk,
1357     int created_control,
1358     int *abort_flag, uint32_t tsn)
1359 {
1360 	uint32_t next_fsn;
1361 	struct sctp_tmit_chunk *at, *nat;
1362 	struct sctp_stream_in *strm;
1363 	int do_wakeup, unordered;
1364 	uint32_t lenadded;
1365 
1366 	strm = &asoc->strmin[control->sinfo_stream];
1367 	/*
1368 	 * For old un-ordered data chunks.
1369 	 */
1370 	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1371 		unordered = 1;
1372 	} else {
1373 		unordered = 0;
1374 	}
1375 	/* Must be added to the stream-in queue */
1376 	if (created_control) {
1377 		if (unordered == 0) {
1378 			sctp_ucount_incr(asoc->cnt_on_all_streams);
1379 		}
1380 		if (sctp_place_control_in_stream(strm, asoc, control)) {
1381 			/* Duplicate SSN? */
1382 			sctp_abort_in_reasm(stcb, control, chk,
1383 			    abort_flag,
1384 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1385 			sctp_clean_up_control(stcb, control);
1386 			return;
1387 		}
1388 		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1389 			/*
1390 			 * Ok we created this control and now lets validate
1391 			 * that its legal i.e. there is a B bit set, if not
1392 			 * and we have up to the cum-ack then its invalid.
1393 			 */
1394 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1395 				sctp_abort_in_reasm(stcb, control, chk,
1396 				    abort_flag,
1397 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1398 				return;
1399 			}
1400 		}
1401 	}
1402 	if ((asoc->idata_supported == 0) && (unordered == 1)) {
1403 		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1404 		return;
1405 	}
1406 	/*
1407 	 * Ok we must queue the chunk into the reasembly portion: o if its
1408 	 * the first it goes to the control mbuf. o if its not first but the
1409 	 * next in sequence it goes to the control, and each succeeding one
1410 	 * in order also goes. o if its not in order we place it on the list
1411 	 * in its place.
1412 	 */
1413 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1414 		/* Its the very first one. */
1415 		SCTPDBG(SCTP_DEBUG_XXX,
1416 		    "chunk is a first fsn: %u becomes fsn_included\n",
1417 		    chk->rec.data.fsn);
1418 		if (control->first_frag_seen) {
1419 			/*
1420 			 * Error on senders part, they either sent us two
1421 			 * data chunks with FIRST, or they sent two
1422 			 * un-ordered chunks that were fragmented at the
1423 			 * same time in the same stream.
1424 			 */
1425 			sctp_abort_in_reasm(stcb, control, chk,
1426 			    abort_flag,
1427 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1428 			return;
1429 		}
1430 		control->first_frag_seen = 1;
1431 		control->sinfo_ppid = chk->rec.data.ppid;
1432 		control->sinfo_tsn = chk->rec.data.tsn;
1433 		control->fsn_included = chk->rec.data.fsn;
1434 		control->data = chk->data;
1435 		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1436 		chk->data = NULL;
1437 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1438 		sctp_setup_tail_pointer(control);
1439 		asoc->size_on_all_streams += control->length;
1440 	} else {
1441 		/* Place the chunk in our list */
1442 		int inserted = 0;
1443 
1444 		if (control->last_frag_seen == 0) {
1445 			/* Still willing to raise highest FSN seen */
1446 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1447 				SCTPDBG(SCTP_DEBUG_XXX,
1448 				    "We have a new top_fsn: %u\n",
1449 				    chk->rec.data.fsn);
1450 				control->top_fsn = chk->rec.data.fsn;
1451 			}
1452 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1453 				SCTPDBG(SCTP_DEBUG_XXX,
1454 				    "The last fsn is now in place fsn: %u\n",
1455 				    chk->rec.data.fsn);
1456 				control->last_frag_seen = 1;
1457 			}
1458 			if (asoc->idata_supported || control->first_frag_seen) {
1459 				/*
1460 				 * For IDATA we always check since we know
1461 				 * that the first fragment is 0. For old
1462 				 * DATA we have to receive the first before
1463 				 * we know the first FSN (which is the TSN).
1464 				 */
1465 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1466 					/*
1467 					 * We have already delivered up to
1468 					 * this so its a dup
1469 					 */
1470 					sctp_abort_in_reasm(stcb, control, chk,
1471 					    abort_flag,
1472 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1473 					return;
1474 				}
1475 			}
1476 		} else {
1477 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1478 				/* Second last? huh? */
1479 				SCTPDBG(SCTP_DEBUG_XXX,
1480 				    "Duplicate last fsn: %u (top: %u) -- abort\n",
1481 				    chk->rec.data.fsn, control->top_fsn);
1482 				sctp_abort_in_reasm(stcb, control,
1483 				    chk, abort_flag,
1484 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1485 				return;
1486 			}
1487 			if (asoc->idata_supported || control->first_frag_seen) {
1488 				/*
1489 				 * For IDATA we always check since we know
1490 				 * that the first fragment is 0. For old
1491 				 * DATA we have to receive the first before
1492 				 * we know the first FSN (which is the TSN).
1493 				 */
1494 
1495 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1496 					/*
1497 					 * We have already delivered up to
1498 					 * this so its a dup
1499 					 */
1500 					SCTPDBG(SCTP_DEBUG_XXX,
1501 					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1502 					    chk->rec.data.fsn, control->fsn_included);
1503 					sctp_abort_in_reasm(stcb, control, chk,
1504 					    abort_flag,
1505 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1506 					return;
1507 				}
1508 			}
1509 			/*
1510 			 * validate not beyond top FSN if we have seen last
1511 			 * one
1512 			 */
1513 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1514 				SCTPDBG(SCTP_DEBUG_XXX,
1515 				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1516 				    chk->rec.data.fsn,
1517 				    control->top_fsn);
1518 				sctp_abort_in_reasm(stcb, control, chk,
1519 				    abort_flag,
1520 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1521 				return;
1522 			}
1523 		}
1524 		/*
1525 		 * If we reach here, we need to place the new chunk in the
1526 		 * reassembly for this control.
1527 		 */
1528 		SCTPDBG(SCTP_DEBUG_XXX,
1529 		    "chunk is a not first fsn: %u needs to be inserted\n",
1530 		    chk->rec.data.fsn);
1531 		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1532 			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1533 				/*
1534 				 * This one in queue is bigger than the new
1535 				 * one, insert the new one before at.
1536 				 */
1537 				SCTPDBG(SCTP_DEBUG_XXX,
1538 				    "Insert it before fsn: %u\n",
1539 				    at->rec.data.fsn);
1540 				asoc->size_on_reasm_queue += chk->send_size;
1541 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1542 				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1543 				inserted = 1;
1544 				break;
1545 			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
1546 				/*
1547 				 * Gak, He sent me a duplicate str seq
1548 				 * number
1549 				 */
1550 				/*
1551 				 * foo bar, I guess I will just free this
1552 				 * new guy, should we abort too? FIX ME
1553 				 * MAYBE? Or it COULD be that the SSN's have
1554 				 * wrapped. Maybe I should compare to TSN
1555 				 * somehow... sigh for now just blow away
1556 				 * the chunk!
1557 				 */
1558 				SCTPDBG(SCTP_DEBUG_XXX,
1559 				    "Duplicate to fsn: %u -- abort\n",
1560 				    at->rec.data.fsn);
1561 				sctp_abort_in_reasm(stcb, control,
1562 				    chk, abort_flag,
1563 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1564 				return;
1565 			}
1566 		}
1567 		if (inserted == 0) {
1568 			/* Goes on the end */
1569 			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1570 			    chk->rec.data.fsn);
1571 			asoc->size_on_reasm_queue += chk->send_size;
1572 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1573 			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1574 		}
1575 	}
1576 	/*
1577 	 * Ok lets see if we can suck any up into the control structure that
1578 	 * are in seq if it makes sense.
1579 	 */
1580 	do_wakeup = 0;
1581 	/*
1582 	 * If the first fragment has not been seen there is no sense in
1583 	 * looking.
1584 	 */
1585 	if (control->first_frag_seen) {
1586 		next_fsn = control->fsn_included + 1;
1587 		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1588 			if (at->rec.data.fsn == next_fsn) {
1589 				/* We can add this one now to the control */
1590 				SCTPDBG(SCTP_DEBUG_XXX,
1591 				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1592 				    control, at,
1593 				    at->rec.data.fsn,
1594 				    next_fsn, control->fsn_included);
1595 				TAILQ_REMOVE(&control->reasm, at, sctp_next);
1596 				lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1597 				if (control->on_read_q) {
1598 					do_wakeup = 1;
1599 				} else {
1600 					/*
1601 					 * We only add to the
1602 					 * size-on-all-streams if it's not on
1603 					 * the read q. The read q flag will
1604 					 * cause a sballoc, so it's accounted
1605 					 * for there.
1606 					 */
1607 					asoc->size_on_all_streams += lenadded;
1608 				}
1609 				next_fsn++;
1610 				if (control->end_added && control->pdapi_started) {
1611 					if (strm->pd_api_started) {
1612 						strm->pd_api_started = 0;
1613 						control->pdapi_started = 0;
1614 					}
1615 					if (control->on_read_q == 0) {
1616 						sctp_add_to_readq(stcb->sctp_ep, stcb,
1617 						    control,
1618 						    &stcb->sctp_socket->so_rcv, control->end_added,
1619 						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1620 					}
1621 					break;
1622 				}
1623 			} else {
1624 				break;
1625 			}
1626 		}
1627 	}
1628 	if (do_wakeup) {
1629 		/* Need to wakeup the reader */
1630 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1631 	}
1632 }
1633 
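/*
 * Look up an existing reassembly entry for a message. For ordered
 * messages, search the in-order queue for a matching MID. For unordered
 * messages with I-DATA support, search the unordered queue by MID; with
 * old-style DATA chunks, unordered fragments carry no usable sequence
 * number, so at most one unordered reassembly can be in progress per
 * stream and we simply take the head of the queue.
 */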
1634 static struct sctp_queued_to_read *
1635 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1636 {
1637 	struct sctp_queued_to_read *control;
1638 
1639 	if (ordered) {
1640 		TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1641 			if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1642 				break;
1643 			}
1644 		}
1645 	} else {
1646 		if (idata_supported) {
1647 			TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1648 				if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1649 					break;
1650 				}
1651 			}
1652 		} else {
1653 			control = TAILQ_FIRST(&strm->uno_inqueue);
1654 		}
1655 	}
1656 	return (control);
1657 }
1658 
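/*
 * Process a single DATA or I-DATA chunk. Returns 1 if the chunk was
 * accepted into the association, 0 if it was dropped (duplicate, out of
 * resources, invalid stream, ...). On a fatal protocol error *abort_flag
 * is set and the association is torn down; *break_flag is set when
 * resource limits require us to stop processing the rest of the packet.
 */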
1659 static int
1660 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1661     struct mbuf **m, int offset, int chk_length,
1662     struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1663     int *break_flag, int last_chunk, uint8_t chk_type)
1664 {
1665 	/* Process a data chunk */
1667 	struct sctp_tmit_chunk *chk;
1668 	uint32_t tsn, fsn, gap, mid;
1669 	struct mbuf *dmbuf;
1670 	int the_len;
1671 	int need_reasm_check = 0;
1672 	uint16_t sid;
1673 	struct mbuf *op_err;
1674 	char msg[SCTP_DIAG_INFO_LEN];
1675 	struct sctp_queued_to_read *control, *ncontrol;
1676 	uint32_t ppid;
1677 	uint8_t chk_flags;
1678 	struct sctp_stream_reset_list *liste;
1679 	int ordered;
1680 	size_t clen;
1681 	int created_control = 0;
1682 
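	/*
	 * Pull the fixed chunk header fields out of the mbuf chain. DATA
	 * and I-DATA differ on the wire: DATA carries a 16-bit SSN and
	 * identifies fragments implicitly by TSN (so fsn = tsn), while
	 * I-DATA carries a 32-bit MID plus an explicit FSN that shares its
	 * field with the PPID (the PPID is only present on the first
	 * fragment, where the FSN is implicitly 0).
	 */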
1683 	if (chk_type == SCTP_IDATA) {
1684 		struct sctp_idata_chunk *chunk, chunk_buf;
1685 
1686 		chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1687 		    sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1688 		chk_flags = chunk->ch.chunk_flags;
1689 		clen = sizeof(struct sctp_idata_chunk);
1690 		tsn = ntohl(chunk->dp.tsn);
1691 		sid = ntohs(chunk->dp.sid);
1692 		mid = ntohl(chunk->dp.mid);
1693 		if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1694 			fsn = 0;
1695 			ppid = chunk->dp.ppid_fsn.ppid;
1696 		} else {
1697 			fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1698 			ppid = 0xffffffff;	/* Use as an invalid value. */
1699 		}
1700 	} else {
1701 		struct sctp_data_chunk *chunk, chunk_buf;
1702 
1703 		chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1704 		    sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1705 		chk_flags = chunk->ch.chunk_flags;
1706 		clen = sizeof(struct sctp_data_chunk);
1707 		tsn = ntohl(chunk->dp.tsn);
1708 		sid = ntohs(chunk->dp.sid);
1709 		mid = (uint32_t)(ntohs(chunk->dp.ssn));
1710 		fsn = tsn;
1711 		ppid = chunk->dp.ppid;
1712 	}
1713 	if ((size_t)chk_length == clen) {
1714 		/*
1715 		 * Need to send an abort since we had an empty data chunk.
1716 		 */
1717 		op_err = sctp_generate_no_user_data_cause(tsn);
1718 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1719 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1720 		*abort_flag = 1;
1721 		return (0);
1722 	}
1723 	if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1724 		asoc->send_sack = 1;
1725 	}
1726 	ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1727 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1728 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1729 	}
1730 	if (stcb == NULL) {
1731 		return (0);
1732 	}
1733 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
1734 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1735 		/* It is a duplicate */
1736 		SCTP_STAT_INCR(sctps_recvdupdata);
1737 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1738 			/* Record a dup for the next outbound sack */
1739 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1740 			asoc->numduptsns++;
1741 		}
1742 		asoc->send_sack = 1;
1743 		return (0);
1744 	}
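	/*
	 * Each bit in the mapping arrays stands for one TSN, with bit 0 of
	 * byte 0 representing mapping_array_base_tsn. For example, if the
	 * base TSN is 1000 and this chunk carries TSN 1003, the gap is 3
	 * and the chunk occupies bit 3 of byte 0.
	 */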
1745 	/* Calculate the number of TSNs between the base and this TSN */
1746 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1747 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1748 		/* Can't hold the bit even at max mapping array size, toss it */
1749 		return (0);
1750 	}
1751 	if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1752 		SCTP_TCB_LOCK_ASSERT(stcb);
1753 		if (sctp_expand_mapping_array(asoc, gap)) {
1754 			/* Can't expand, drop it */
1755 			return (0);
1756 		}
1757 	}
1758 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1759 		*high_tsn = tsn;
1760 	}
1761 	/* See if we have received this one already */
1762 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1763 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1764 		SCTP_STAT_INCR(sctps_recvdupdata);
1765 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1766 			/* Record a dup for the next outbound sack */
1767 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1768 			asoc->numduptsns++;
1769 		}
1770 		asoc->send_sack = 1;
1771 		return (0);
1772 	}
1773 	/*
1774 	 * Check the GONE flag. Duplicates would already have caused a SACK
1775 	 * to be sent up above.
1776 	 */
1777 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1778 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1779 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1780 		/*
1781 		 * The socket is gone, so there is no longer a
1782 		 * receiver. Send the peer an ABORT!
1783 		 */
1784 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1785 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1786 		*abort_flag = 1;
1787 		return (0);
1788 	}
1789 	/*
1790 	 * Now before going further we see if there is room. If NOT then we
1791 	 * MAY let one through only IF this TSN is the one we are waiting
1792 	 * for on a partial delivery API.
1793 	 */
1794 
1795 	/* Is the stream valid? */
1796 	if (sid >= asoc->streamincnt) {
1797 		struct sctp_error_invalid_stream *cause;
1798 
1799 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1800 		    0, M_NOWAIT, 1, MT_DATA);
1801 		if (op_err != NULL) {
1802 			/* add some space up front so prepend will work well */
1803 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1804 			cause = mtod(op_err, struct sctp_error_invalid_stream *);
1805 			/*
1806 			 * Error causes are just parameters; this one has two
1807 			 * back-to-back headers: one with the error type and
1808 			 * size, the other with the stream id and a reserved field.
1809 			 */
1810 			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1811 			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1812 			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1813 			cause->stream_id = htons(sid);
1814 			cause->reserved = htons(0);
1815 			sctp_queue_op_err(stcb, op_err);
1816 		}
1817 		SCTP_STAT_INCR(sctps_badsid);
1818 		SCTP_TCB_LOCK_ASSERT(stcb);
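		/*
		 * Even though the chunk is discarded, mark its TSN as
		 * received so the cum-ack can advance and the peer does not
		 * keep retransmitting it.
		 */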
1819 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1820 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1821 			asoc->highest_tsn_inside_nr_map = tsn;
1822 		}
1823 		if (tsn == (asoc->cumulative_tsn + 1)) {
1824 			/* Update cum-ack */
1825 			asoc->cumulative_tsn = tsn;
1826 		}
1827 		return (0);
1828 	}
1829 	/*
1830 	 * If its a fragmented message, lets see if we can find the control
1831 	 * on the reassembly queues.
1832 	 */
1833 	if ((chk_type == SCTP_IDATA) &&
1834 	    ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1835 	    (fsn == 0)) {
1836 		/*
1837 		 * The first *must* be fsn 0, and other (middle/end) pieces
1838 		 * can *not* be fsn 0. XXX: This can happen in case of a
1839 		 * wrap around. Ignore that for now.
1840 		 */
1841 		snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x",
1842 		    mid, chk_flags);
1843 		goto err_out;
1844 	}
1845 	control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1846 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1847 	    chk_flags, control);
1848 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1849 		/* See if we can find the re-assembly entity */
1850 		if (control != NULL) {
1851 			/* We found something, does it belong? */
1852 			if (ordered && (mid != control->mid)) {
1853 				snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1854 		err_out:
1855 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1856 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1857 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1858 				*abort_flag = 1;
1859 				return (0);
1860 			}
1861 			if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1862 				/*
1863 				 * We can't have a switched order with an
1864 				 * unordered chunk
1865 				 */
1866 				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1867 				    tsn);
1868 				goto err_out;
1869 			}
1870 			if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1871 				/*
1872 				 * We can't have a switched unordered with an
1873 				 * ordered chunk
1874 				 */
1875 				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1876 				    tsn);
1877 				goto err_out;
1878 			}
1879 		}
1880 	} else {
1881 		/*
1882 		 * It's a complete segment. Let's validate we don't have a
1883 		 * re-assembly going on with the same Stream/Seq (for
1884 		 * ordered) or in the same Stream for unordered.
1885 		 */
1886 		if (control != NULL) {
1887 			if (ordered || asoc->idata_supported) {
1888 				SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1889 				    chk_flags, mid);
1890 				snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1891 				goto err_out;
1892 			} else {
1893 				if ((tsn == control->fsn_included + 1) &&
1894 				    (control->end_added == 0)) {
1895 					snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included);
1896 					goto err_out;
1897 				} else {
1898 					control = NULL;
1899 				}
1900 			}
1901 		}
1902 	}
1903 	/* now do the tests */
1904 	if (((asoc->cnt_on_all_streams +
1905 	    asoc->cnt_on_reasm_queue +
1906 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1907 	    (((int)asoc->my_rwnd) <= 0)) {
1908 		/*
1909 		 * When we have NO room in the rwnd we check to make sure
1910 		 * the reader is doing its job...
1911 		 */
1912 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1913 			/* some to read, wake-up */
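			/*
			 * On platforms that take the socket lock here we
			 * must drop the TCB lock first to preserve the lock
			 * order (socket lock before TCB lock), holding a
			 * reference so the association cannot be freed
			 * while unlocked.
			 */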
1914 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1915 			struct socket *so;
1916 
1917 			so = SCTP_INP_SO(stcb->sctp_ep);
1918 			atomic_add_int(&stcb->asoc.refcnt, 1);
1919 			SCTP_TCB_UNLOCK(stcb);
1920 			SCTP_SOCKET_LOCK(so, 1);
1921 			SCTP_TCB_LOCK(stcb);
1922 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1923 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1924 				/* assoc was freed while we were unlocked */
1925 				SCTP_SOCKET_UNLOCK(so, 1);
1926 				return (0);
1927 			}
1928 #endif
1929 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1930 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1931 			SCTP_SOCKET_UNLOCK(so, 1);
1932 #endif
1933 		}
1934 		/* now is it in the mapping array of what we have accepted? */
1935 		if (chk_type == SCTP_DATA) {
1936 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1937 			    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1938 				/* Not in the valid range, dump it */
1939 		dump_packet:
1940 				sctp_set_rwnd(stcb, asoc);
1941 				if ((asoc->cnt_on_all_streams +
1942 				    asoc->cnt_on_reasm_queue +
1943 				    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1944 					SCTP_STAT_INCR(sctps_datadropchklmt);
1945 				} else {
1946 					SCTP_STAT_INCR(sctps_datadroprwnd);
1947 				}
1948 				*break_flag = 1;
1949 				return (0);
1950 			}
1951 		} else {
1952 			if (control == NULL) {
1953 				goto dump_packet;
1954 			}
1955 			if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1956 				goto dump_packet;
1957 			}
1958 		}
1959 	}
1960 #ifdef SCTP_ASOCLOG_OF_TSNS
1961 	SCTP_TCB_LOCK_ASSERT(stcb);
1962 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1963 		asoc->tsn_in_at = 0;
1964 		asoc->tsn_in_wrapped = 1;
1965 	}
1966 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1967 	asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1968 	asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
1969 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1970 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chk_flags;
1971 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1972 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1973 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1974 	asoc->tsn_in_at++;
1975 #endif
1976 	/*
1977 	 * Before we continue lets validate that we are not being fooled by
1978 	 * an evil attacker. We can only have Nk chunks based on our TSN
1979 	 * spread allowed by the mapping array N * 8 bits, so there is no
1980 	 * way our stream sequence numbers could have wrapped. We of course
1981 	 * only validate the FIRST fragment so the bit must be set.
1982 	 */
1983 	if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
1984 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1985 	    (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
1986 	    SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
1987 		/* The incoming sseq is behind where we last delivered? */
1988 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
1989 		    mid, asoc->strmin[sid].last_mid_delivered);
1990 
1991 		if (asoc->idata_supported) {
1992 			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
1993 			    asoc->strmin[sid].last_mid_delivered,
1994 			    tsn,
1995 			    sid,
1996 			    mid);
1997 		} else {
1998 			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1999 			    (uint16_t)asoc->strmin[sid].last_mid_delivered,
2000 			    tsn,
2001 			    sid,
2002 			    (uint16_t)mid);
2003 		}
2004 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2005 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
2006 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
2007 		*abort_flag = 1;
2008 		return (0);
2009 	}
2010 	if (chk_type == SCTP_IDATA) {
2011 		the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2012 	} else {
2013 		the_len = (chk_length - sizeof(struct sctp_data_chunk));
2014 	}
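	/*
	 * Get the chunk payload into its own mbuf chain: if more chunks
	 * follow in this packet we must copy the payload out, but if this
	 * is the last chunk we can steal the packet's mbuf chain, trimming
	 * the chunk header from the front and any padding from the tail.
	 */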
2015 	if (last_chunk == 0) {
2016 		if (chk_type == SCTP_IDATA) {
2017 			dmbuf = SCTP_M_COPYM(*m,
2018 			    (offset + sizeof(struct sctp_idata_chunk)),
2019 			    the_len, M_NOWAIT);
2020 		} else {
2021 			dmbuf = SCTP_M_COPYM(*m,
2022 			    (offset + sizeof(struct sctp_data_chunk)),
2023 			    the_len, M_NOWAIT);
2024 		}
2025 #ifdef SCTP_MBUF_LOGGING
2026 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2027 			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2028 		}
2029 #endif
2030 	} else {
2031 		/* We can steal the last chunk */
2032 		int l_len;
2033 
2034 		dmbuf = *m;
2035 		/* lop off the top part */
2036 		if (chk_type == SCTP_IDATA) {
2037 			m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2038 		} else {
2039 			m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2040 		}
2041 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2042 			l_len = SCTP_BUF_LEN(dmbuf);
2043 		} else {
2044 			/*
2045 			 * Need to count up the size; hopefully we do not
2046 			 * hit this too often.
2047 			 */
2048 			struct mbuf *lat;
2049 
2050 			l_len = 0;
2051 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2052 				l_len += SCTP_BUF_LEN(lat);
2053 			}
2054 		}
2055 		if (l_len > the_len) {
2056 			/* Trim the extra rounding bytes off the end too */
2057 			m_adj(dmbuf, -(l_len - the_len));
2058 		}
2059 	}
2060 	if (dmbuf == NULL) {
2061 		SCTP_STAT_INCR(sctps_nomem);
2062 		return (0);
2063 	}
2064 	/*
2065 	 * Now no matter what, we need a control, get one if we don't have
2066 	 * one (we may have gotten it above when we found the message was
2067 	 * fragmented).
2068 	 */
2069 	if (control == NULL) {
2070 		sctp_alloc_a_readq(stcb, control);
2071 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2072 		    ppid,
2073 		    sid,
2074 		    chk_flags,
2075 		    NULL, fsn, mid);
2076 		if (control == NULL) {
2077 			SCTP_STAT_INCR(sctps_nomem);
2078 			return (0);
2079 		}
2080 		if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2081 			struct mbuf *mm;
2082 
2083 			control->data = dmbuf;
2084 			for (mm = control->data; mm; mm = mm->m_next) {
2085 				control->length += SCTP_BUF_LEN(mm);
2086 			}
2087 			control->tail_mbuf = NULL;
2088 			control->end_added = 1;
2089 			control->last_frag_seen = 1;
2090 			control->first_frag_seen = 1;
2091 			control->fsn_included = fsn;
2092 			control->top_fsn = fsn;
2093 		}
2094 		created_control = 1;
2095 	}
2096 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2097 	    chk_flags, ordered, mid, control);
2098 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2099 	    TAILQ_EMPTY(&asoc->resetHead) &&
2100 	    ((ordered == 0) ||
2101 	    (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2102 	    TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2103 		/* Candidate for express delivery */
2104 		/*
2105 		 * It's not fragmented, no PD-API is up, nothing is in the
2106 		 * delivery queue, it's un-ordered OR ordered and the next to
2107 		 * deliver AND nothing else is stuck on the stream queue,
2108 		 * and there is room for it in the socket buffer. Let's just
2109 		 * stuff it up the buffer....
2110 		 */
2111 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2112 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2113 			asoc->highest_tsn_inside_nr_map = tsn;
2114 		}
2115 		SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2116 		    control, mid);
2117 
2118 		sctp_add_to_readq(stcb->sctp_ep, stcb,
2119 		    control, &stcb->sctp_socket->so_rcv,
2120 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2121 
2122 		if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2123 			/* for ordered, bump what we delivered */
2124 			asoc->strmin[sid].last_mid_delivered++;
2125 		}
2126 		SCTP_STAT_INCR(sctps_recvexpress);
2127 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2128 			sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2129 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
2130 		}
2131 		control = NULL;
2132 		goto finish_express_del;
2133 	}
2134 	/* Now will we need a chunk too? */
2135 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2136 		sctp_alloc_a_chunk(stcb, chk);
2137 		if (chk == NULL) {
2138 			/* No memory so we drop the chunk */
2139 			SCTP_STAT_INCR(sctps_nomem);
2140 			if (last_chunk == 0) {
2141 				/* we copied it, free the copy */
2142 				sctp_m_freem(dmbuf);
2143 			}
2144 			return (0);
2145 		}
2146 		chk->rec.data.tsn = tsn;
2147 		chk->no_fr_allowed = 0;
2148 		chk->rec.data.fsn = fsn;
2149 		chk->rec.data.mid = mid;
2150 		chk->rec.data.sid = sid;
2151 		chk->rec.data.ppid = ppid;
2152 		chk->rec.data.context = stcb->asoc.context;
2153 		chk->rec.data.doing_fast_retransmit = 0;
2154 		chk->rec.data.rcv_flags = chk_flags;
2155 		chk->asoc = asoc;
2156 		chk->send_size = the_len;
2157 		chk->whoTo = net;
2158 		SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2159 		    chk,
2160 		    control, mid);
2161 		atomic_add_int(&net->ref_count, 1);
2162 		chk->data = dmbuf;
2163 	}
2164 	/* Set the appropriate TSN mark */
2165 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2166 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2167 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2168 			asoc->highest_tsn_inside_nr_map = tsn;
2169 		}
2170 	} else {
2171 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2172 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2173 			asoc->highest_tsn_inside_map = tsn;
2174 		}
2175 	}
2176 	/* Now is it complete (i.e. not fragmented)? */
2177 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2178 		/*
2179 		 * Special check for when streams are resetting. We could be
2180 		 * smarter about this and check the actual stream to see
2181 		 * whether it is being reset... that way we would not create
2182 		 * head-of-line blocking between streams being reset and
2183 		 * those that are not.
2184 		 *
2185 		 */
2186 		if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2187 		    SCTP_TSN_GT(tsn, liste->tsn)) {
2188 			/*
2189 			 * Yep, it's past where we need to reset... go ahead
2190 			 * and queue it.
2191 			 */
2192 			if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2193 				/* first one on */
2194 				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2195 			} else {
2196 				struct sctp_queued_to_read *lcontrol, *nlcontrol;
2197 				unsigned char inserted = 0;
2198 
2199 				TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2200 					if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2202 						continue;
2203 					} else {
2204 						/* found it */
2205 						TAILQ_INSERT_BEFORE(lcontrol, control, next);
2206 						inserted = 1;
2207 						break;
2208 					}
2209 				}
2210 				if (inserted == 0) {
2211 					/*
2212 					 * Not inserted before any existing
2213 					 * entry, so it must be put at the
2214 					 * end.
2215 					 */
2216 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2217 				}
2218 			}
2219 			goto finish_express_del;
2220 		}
2221 		if (chk_flags & SCTP_DATA_UNORDERED) {
2222 			/* queue directly into socket buffer */
2223 			SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2224 			    control, mid);
2225 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2226 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2227 			    control,
2228 			    &stcb->sctp_socket->so_rcv, 1,
2229 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2230 
2231 		} else {
2232 			SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2233 			    mid);
2234 			sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2235 			if (*abort_flag) {
2236 				if (last_chunk) {
2237 					*m = NULL;
2238 				}
2239 				return (0);
2240 			}
2241 		}
2242 		goto finish_express_del;
2243 	}
2244 	/* If we reach here its a reassembly */
2245 	need_reasm_check = 1;
2246 	SCTPDBG(SCTP_DEBUG_XXX,
2247 	    "Queue data to stream for reasm control: %p MID: %u\n",
2248 	    control, mid);
2249 	sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2250 	if (*abort_flag) {
2251 		/*
2252 		 * the assoc is now gone and chk was put onto the reasm
2253 		 * queue, which has all been freed.
2254 		 */
2255 		if (last_chunk) {
2256 			*m = NULL;
2257 		}
2258 		return (0);
2259 	}
2260 finish_express_del:
2261 	/* Here we tidy up things */
2262 	if (tsn == (asoc->cumulative_tsn + 1)) {
2263 		/* Update cum-ack */
2264 		asoc->cumulative_tsn = tsn;
2265 	}
2266 	if (last_chunk) {
2267 		*m = NULL;
2268 	}
2269 	if (ordered) {
2270 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2271 	} else {
2272 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2273 	}
2274 	SCTP_STAT_INCR(sctps_recvdata);
2275 	/* Set it present please */
2276 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2277 		sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2278 	}
2279 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2280 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2281 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2282 	}
2283 	if (need_reasm_check) {
2284 		(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2285 		need_reasm_check = 0;
2286 	}
2287 	/* check the special flag for stream resets */
2288 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2289 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2290 		/*
2291 		 * We have finished working through the backlogged TSNs; now
2292 		 * it is time to reset streams. 1: call the reset function, 2:
2293 		 * free the pending_reply space, 3: distribute any chunks in
2294 		 * the pending_reply_queue.
2295 		 */
2296 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2297 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2298 		sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2299 		SCTP_FREE(liste, SCTP_M_STRESET);
2300 		/* sa_ignore FREED_MEMORY */
2301 		liste = TAILQ_FIRST(&asoc->resetHead);
2302 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2303 			/* All can be removed */
2304 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2305 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2306 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2307 				if (*abort_flag) {
2308 					return (0);
2309 				}
2310 				if (need_reasm_check) {
2311 					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2312 					need_reasm_check = 0;
2313 				}
2314 			}
2315 		} else {
2316 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2317 				if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2318 					break;
2319 				}
2320 				/*
2321 				 * If control->sinfo_tsn is <= liste->tsn we
2322 				 * can process it, which is the negation of
2323 				 * control->sinfo_tsn > liste->tsn above.
2324 				 */
2325 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2326 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2327 				if (*abort_flag) {
2328 					return (0);
2329 				}
2330 				if (need_reasm_check) {
2331 					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2332 					need_reasm_check = 0;
2333 				}
2334 			}
2335 		}
2336 	}
2337 	return (1);
2338 }
2339 
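/*
 * sctp_map_lookup_tab[x] is the number of consecutive one bits in x
 * counting up from the least significant bit, i.e. how many in-sequence
 * TSNs the low end of a mapping array byte represents. For example,
 * 0x17 (binary 00010111) has three consecutive low one bits, so
 * sctp_map_lookup_tab[0x17] == 3.
 */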
2340 static const int8_t sctp_map_lookup_tab[256] = {
2341 	0, 1, 0, 2, 0, 1, 0, 3,
2342 	0, 1, 0, 2, 0, 1, 0, 4,
2343 	0, 1, 0, 2, 0, 1, 0, 3,
2344 	0, 1, 0, 2, 0, 1, 0, 5,
2345 	0, 1, 0, 2, 0, 1, 0, 3,
2346 	0, 1, 0, 2, 0, 1, 0, 4,
2347 	0, 1, 0, 2, 0, 1, 0, 3,
2348 	0, 1, 0, 2, 0, 1, 0, 6,
2349 	0, 1, 0, 2, 0, 1, 0, 3,
2350 	0, 1, 0, 2, 0, 1, 0, 4,
2351 	0, 1, 0, 2, 0, 1, 0, 3,
2352 	0, 1, 0, 2, 0, 1, 0, 5,
2353 	0, 1, 0, 2, 0, 1, 0, 3,
2354 	0, 1, 0, 2, 0, 1, 0, 4,
2355 	0, 1, 0, 2, 0, 1, 0, 3,
2356 	0, 1, 0, 2, 0, 1, 0, 7,
2357 	0, 1, 0, 2, 0, 1, 0, 3,
2358 	0, 1, 0, 2, 0, 1, 0, 4,
2359 	0, 1, 0, 2, 0, 1, 0, 3,
2360 	0, 1, 0, 2, 0, 1, 0, 5,
2361 	0, 1, 0, 2, 0, 1, 0, 3,
2362 	0, 1, 0, 2, 0, 1, 0, 4,
2363 	0, 1, 0, 2, 0, 1, 0, 3,
2364 	0, 1, 0, 2, 0, 1, 0, 6,
2365 	0, 1, 0, 2, 0, 1, 0, 3,
2366 	0, 1, 0, 2, 0, 1, 0, 4,
2367 	0, 1, 0, 2, 0, 1, 0, 3,
2368 	0, 1, 0, 2, 0, 1, 0, 5,
2369 	0, 1, 0, 2, 0, 1, 0, 3,
2370 	0, 1, 0, 2, 0, 1, 0, 4,
2371 	0, 1, 0, 2, 0, 1, 0, 3,
2372 	0, 1, 0, 2, 0, 1, 0, 8
2373 };
2374 
2376 void
2377 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2378 {
2379 	/*
2380 	 * Now we also need to check the mapping array in a couple of ways.
2381 	 * 1) Did we move the cum-ack point?
2382 	 *
2383 	 * When you first glance at this you might think that all entries
2384 	 * that make up the position of the cum-ack would be in the
2385 	 * nr-mapping array only.. i.e. things up to the cum-ack are always
2386 	 * nr-mapping array only... i.e. things up to the cum-ack are always
2387 	 * deliverable. That's true with one exception: when it's a fragmented
2388 	 * of it) is in place. So we must OR the nr_mapping_array and
2389 	 * mapping_array to get a true picture of the cum-ack.
2390 	 */
2391 	struct sctp_association *asoc;
2392 	int at;
2393 	uint8_t val;
2394 	int slide_from, slide_end, lgap, distance;
2395 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2396 
2397 	asoc = &stcb->asoc;
2398 
2399 	old_cumack = asoc->cumulative_tsn;
2400 	old_base = asoc->mapping_array_base_tsn;
2401 	old_highest = asoc->highest_tsn_inside_map;
2402 	/*
2403 	 * We could probably improve this a small bit by calculating the
2404 	 * offset of the current cum-ack as the starting point.
2405 	 */
2406 	at = 0;
2407 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2408 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2409 		if (val == 0xff) {
2410 			at += 8;
2411 		} else {
2412 			/* there is a 0 bit */
2413 			at += sctp_map_lookup_tab[val];
2414 			break;
2415 		}
2416 	}
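	/*
	 * 'at' now counts the TSNs that are in sequence from the base:
	 * every 0xff byte contributes 8, and the lookup table adds the run
	 * of low one bits in the first byte containing a hole. The cum-ack
	 * is therefore base + at - 1.
	 */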
2417 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2418 
2419 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2420 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2421 #ifdef INVARIANTS
2422 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2423 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2424 #else
2425 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2426 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2427 		sctp_print_mapping_array(asoc);
2428 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2429 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2430 		}
2431 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2432 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2433 #endif
2434 	}
2435 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2436 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2437 	} else {
2438 		highest_tsn = asoc->highest_tsn_inside_map;
2439 	}
2440 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2441 		/* The complete array was completed by a single FR */
2442 		/* highest becomes the cum-ack */
2443 		int clr;
2444 #ifdef INVARIANTS
2445 		unsigned int i;
2446 #endif
2447 
2448 		/* clear the array */
2449 		clr = ((at + 7) >> 3);
2450 		if (clr > asoc->mapping_array_size) {
2451 			clr = asoc->mapping_array_size;
2452 		}
2453 		memset(asoc->mapping_array, 0, clr);
2454 		memset(asoc->nr_mapping_array, 0, clr);
2455 #ifdef INVARIANTS
2456 		for (i = 0; i < asoc->mapping_array_size; i++) {
2457 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2458 				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2459 				sctp_print_mapping_array(asoc);
2460 			}
2461 		}
2462 #endif
2463 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2464 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2465 	} else if (at >= 8) {
2466 		/* we can slide the mapping array down */
2467 		/* slide_from holds where we hit the first NON 0xff byte */
2468 
2469 		/*
2470 		 * now calculate the ceiling of the move using our highest
2471 		 * TSN value
2472 		 */
2473 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2474 		slide_end = (lgap >> 3);
2475 		if (slide_end < slide_from) {
2476 			sctp_print_mapping_array(asoc);
2477 #ifdef INVARIANTS
2478 			panic("impossible slide");
2479 #else
2480 			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2481 			    lgap, slide_end, slide_from, at);
2482 			return;
2483 #endif
2484 		}
2485 		if (slide_end > asoc->mapping_array_size) {
2486 #ifdef INVARIANTS
2487 			panic("would overrun buffer");
2488 #else
2489 			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2490 			    asoc->mapping_array_size, slide_end);
2491 			slide_end = asoc->mapping_array_size;
2492 #endif
2493 		}
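		/*
		 * Slide bytes slide_from..slide_end down to the front of
		 * the arrays, zero the bytes that moved, and advance the
		 * base TSN by slide_from * 8 bits.
		 */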
2494 		distance = (slide_end - slide_from) + 1;
2495 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2496 			sctp_log_map(old_base, old_cumack, old_highest,
2497 			    SCTP_MAP_PREPARE_SLIDE);
2498 			sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2499 			    (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2500 		}
2501 		if (distance + slide_from > asoc->mapping_array_size ||
2502 		    distance < 0) {
2503 			/*
2504 			 * Here we do NOT slide forward the array so that
2505 			 * hopefully when more data comes in to fill it up
2506 			 * we will be able to slide it forward. Really, this
2507 			 * should not happen.
2508 			 */
2509 
2510 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2511 				sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2512 				    (uint32_t)asoc->mapping_array_size,
2513 				    SCTP_MAP_SLIDE_NONE);
2514 			}
2515 		} else {
2516 			int ii;
2517 
2518 			for (ii = 0; ii < distance; ii++) {
2519 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2520 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2521 
2522 			}
2523 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2524 				asoc->mapping_array[ii] = 0;
2525 				asoc->nr_mapping_array[ii] = 0;
2526 			}
2527 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2528 				asoc->highest_tsn_inside_map += (slide_from << 3);
2529 			}
2530 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2531 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2532 			}
2533 			asoc->mapping_array_base_tsn += (slide_from << 3);
2534 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2535 				sctp_log_map(asoc->mapping_array_base_tsn,
2536 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2537 				    SCTP_MAP_SLIDE_RESULT);
2538 			}
2539 		}
2540 	}
2541 }
2542 
2543 void
2544 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2545 {
2546 	struct sctp_association *asoc;
2547 	uint32_t highest_tsn;
2548 	int is_a_gap;
2549 
2550 	sctp_slide_mapping_arrays(stcb);
2551 	asoc = &stcb->asoc;
2552 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2553 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2554 	} else {
2555 		highest_tsn = asoc->highest_tsn_inside_map;
2556 	}
2557 	/* Is there a gap now? */
2558 	is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2559 
2560 	/*
2561 	 * Now we need to see if we need to queue a sack or just start the
2562 	 * timer (if allowed).
2563 	 */
2564 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2565 		/*
2566 		 * Ok, special case: in the SHUTDOWN-SENT state we make
2567 		 * sure the SACK timer is off and instead send a SHUTDOWN
2568 		 * and, if there is a gap, a SACK.
2569 		 */
2570 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2571 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2572 			    stcb->sctp_ep, stcb, NULL,
2573 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2574 		}
2575 		sctp_send_shutdown(stcb,
2576 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2577 		if (is_a_gap) {
2578 			sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2579 		}
2580 	} else {
2581 		/*
2582 		 * CMT DAC algorithm: increase number of packets received
2583 		 * since last ack
2584 		 */
2585 		stcb->asoc.cmt_dac_pkts_rcvd++;
2586 
2587 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2588 							 * SACK */
2589 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2590 							 * longer is one */
2591 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2592 		    (is_a_gap) ||	/* is still a gap */
2593 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2594 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2595 		    ) {
2597 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2598 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2599 			    (stcb->asoc.send_sack == 0) &&
2600 			    (stcb->asoc.numduptsns == 0) &&
2601 			    (stcb->asoc.delayed_ack) &&
2602 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2603 
2604 				/*
2605 				 * CMT DAC algorithm: With CMT, delay acks
2606 				 * even in the face of reordering.
2607 				 *
2608 				 * Therefore, acks that do not have to be
2609 				 * sent because of the above reasons will
2610 				 * be delayed. That is, acks that would
2611 				 * have been sent due to gap reports will
2612 				 * be delayed with DAC. Start the delayed
2613 				 * ack timer.
2614 				 */
2615 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2616 				    stcb->sctp_ep, stcb, NULL);
2617 			} else {
2618 				/*
2619 				 * Ok, we must build a SACK since the timer
2620 				 * is pending, we got our first packet, OR
2621 				 * there are gaps or duplicates.
2622 				 */
2623 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2624 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2625 			}
2626 		} else {
2627 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2628 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2629 				    stcb->sctp_ep, stcb, NULL);
2630 			}
2631 		}
2632 	}
2633 }
2634 
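/*
 * Walk all chunks in the DATA portion of a packet, dispatching DATA and
 * I-DATA chunks to sctp_process_a_data_chunk(). Returns 0 on success,
 * 1 if the first chunk header could not be read, and 2 if the
 * association was aborted while processing.
 */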
2635 int
2636 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2637     struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2638     struct sctp_nets *net, uint32_t *high_tsn)
2639 {
2640 	struct sctp_chunkhdr *ch, chunk_buf;
2641 	struct sctp_association *asoc;
2642 	int num_chunks = 0;	/* number of control chunks processed */
2643 	int stop_proc = 0;
2644 	int break_flag, last_chunk;
2645 	int abort_flag = 0, was_a_gap;
2646 	struct mbuf *m;
2647 	uint32_t highest_tsn;
2648 	uint16_t chk_length;
2649 
2650 	/* set the rwnd */
2651 	sctp_set_rwnd(stcb, &stcb->asoc);
2652 
2653 	m = *mm;
2654 	SCTP_TCB_LOCK_ASSERT(stcb);
2655 	asoc = &stcb->asoc;
2656 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2657 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2658 	} else {
2659 		highest_tsn = asoc->highest_tsn_inside_map;
2660 	}
2661 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2662 	/*
2663 	 * setup where we got the last DATA packet from for any SACK that
2664 	 * may need to go out. Don't bump the net. This is done ONLY when a
2665 	 * chunk is assigned.
2666 	 */
2667 	asoc->last_data_chunk_from = net;
2668 
2669 	/*-
2670 	 * Now before we proceed we must figure out if this is a wasted
2671 	 * cluster... i.e. it is a small packet sent in and yet the driver
2672 	 * underneath allocated a full cluster for it. If so we must copy it
2673 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2674 	 * with cluster starvation. Note for __Panda__ we don't do this
2675 	 * since it has clusters all the way down to 64 bytes.
2676 	 */
2677 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2678 		/* we only handle mbufs that are singletons.. not chains */
2679 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2680 		if (m) {
2681 			/* ok lets see if we can copy the data up */
2682 			caddr_t *from, *to;
2683 
2684 			/* get the pointers and copy */
2685 			to = mtod(m, caddr_t *);
2686 			from = mtod((*mm), caddr_t *);
2687 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2688 			/* copy the length and free up the old */
2689 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2690 			sctp_m_freem(*mm);
2691 			/* success, back copy */
2692 			*mm = m;
2693 		} else {
2694 			/* Allocation failed, stay with the original mbuf */
2695 			m = *mm;
2696 		}
2697 	}
2698 	/* get pointer to the first chunk header */
2699 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2700 	    sizeof(struct sctp_chunkhdr),
2701 	    (uint8_t *)&chunk_buf);
2702 	if (ch == NULL) {
2703 		return (1);
2704 	}
2705 	/*
2706 	 * process all DATA chunks...
2707 	 */
2708 	*high_tsn = asoc->cumulative_tsn;
2709 	break_flag = 0;
2710 	asoc->data_pkts_seen++;
2711 	while (stop_proc == 0) {
2712 		/* validate chunk length */
2713 		chk_length = ntohs(ch->chunk_length);
2714 		if (length - *offset < chk_length) {
2715 			/* all done, mutilated chunk */
2716 			stop_proc = 1;
2717 			continue;
2718 		}
2719 		if ((asoc->idata_supported == 1) &&
2720 		    (ch->chunk_type == SCTP_DATA)) {
2721 			struct mbuf *op_err;
2722 			char msg[SCTP_DIAG_INFO_LEN];
2723 
2724 			snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2725 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2726 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2727 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2728 			return (2);
2729 		}
2730 		if ((asoc->idata_supported == 0) &&
2731 		    (ch->chunk_type == SCTP_IDATA)) {
2732 			struct mbuf *op_err;
2733 			char msg[SCTP_DIAG_INFO_LEN];
2734 
2735 			snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2736 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2737 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2738 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2739 			return (2);
2740 		}
2741 		if ((ch->chunk_type == SCTP_DATA) ||
2742 		    (ch->chunk_type == SCTP_IDATA)) {
2743 			uint16_t clen;
2744 
2745 			if (ch->chunk_type == SCTP_DATA) {
2746 				clen = sizeof(struct sctp_data_chunk);
2747 			} else {
2748 				clen = sizeof(struct sctp_idata_chunk);
2749 			}
2750 			if (chk_length < clen) {
2751 				/*
2752 				 * Need to send an abort since we had an
2753 				 * invalid data chunk.
2754 				 */
2755 				struct mbuf *op_err;
2756 				char msg[SCTP_DIAG_INFO_LEN];
2757 
2758 				snprintf(msg, sizeof(msg), "%s chunk of length %u",
2759 				    ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2760 				    chk_length);
2761 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2762 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2763 				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2764 				return (2);
2765 			}
2766 #ifdef SCTP_AUDITING_ENABLED
2767 			sctp_audit_log(0xB1, 0);
2768 #endif
2769 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2770 				last_chunk = 1;
2771 			} else {
2772 				last_chunk = 0;
2773 			}
2774 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2775 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2776 			    last_chunk, ch->chunk_type)) {
2777 				num_chunks++;
2778 			}
2779 			if (abort_flag)
2780 				return (2);
2781 
2782 			if (break_flag) {
2783 				/*
2784 				 * Set because we ran out of rwnd space and
2785 				 * no drop report space is left.
2786 				 */
2787 				stop_proc = 1;
2788 				continue;
2789 			}
2790 		} else {
2791 			/* not a data chunk in the data region */
2792 			switch (ch->chunk_type) {
2793 			case SCTP_INITIATION:
2794 			case SCTP_INITIATION_ACK:
2795 			case SCTP_SELECTIVE_ACK:
2796 			case SCTP_NR_SELECTIVE_ACK:
2797 			case SCTP_HEARTBEAT_REQUEST:
2798 			case SCTP_HEARTBEAT_ACK:
2799 			case SCTP_ABORT_ASSOCIATION:
2800 			case SCTP_SHUTDOWN:
2801 			case SCTP_SHUTDOWN_ACK:
2802 			case SCTP_OPERATION_ERROR:
2803 			case SCTP_COOKIE_ECHO:
2804 			case SCTP_COOKIE_ACK:
2805 			case SCTP_ECN_ECHO:
2806 			case SCTP_ECN_CWR:
2807 			case SCTP_SHUTDOWN_COMPLETE:
2808 			case SCTP_AUTHENTICATION:
2809 			case SCTP_ASCONF_ACK:
2810 			case SCTP_PACKET_DROPPED:
2811 			case SCTP_STREAM_RESET:
2812 			case SCTP_FORWARD_CUM_TSN:
2813 			case SCTP_ASCONF:
2814 				{
2815 					/*
2816 					 * Now, what do we do with KNOWN
2817 					 * chunks that are NOT in the right
2818 					 * place?
2819 					 *
2820 					 * For now, I do nothing but ignore
2821 					 * them. We may later want to add
2822 					 * sysctl stuff to switch out and do
2823 					 * either an ABORT() or possibly
2824 					 * process them.
2825 					 */
2826 					struct mbuf *op_err;
2827 					char msg[SCTP_DIAG_INFO_LEN];
2828 
2829 					snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2830 					    ch->chunk_type);
2831 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2832 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2833 					return (2);
2834 				}
2835 			default:
2836 				/*
2837 				 * Unknown chunk type: use bit rules after
2838 				 * checking length
2839 				 */
2840 				if (chk_length < sizeof(struct sctp_chunkhdr)) {
2841 					/*
2842 					 * Need to send an abort since we
2843 					 * had an invalid chunk.
2844 					 */
2845 					struct mbuf *op_err;
2846 					char msg[SCTP_DIAG_INFO_LEN];
2847 
2848 					snprintf(msg, sizeof(msg), "Chunk of length %u",
2849 					    chk_length);
2850 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2851 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2852 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2853 					return (2);
2854 				}
2855 				if (ch->chunk_type & 0x40) {
2856 					/* Add an error report to the queue */
2857 					struct mbuf *op_err;
2858 					struct sctp_gen_error_cause *cause;
2859 
2860 					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2861 					    0, M_NOWAIT, 1, MT_DATA);
2862 					if (op_err != NULL) {
2863 						cause = mtod(op_err, struct sctp_gen_error_cause *);
2864 						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2865 						cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2866 						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2867 						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2868 						if (SCTP_BUF_NEXT(op_err) != NULL) {
2869 							sctp_queue_op_err(stcb, op_err);
2870 						} else {
2871 							sctp_m_freem(op_err);
2872 						}
2873 					}
2874 				}
2875 				if ((ch->chunk_type & 0x80) == 0) {
2876 					/* discard the rest of this packet */
2877 					stop_proc = 1;
2878 				}	/* else skip this bad chunk and
2879 					 * continue... */
2880 				break;
2881 			}	/* switch of chunk type */
2882 		}
2883 		*offset += SCTP_SIZE32(chk_length);
2884 		if ((*offset >= length) || stop_proc) {
2885 			/* no more data left in the mbuf chain */
2886 			stop_proc = 1;
2887 			continue;
2888 		}
2889 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2890 		    sizeof(struct sctp_chunkhdr),
2891 		    (uint8_t *)&chunk_buf);
2892 		if (ch == NULL) {
2893 			*offset = length;
2894 			stop_proc = 1;
2895 			continue;
2896 		}
2897 	}
2898 	if (break_flag) {
2899 		/*
2900 		 * we need to report rwnd overrun drops.
2901 		 */
2902 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2903 	}
2904 	if (num_chunks) {
2905 		/*
2906 		 * We got data: update the time for auto-close and give the
2907 		 * peer credit for being alive.
2908 		 */
2909 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2910 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2911 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2912 			    stcb->asoc.overall_error_count,
2913 			    0,
2914 			    SCTP_FROM_SCTP_INDATA,
2915 			    __LINE__);
2916 		}
2917 		stcb->asoc.overall_error_count = 0;
2918 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2919 	}
2920 	/* now service all of the reassm queue if needed */
2921 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2922 		/* Assure that we ack right away */
2923 		stcb->asoc.send_sack = 1;
2924 	}
2925 	/* Start a sack timer or QUEUE a SACK for sending */
2926 	sctp_sack_check(stcb, was_a_gap);
2927 	return (0);
2928 }
2929 
2930 static int
2931 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2932     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2933     int *num_frs,
2934     uint32_t *biggest_newly_acked_tsn,
2935     uint32_t *this_sack_lowest_newack,
2936     int *rto_ok)
2937 {
2938 	struct sctp_tmit_chunk *tp1;
2939 	unsigned int theTSN;
2940 	int j, wake_him = 0, circled = 0;
2941 
2942 	/* Recover the tp1 we last saw */
2943 	tp1 = *p_tp1;
2944 	if (tp1 == NULL) {
2945 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2946 	}
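	/*
	 * Each gap ack block covers TSNs last_tsn + frag_strt through
	 * last_tsn + frag_end; the offsets are relative to the cumulative
	 * TSN carried in the SACK. Walk the sent queue once per covered
	 * TSN, wrapping around at most once in case blocks were not in
	 * order.
	 */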
2947 	for (j = frag_strt; j <= frag_end; j++) {
2948 		theTSN = j + last_tsn;
2949 		while (tp1) {
2950 			if (tp1->rec.data.doing_fast_retransmit)
2951 				(*num_frs) += 1;
2952 
2953 			/*-
2954 			 * CMT: CUCv2 algorithm. For each TSN being
2955 			 * processed from the sent queue, track the
2956 			 * next expected pseudo-cumack, or
2957 			 * rtx_pseudo_cumack, if required. Separate
2958 			 * cumack trackers for first transmissions,
2959 			 * and retransmissions.
2960 			 */
2961 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2962 			    (tp1->whoTo->find_pseudo_cumack == 1) &&
2963 			    (tp1->snd_count == 1)) {
2964 				tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2965 				tp1->whoTo->find_pseudo_cumack = 0;
2966 			}
2967 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2968 			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2969 			    (tp1->snd_count > 1)) {
2970 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2971 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2972 			}
2973 			if (tp1->rec.data.tsn == theTSN) {
2974 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2975 					/*-
2976 					 * must be held until
2977 					 * cum-ack passes
2978 					 */
2979 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2980 						/*-
2981 						 * If it is less than RESEND, it is
2982 						 * now no-longer in flight.
2983 						 * Higher values may already be set
2984 						 * via previous Gap Ack Blocks...
2985 						 * i.e. ACKED or RESEND.
2986 						 */
2987 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
2988 						    *biggest_newly_acked_tsn)) {
2989 							*biggest_newly_acked_tsn = tp1->rec.data.tsn;
2990 						}
2991 						/*-
2992 						 * CMT: SFR algo (and HTNA) - set
2993 						 * saw_newack to 1 for dest being
2994 						 * newly acked. update
2995 						 * this_sack_highest_newack if
2996 						 * appropriate.
2997 						 */
2998 						if (tp1->rec.data.chunk_was_revoked == 0)
2999 							tp1->whoTo->saw_newack = 1;
3000 
3001 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3002 						    tp1->whoTo->this_sack_highest_newack)) {
3003 							tp1->whoTo->this_sack_highest_newack =
3004 							    tp1->rec.data.tsn;
3005 						}
3006 						/*-
3007 						 * CMT DAC algo: also update
3008 						 * this_sack_lowest_newack
3009 						 */
3010 						if (*this_sack_lowest_newack == 0) {
3011 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3012 								sctp_log_sack(*this_sack_lowest_newack,
3013 								    last_tsn,
3014 								    tp1->rec.data.tsn,
3015 								    0,
3016 								    0,
3017 								    SCTP_LOG_TSN_ACKED);
3018 							}
3019 							*this_sack_lowest_newack = tp1->rec.data.tsn;
3020 						}
3021 						/*-
3022 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3023 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3024 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3025 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3026 						 * Separate pseudo_cumack trackers for first transmissions and
3027 						 * retransmissions.
3028 						 */
3029 						if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3030 							if (tp1->rec.data.chunk_was_revoked == 0) {
3031 								tp1->whoTo->new_pseudo_cumack = 1;
3032 							}
3033 							tp1->whoTo->find_pseudo_cumack = 1;
3034 						}
3035 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3036 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3037 						}
3038 						if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3039 							if (tp1->rec.data.chunk_was_revoked == 0) {
3040 								tp1->whoTo->new_pseudo_cumack = 1;
3041 							}
3042 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
3043 						}
3044 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3045 							sctp_log_sack(*biggest_newly_acked_tsn,
3046 							    last_tsn,
3047 							    tp1->rec.data.tsn,
3048 							    frag_strt,
3049 							    frag_end,
3050 							    SCTP_LOG_TSN_ACKED);
3051 						}
3052 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3053 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3054 							    tp1->whoTo->flight_size,
3055 							    tp1->book_size,
3056 							    (uint32_t)(uintptr_t)tp1->whoTo,
3057 							    tp1->rec.data.tsn);
3058 						}
3059 						sctp_flight_size_decrease(tp1);
3060 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3061 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3062 							    tp1);
3063 						}
3064 						sctp_total_flight_decrease(stcb, tp1);
3065 
3066 						tp1->whoTo->net_ack += tp1->send_size;
3067 						if (tp1->snd_count < 2) {
3068 							/*-
3069 							 * True non-retransmitted chunk
3070 							 */
3071 							tp1->whoTo->net_ack2 += tp1->send_size;
3072 
3073 							/*-
3074 							 * Update the RTO too?
3075 							 */
3076 							if (tp1->do_rtt) {
3077 								if (*rto_ok) {
3078 									tp1->whoTo->RTO =
3079 									    sctp_calculate_rto(stcb,
3080 									    &stcb->asoc,
3081 									    tp1->whoTo,
3082 									    &tp1->sent_rcv_time,
3083 									    SCTP_RTT_FROM_DATA);
3084 									*rto_ok = 0;
3085 								}
3086 								if (tp1->whoTo->rto_needed == 0) {
3087 									tp1->whoTo->rto_needed = 1;
3088 								}
3089 								tp1->do_rtt = 0;
3090 							}
3091 						}
3092 					}
3093 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3094 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3095 						    stcb->asoc.this_sack_highest_gap)) {
3096 							stcb->asoc.this_sack_highest_gap =
3097 							    tp1->rec.data.tsn;
3098 						}
3099 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3100 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3101 #ifdef SCTP_AUDITING_ENABLED
3102 							sctp_audit_log(0xB2,
3103 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3104 #endif
3105 						}
3106 					}
3107 					/*-
3108 					 * All chunks NOT UNSENT fall through here and are marked
3109 					 * (leave PR-SCTP ones that are to skip alone though)
3110 					 */
3111 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3112 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3113 						tp1->sent = SCTP_DATAGRAM_MARKED;
3114 					}
3115 					if (tp1->rec.data.chunk_was_revoked) {
3116 						/* deflate the cwnd */
3117 						tp1->whoTo->cwnd -= tp1->book_size;
3118 						tp1->rec.data.chunk_was_revoked = 0;
3119 					}
3120 					/* NR Sack code here */
3121 					if (nr_sacking &&
3122 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3123 						if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3124 							stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3125 #ifdef INVARIANTS
3126 						} else {
3127 							panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3128 #endif
3129 						}
3130 						if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3131 						    (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3132 						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3133 							stcb->asoc.trigger_reset = 1;
3134 						}
3135 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3136 						if (tp1->data) {
3137 							/*
3138 							 * sa_ignore
3139 							 * NO_NULL_CHK
3140 							 */
3141 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3142 							sctp_m_freem(tp1->data);
3143 							tp1->data = NULL;
3144 						}
3145 						wake_him++;
3146 					}
3147 				}
3148 				break;
3149 			}	/* if (tp1->tsn == theTSN) */
3150 			if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3151 				break;
3152 			}
3153 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3154 			if ((tp1 == NULL) && (circled == 0)) {
3155 				circled++;
3156 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3157 			}
3158 		}		/* end while (tp1) */
3159 		if (tp1 == NULL) {
3160 			circled = 0;
3161 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3162 		}
3163 		/* In case the fragments were not in order, we must reset */
3164 	}			/* end for (j = fragStart */
3165 	*p_tp1 = tp1;
3166 	return (wake_him);	/* Return value only used for nr-sack */
3167 }
3168 
3169 
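/*
 * Walk the gap-ack and NR gap-ack blocks of a SACK chunk, pulling each
 * block out of the mbuf chain at *offset and processing the TSN range it
 * covers. Returns non-zero if processing freed at least one chunk.
 */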
3170 static int
3171 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3172     uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3173     uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3174     int num_seg, int num_nr_seg, int *rto_ok)
3175 {
3176 	struct sctp_gap_ack_block *frag, block;
3177 	struct sctp_tmit_chunk *tp1;
3178 	int i;
3179 	int num_frs = 0;
3180 	int chunk_freed;
3181 	int non_revocable;
3182 	uint16_t frag_strt, frag_end, prev_frag_end;
3183 
3184 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3185 	prev_frag_end = 0;
3186 	chunk_freed = 0;
3187 
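	/*
	 * The first num_seg blocks are revocable gap-acks; the remaining
	 * num_nr_seg blocks are NR-SACK (non-renegable) blocks, so the
	 * walk restarts from the head of the sent queue when we cross
	 * from one kind to the other.
	 */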
3188 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3189 		if (i == num_seg) {
3190 			prev_frag_end = 0;
3191 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3192 		}
3193 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3194 		    sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3195 		*offset += sizeof(block);
3196 		if (frag == NULL) {
3197 			return (chunk_freed);
3198 		}
3199 		frag_strt = ntohs(frag->start);
3200 		frag_end = ntohs(frag->end);
3201 
3202 		if (frag_strt > frag_end) {
3203 			/* This gap report is malformed, skip it. */
3204 			continue;
3205 		}
3206 		if (frag_strt <= prev_frag_end) {
3207 			/* This gap report is not in order, so restart. */
3208 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3209 		}
3210 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3211 			*biggest_tsn_acked = last_tsn + frag_end;
3212 		}
3213 		if (i < num_seg) {
3214 			non_revocable = 0;
3215 		} else {
3216 			non_revocable = 1;
3217 		}
3218 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3219 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3220 		    this_sack_lowest_newack, rto_ok)) {
3221 			chunk_freed = 1;
3222 		}
3223 		prev_frag_end = frag_end;
3224 	}
3225 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3226 		if (num_frs)
3227 			sctp_log_fr(*biggest_tsn_acked,
3228 			    *biggest_newly_acked_tsn,
3229 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3230 	}
3231 	return (chunk_freed);
3232 }
3233 
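/*
 * Scan the sent queue above the cumulative ack: a chunk that was ACKED by
 * an earlier gap report but not by this SACK has been revoked and is put
 * back in flight (with the cwnd compensated), while a MARKED chunk that
 * was acked again in this SACK goes back to ACKED.
 */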
3234 static void
3235 sctp_check_for_revoked(struct sctp_tcb *stcb,
3236     struct sctp_association *asoc, uint32_t cumack,
3237     uint32_t biggest_tsn_acked)
3238 {
3239 	struct sctp_tmit_chunk *tp1;
3240 
3241 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3242 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3243 			/*
3244 			 * This chunk is either ACKED or MARKED. If it is
3245 			 * ACKED, it was previously acked but not this time,
3246 			 * i.e. it has been revoked. If it is MARKED, it was
3247 			 * ACK'ed again.
3248 			 */
3249 			if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3250 				break;
3251 			}
3252 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3253 				/* it has been revoked */
3254 				tp1->sent = SCTP_DATAGRAM_SENT;
3255 				tp1->rec.data.chunk_was_revoked = 1;
3256 				/*
3257 				 * We must add this stuff back in to assure
3258 				 * timers and such get started.
3259 				 */
3260 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3261 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3262 					    tp1->whoTo->flight_size,
3263 					    tp1->book_size,
3264 					    (uint32_t)(uintptr_t)tp1->whoTo,
3265 					    tp1->rec.data.tsn);
3266 				}
3267 				sctp_flight_size_increase(tp1);
3268 				sctp_total_flight_increase(stcb, tp1);
3269 				/*
3270 				 * We inflate the cwnd to compensate for our
3271 				 * artificial inflation of the flight_size.
3272 				 */
3273 				tp1->whoTo->cwnd += tp1->book_size;
3274 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3275 					sctp_log_sack(asoc->last_acked_seq,
3276 					    cumack,
3277 					    tp1->rec.data.tsn,
3278 					    0,
3279 					    0,
3280 					    SCTP_LOG_TSN_REVOKED);
3281 				}
3282 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3283 				/* it has been re-acked in this SACK */
3284 				tp1->sent = SCTP_DATAGRAM_ACKED;
3285 			}
3286 		}
3287 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3288 			break;
3289 	}
3290 }
3291 
3292 
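/*
 * Issue "strikes" (miss indications) against chunks on the sent queue
 * that this SACK implies are missing, subject to the CMT SFR, DAC and
 * HTNA rules, and set up fast retransmission for chunks that have
 * collected enough strikes.
 */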
3293 static void
3294 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3295     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3296 {
3297 	struct sctp_tmit_chunk *tp1;
3298 	int strike_flag = 0;
3299 	struct timeval now;
3300 	int tot_retrans = 0;
3301 	uint32_t sending_seq;
3302 	struct sctp_nets *net;
3303 	int num_dests_sacked = 0;
3304 
3305 	/*
3306 	 * Select the sending_seq: this is either the next chunk ready to be
3307 	 * sent but not yet transmitted, or the next seq we will assign.
3308 	 */
3309 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3310 	if (tp1 == NULL) {
3311 		sending_seq = asoc->sending_seq;
3312 	} else {
3313 		sending_seq = tp1->rec.data.tsn;
3314 	}
3315 
3316 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3317 	if ((asoc->sctp_cmt_on_off > 0) &&
3318 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3319 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3320 			if (net->saw_newack)
3321 				num_dests_sacked++;
3322 		}
3323 	}
3324 	if (stcb->asoc.prsctp_supported) {
3325 		(void)SCTP_GETTIME_TIMEVAL(&now);
3326 	}
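	/*
	 * Walk the sent queue and decide, chunk by chunk, whether this SACK
	 * counts as a miss indication. Each strike advances tp1->sent one
	 * step toward SCTP_DATAGRAM_RESEND; a chunk that reaches it is set
	 * up for fast retransmit below.
	 */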
3327 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3328 		strike_flag = 0;
3329 		if (tp1->no_fr_allowed) {
3330 			/* this one had a timeout or something */
3331 			continue;
3332 		}
3333 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3334 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3335 				sctp_log_fr(biggest_tsn_newly_acked,
3336 				    tp1->rec.data.tsn,
3337 				    tp1->sent,
3338 				    SCTP_FR_LOG_CHECK_STRIKE);
3339 		}
3340 		if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3341 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3342 			/* done */
3343 			break;
3344 		}
3345 		if (stcb->asoc.prsctp_supported) {
3346 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3347 				/* Is it expired? */
3348 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3349 					/* Yes so drop it */
3350 					if (tp1->data != NULL) {
3351 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3352 						    SCTP_SO_NOT_LOCKED);
3353 					}
3354 					continue;
3355 				}
3356 			}
3357 		}
3358 		if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap)) {
3359 			/* we are beyond the TSN in the SACK */
3360 			break;
3361 		}
3362 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3363 			/* either a RESEND, ACKED, or MARKED */
3364 			/* skip */
3365 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3366 				/* Continue striking FWD-TSN chunks */
3367 				tp1->rec.data.fwd_tsn_cnt++;
3368 			}
3369 			continue;
3370 		}
3371 		/*
3372 		 * CMT: SFR algo (covers part of DAC and HTNA as well)
3373 		 */
3374 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3375 			/*
3376 			 * No new acks were received for data sent to this
3377 			 * dest. Therefore, according to the SFR algo for
3378 			 * CMT, no data sent to this dest can be marked for
3379 			 * FR using this SACK.
3380 			 */
3381 			continue;
3382 		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.tsn,
3383 		    tp1->whoTo->this_sack_highest_newack)) {
3384 			/*
3385 			 * CMT: New acks were received for data sent to
3386 			 * this dest. But no new acks were seen for data
3387 			 * sent after tp1. Therefore, according to the SFR
3388 			 * algo for CMT, tp1 cannot be marked for FR using
3389 			 * this SACK. This step covers part of the DAC algo
3390 			 * and the HTNA algo as well.
3391 			 */
3392 			continue;
3393 		}
3394 		/*
3395 		 * Here we check to see if we have already done a FR
3396 		 * and if so we see if the biggest TSN we saw in the sack is
3397 		 * smaller than the recovery point. If so we don't strike
3398 		 * the tsn... otherwise we CAN strike the TSN.
3399 		 */
3400 		/*
3401 		 * @@@ JRI: Check for CMT if (accum_moved &&
3402 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3403 		 * 0)) {
3404 		 */
3405 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3406 			/*
3407 			 * Strike the TSN if in fast-recovery and cum-ack
3408 			 * moved.
3409 			 */
3410 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3411 				sctp_log_fr(biggest_tsn_newly_acked,
3412 				    tp1->rec.data.tsn,
3413 				    tp1->sent,
3414 				    SCTP_FR_LOG_STRIKE_CHUNK);
3415 			}
3416 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3417 				tp1->sent++;
3418 			}
3419 			if ((asoc->sctp_cmt_on_off > 0) &&
3420 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3421 				/*
3422 				 * CMT DAC algorithm: If the SACK's DAC flag is
3423 				 * set to 0, the lowest_newack test will not
3424 				 * pass because it would have been set to the
3425 				 * cumack earlier. If the chunk is not already
3426 				 * marked for retransmission, this is not a
3427 				 * mixed SACK, and tp1 is not between two
3428 				 * sacked TSNs, then mark it one more time.
3429 				 * NOTE that we mark one additional time since
3430 				 * the SACK DAC flag indicates that two packets
3431 				 * have been received after this missing TSN.
3432 				 */
3433 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3434 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3435 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3436 						sctp_log_fr(16 + num_dests_sacked,
3437 						    tp1->rec.data.tsn,
3438 						    tp1->sent,
3439 						    SCTP_FR_LOG_STRIKE_CHUNK);
3440 					}
3441 					tp1->sent++;
3442 				}
3443 			}
3444 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3445 		    (asoc->sctp_cmt_on_off == 0)) {
3446 			/*
3447 			 * For those that have done a FR we must take
3448 			 * special consideration if we strike. I.e the
3449 			 * biggest_newly_acked must be higher than the
3450 			 * sending_seq at the time we did the FR.
3451 			 */
3452 			if (
3453 #ifdef SCTP_FR_TO_ALTERNATE
3454 			/*
3455 			 * If FR's go to new networks, then we must only do
3456 			 * this for singly homed asoc's. However if the FR's
3457 			 * go to the same network (Armando's work) then its
3458 			 * go to the same network (Armando's work) then it's
3459 			 * OK to FR multiple times.
3460 			    (asoc->numnets < 2)
3461 #else
3462 			    (1)
3463 #endif
3464 			    ) {
3465 
3466 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3467 				    tp1->rec.data.fast_retran_tsn)) {
3468 					/*
3469 					 * Strike the TSN, since this ack is
3470 					 * beyond where things were when we
3471 					 * did a FR.
3472 					 */
3473 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3474 						sctp_log_fr(biggest_tsn_newly_acked,
3475 						    tp1->rec.data.tsn,
3476 						    tp1->sent,
3477 						    SCTP_FR_LOG_STRIKE_CHUNK);
3478 					}
3479 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3480 						tp1->sent++;
3481 					}
3482 					strike_flag = 1;
3483 					if ((asoc->sctp_cmt_on_off > 0) &&
3484 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3485 						/*
3486 						 * CMT DAC algorithm: If the
3487 						 * SACK's DAC flag is set to
3488 						 * 0, the lowest_newack test
3489 						 * will not pass because it
3490 						 * would have been set to the
3491 						 * cumack earlier. If the
3492 						 * chunk is not already
3493 						 * marked for retransmission,
3494 						 * this is not a mixed SACK,
3495 						 * and tp1 is not between two
3496 						 * sacked TSNs, then mark it
3497 						 * one more time. NOTE that
3498 						 * we mark one additional
3499 						 * time since the SACK DAC
3500 						 * flag indicates that two
3501 						 * packets have been received
3502 						 * after this missing TSN.
3503 						 */
3504 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3505 						    (num_dests_sacked == 1) &&
3506 						    SCTP_TSN_GT(this_sack_lowest_newack,
3507 						    tp1->rec.data.tsn)) {
3508 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3509 								sctp_log_fr(32 + num_dests_sacked,
3510 								    tp1->rec.data.tsn,
3511 								    tp1->sent,
3512 								    SCTP_FR_LOG_STRIKE_CHUNK);
3513 							}
3514 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3515 								tp1->sent++;
3516 							}
3517 						}
3518 					}
3519 				}
3520 			}
3521 			/*
3522 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3523 			 * algo covers HTNA.
3524 			 */
3525 		} else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3526 		    biggest_tsn_newly_acked)) {
3527 			/*
3528 			 * We don't strike these: this is the HTNA
3529 			 * algorithm, i.e. we don't strike if our TSN is
3530 			 * larger than the Highest TSN Newly Acked.
3531 			 */
3532 			;
3533 		} else {
3534 			/* Strike the TSN */
3535 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3536 				sctp_log_fr(biggest_tsn_newly_acked,
3537 				    tp1->rec.data.tsn,
3538 				    tp1->sent,
3539 				    SCTP_FR_LOG_STRIKE_CHUNK);
3540 			}
3541 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3542 				tp1->sent++;
3543 			}
3544 			if ((asoc->sctp_cmt_on_off > 0) &&
3545 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3546 				/*
3547 				 * CMT DAC algorithm: If the SACK's DAC flag is
3548 				 * set to 0, the lowest_newack test will not
3549 				 * pass because it would have been set to the
3550 				 * cumack earlier. If the chunk is not already
3551 				 * marked for retransmission, this is not a
3552 				 * mixed SACK, and tp1 is not between two
3553 				 * sacked TSNs, then mark it one more time.
3554 				 * NOTE that we mark one additional time since
3555 				 * the SACK DAC flag indicates that two packets
3556 				 * have been received after this missing TSN.
3557 				 */
3558 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3559 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3560 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3561 						sctp_log_fr(48 + num_dests_sacked,
3562 						    tp1->rec.data.tsn,
3563 						    tp1->sent,
3564 						    SCTP_FR_LOG_STRIKE_CHUNK);
3565 					}
3566 					tp1->sent++;
3567 				}
3568 			}
3569 		}
3570 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3571 			struct sctp_nets *alt;
3572 
3573 			/* fix counts and things */
3574 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3575 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3576 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3577 				    tp1->book_size,
3578 				    (uint32_t)(uintptr_t)tp1->whoTo,
3579 				    tp1->rec.data.tsn);
3580 			}
3581 			if (tp1->whoTo) {
3582 				tp1->whoTo->net_ack++;
3583 				sctp_flight_size_decrease(tp1);
3584 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3585 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3586 					    tp1);
3587 				}
3588 			}
3589 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3590 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3591 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3592 			}
3593 			/* add back to the rwnd */
3594 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3595 
3596 			/* remove from the total flight */
3597 			sctp_total_flight_decrease(stcb, tp1);
3598 
3599 			if ((stcb->asoc.prsctp_supported) &&
3600 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3601 				/*
3602 				 * Has it been retransmitted tv_sec times? -
3603 				 * we store the retran count there.
3604 				 */
3605 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3606 					/* Yes, so drop it */
3607 					if (tp1->data != NULL) {
3608 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3609 						    SCTP_SO_NOT_LOCKED);
3610 					}
3611 					/* Make sure to flag we had a FR */
3612 					tp1->whoTo->net_ack++;
3613 					continue;
3614 				}
3615 			}
3616 			/*
3617 			 * SCTP_PRINTF("OK, we are now ready to FR this
3618 			 * guy\n");
3619 			 */
3620 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3621 				sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3622 				    0, SCTP_FR_MARKED);
3623 			}
3624 			if (strike_flag) {
3625 				/* This is a subsequent FR */
3626 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3627 			}
3628 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3629 			if (asoc->sctp_cmt_on_off > 0) {
3630 				/*
3631 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3632 				 * If CMT is being used, then pick dest with
3633 				 * largest ssthresh for any retransmission.
3634 				 */
3635 				tp1->no_fr_allowed = 1;
3636 				alt = tp1->whoTo;
3637 				/* sa_ignore NO_NULL_CHK */
3638 				if (asoc->sctp_cmt_pf > 0) {
3639 					/*
3640 					 * JRS 5/18/07 - If CMT PF is on,
3641 					 * use the PF version of
3642 					 * find_alt_net()
3643 					 */
3644 					alt = sctp_find_alternate_net(stcb, alt, 2);
3645 				} else {
3646 					/*
3647 					 * JRS 5/18/07 - If only CMT is on,
3648 					 * use the CMT version of
3649 					 * find_alt_net()
3650 					 */
3651 					/* sa_ignore NO_NULL_CHK */
3652 					alt = sctp_find_alternate_net(stcb, alt, 1);
3653 				}
3654 				if (alt == NULL) {
3655 					alt = tp1->whoTo;
3656 				}
3657 				/*
3658 				 * CUCv2: If a different dest is picked for
3659 				 * the retransmission, then new
3660 				 * (rtx-)pseudo_cumack needs to be tracked
3661 				 * for orig dest. Let CUCv2 track new (rtx-)
3662 				 * pseudo-cumack always.
3663 				 */
3664 				if (tp1->whoTo) {
3665 					tp1->whoTo->find_pseudo_cumack = 1;
3666 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3667 				}
3668 			} else {/* CMT is OFF */
3669 
3670 #ifdef SCTP_FR_TO_ALTERNATE
3671 				/* Can we find an alternate? */
3672 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3673 #else
3674 				/*
3675 				 * default behavior is to NOT retransmit
3676 				 * FR's to an alternate. Armando Caro's
3677 				 * paper details why.
3678 				 */
3679 				alt = tp1->whoTo;
3680 #endif
3681 			}
3682 
3683 			tp1->rec.data.doing_fast_retransmit = 1;
3684 			tot_retrans++;
3685 			/* mark the sending seq for possible subsequent FR's */
3686 			/*
3687 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3688 			 * (uint32_t)tp1->rec.data.tsn);
3689 			 */
3690 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3691 				/*
3692 				 * If the send queue is empty then it's
3693 				 * the next sequence number that will be
3694 				 * assigned, so we subtract one from this to
3695 				 * get the one we last sent.
3696 				 */
3697 				tp1->rec.data.fast_retran_tsn = sending_seq;
3698 			} else {
3699 				/*
3700 				 * If there are chunks on the send queue
3701 				 * (unsent data that has made it from the
3702 				 * stream queues but not out the door), we
3703 				 * take the first one (which will have the
3704 				 * lowest TSN) and subtract one to get the
3705 				 * one we last sent.
3706 				 */
3707 				struct sctp_tmit_chunk *ttt;
3708 
3709 				ttt = TAILQ_FIRST(&asoc->send_queue);
3710 				tp1->rec.data.fast_retran_tsn =
3711 				    ttt->rec.data.tsn;
3712 			}
3713 
3714 			if (tp1->do_rtt) {
3715 				/*
3716 				 * this guy had an RTO calculation pending on
3717 				 * it, cancel it
3718 				 */
3719 				if ((tp1->whoTo != NULL) &&
3720 				    (tp1->whoTo->rto_needed == 0)) {
3721 					tp1->whoTo->rto_needed = 1;
3722 				}
3723 				tp1->do_rtt = 0;
3724 			}
3725 			if (alt != tp1->whoTo) {
3726 				/* yes, there is an alternate. */
3727 				sctp_free_remote_addr(tp1->whoTo);
3728 				/* sa_ignore FREED_MEMORY */
3729 				tp1->whoTo = alt;
3730 				atomic_add_int(&alt->ref_count, 1);
3731 			}
3732 		}
3733 	}
3734 }
3735 
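/*
 * PR-SCTP: walk the front of the sent queue and advance the advanced peer
 * ack point over chunks that are marked to be skipped (or NR-acked),
 * stopping at the first reliable or still-pending chunk. Returns the chunk
 * at the (possibly updated) advanced peer ack point, or NULL.
 */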
3736 struct sctp_tmit_chunk *
3737 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3738     struct sctp_association *asoc)
3739 {
3740 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3741 	struct timeval now;
3742 	int now_filled = 0;
3743 
3744 	if (asoc->prsctp_supported == 0) {
3745 		return (NULL);
3746 	}
3747 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3748 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3749 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3750 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3751 			/* no chance to advance, out of here */
3752 			break;
3753 		}
3754 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3755 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3756 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3757 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3758 				    asoc->advanced_peer_ack_point,
3759 				    tp1->rec.data.tsn, 0, 0);
3760 			}
3761 		}
3762 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3763 			/*
3764 			 * We can't fwd-tsn past any that are reliable aka
3765 			 * retransmitted until the asoc fails.
3766 			 */
3767 			break;
3768 		}
3769 		if (!now_filled) {
3770 			(void)SCTP_GETTIME_TIMEVAL(&now);
3771 			now_filled = 1;
3772 		}
3773 		/*
3774 		 * Now we have a chunk which is marked for another
3775 		 * retransmission to a PR-stream but may have run out of its
3776 		 * chances already, OR has been marked to skip now. Can we
3777 		 * skip it if it's a resend?
3778 		 */
3779 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3780 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3781 			/*
3782 			 * Now is this one marked for resend and its time is
3783 			 * now up?
3784 			 */
3785 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3786 				/* Yes so drop it */
3787 				if (tp1->data) {
3788 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3789 					    1, SCTP_SO_NOT_LOCKED);
3790 				}
3791 			} else {
3792 				/*
3793 				 * No, we are done when we hit one marked for
3794 				 * resend whose time has not expired.
3795 				 */
3796 				break;
3797 			}
3798 		}
3799 		/*
3800 		 * OK, now if this chunk is marked to drop, we can clean up
3801 		 * the chunk, advance our peer ack point and we can check
3802 		 * the next chunk.
3803 		 */
3804 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3805 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3806 			/* the advanced PeerAckPoint goes forward */
3807 			if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3808 				asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3809 				a_adv = tp1;
3810 			} else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3811 				/* No update but we do save the chk */
3812 				a_adv = tp1;
3813 			}
3814 		} else {
3815 			/*
3816 			 * If it is still in RESEND we can advance no
3817 			 * further
3818 			 */
3819 			break;
3820 		}
3821 	}
3822 	return (a_adv);
3823 }
3824 
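/*
 * Audit the flight-size accounting by classifying every chunk on the sent
 * queue. If anything still counts as in flight (or sits between RESEND and
 * ACKED) this panics under INVARIANTS; otherwise it reports the counts and
 * returns non-zero so the caller can rebuild the accounting.
 */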
3825 static int
3826 sctp_fs_audit(struct sctp_association *asoc)
3827 {
3828 	struct sctp_tmit_chunk *chk;
3829 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3830 	int ret;
3831 #ifndef INVARIANTS
3832 	int entry_flight, entry_cnt;
3833 #endif
3834 
3835 	ret = 0;
3836 #ifndef INVARIANTS
3837 	entry_flight = asoc->total_flight;
3838 	entry_cnt = asoc->total_flight_count;
3839 #endif
3840 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3841 		return (0);
3842 
3843 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3844 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3845 			SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3846 			    chk->rec.data.tsn,
3847 			    chk->send_size,
3848 			    chk->snd_count);
3849 			inflight++;
3850 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3851 			resend++;
3852 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3853 			inbetween++;
3854 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3855 			above++;
3856 		} else {
3857 			acked++;
3858 		}
3859 	}
3860 
3861 	if ((inflight > 0) || (inbetween > 0)) {
3862 #ifdef INVARIANTS
3863 		panic("Flight size-express incorrect?\n");
3864 #else
3865 		SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3866 		    entry_flight, entry_cnt);
3867 
3868 		SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3869 		    inflight, inbetween, resend, above, acked);
3870 		ret = 1;
3871 #endif
3872 	}
3873 	return (ret);
3874 }
3875 
3876 
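/*
 * Recover a chunk that was sent as a window probe: pull it out of the
 * flight-size accounting and mark it for resend, unless it has already
 * been acked or its data is gone, in which case we leave it alone.
 */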
3877 static void
3878 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3879     struct sctp_association *asoc,
3880     struct sctp_tmit_chunk *tp1)
3881 {
3882 	tp1->window_probe = 0;
3883 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3884 		/* TSNs skipped; we do NOT move back. */
3885 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3886 		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
3887 		    tp1->book_size,
3888 		    (uint32_t)(uintptr_t)tp1->whoTo,
3889 		    tp1->rec.data.tsn);
3890 		return;
3891 	}
3892 	/* First setup this by shrinking flight */
3893 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3894 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3895 		    tp1);
3896 	}
3897 	sctp_flight_size_decrease(tp1);
3898 	sctp_total_flight_decrease(stcb, tp1);
3899 	/* Now mark for resend */
3900 	tp1->sent = SCTP_DATAGRAM_RESEND;
3901 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3902 
3903 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3904 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3905 		    tp1->whoTo->flight_size,
3906 		    tp1->book_size,
3907 		    (uint32_t)(uintptr_t)tp1->whoTo,
3908 		    tp1->rec.data.tsn);
3909 	}
3910 }
3911 
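/*
 * Express (fast-path) SACK handling for a SACK carrying only a new
 * cumulative ack: there are no gap-ack blocks to walk, so the sent queue
 * is simply drained up to the cumack.
 */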
3912 void
3913 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3914     uint32_t rwnd, int *abort_now, int ecne_seen)
3915 {
3916 	struct sctp_nets *net;
3917 	struct sctp_association *asoc;
3918 	struct sctp_tmit_chunk *tp1, *tp2;
3919 	uint32_t old_rwnd;
3920 	int win_probe_recovery = 0;
3921 	int win_probe_recovered = 0;
3922 	int j, done_once = 0;
3923 	int rto_ok = 1;
3924 	uint32_t send_s;
3925 
3926 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3927 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3928 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3929 	}
3930 	SCTP_TCB_LOCK_ASSERT(stcb);
3931 #ifdef SCTP_ASOCLOG_OF_TSNS
3932 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3933 	stcb->asoc.cumack_log_at++;
3934 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3935 		stcb->asoc.cumack_log_at = 0;
3936 	}
3937 #endif
3938 	asoc = &stcb->asoc;
3939 	old_rwnd = asoc->peers_rwnd;
3940 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3941 		/* old ack */
3942 		return;
3943 	} else if (asoc->last_acked_seq == cumack) {
3944 		/* Window update sack */
3945 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3946 		    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3947 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3948 			/* SWS sender side engages */
3949 			asoc->peers_rwnd = 0;
3950 		}
3951 		if (asoc->peers_rwnd > old_rwnd) {
3952 			goto again;
3953 		}
3954 		return;
3955 	}
3956 	/* First setup for CC stuff */
3957 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3958 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3959 			/* Drag along the window_tsn for cwr's */
3960 			net->cwr_window_tsn = cumack;
3961 		}
3962 		net->prev_cwnd = net->cwnd;
3963 		net->net_ack = 0;
3964 		net->net_ack2 = 0;
3965 
3966 		/*
3967 		 * CMT: Reset CUC and Fast recovery algo variables before
3968 		 * SACK processing
3969 		 */
3970 		net->new_pseudo_cumack = 0;
3971 		net->will_exit_fast_recovery = 0;
3972 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3973 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3974 		}
3975 	}
3976 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3977 		tp1 = TAILQ_LAST(&asoc->sent_queue,
3978 		    sctpchunk_listhead);
3979 		send_s = tp1->rec.data.tsn + 1;
3980 	} else {
3981 		send_s = asoc->sending_seq;
3982 	}
3983 	if (SCTP_TSN_GE(cumack, send_s)) {
3984 		struct mbuf *op_err;
3985 		char msg[SCTP_DIAG_INFO_LEN];
3986 
3987 		*abort_now = 1;
3988 		/* XXX */
3989 		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater than or equal to TSN %8.8x",
3990 		    cumack, send_s);
3991 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3992 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
3993 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3994 		return;
3995 	}
3996 	asoc->this_sack_highest_gap = cumack;
3997 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3998 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3999 		    stcb->asoc.overall_error_count,
4000 		    0,
4001 		    SCTP_FROM_SCTP_INDATA,
4002 		    __LINE__);
4003 	}
4004 	stcb->asoc.overall_error_count = 0;
4005 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4006 		/* process the new consecutive TSN first */
4007 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4008 			if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4009 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4010 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
4011 				}
4012 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4013 					/*
4014 					 * If it is less than ACKED, it is
4015 					 * now no longer in flight. Higher
4016 					 * values may occur during marking.
4017 					 */
4018 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4019 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4020 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4021 							    tp1->whoTo->flight_size,
4022 							    tp1->book_size,
4023 							    (uint32_t)(uintptr_t)tp1->whoTo,
4024 							    tp1->rec.data.tsn);
4025 						}
4026 						sctp_flight_size_decrease(tp1);
4027 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4028 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4029 							    tp1);
4030 						}
4031 						/* sa_ignore NO_NULL_CHK */
4032 						sctp_total_flight_decrease(stcb, tp1);
4033 					}
4034 					tp1->whoTo->net_ack += tp1->send_size;
4035 					if (tp1->snd_count < 2) {
4036 						/*
4037 						 * True non-retransmitted
4038 						 * chunk
4039 						 */
4040 						tp1->whoTo->net_ack2 +=
4041 						    tp1->send_size;
4042 
4043 						/* update RTO too? */
4044 						if (tp1->do_rtt) {
4045 							if (rto_ok) {
4046 								tp1->whoTo->RTO =
4047 								/*
4048 								 * sa_ignore
4049 								 * NO_NULL_CHK
4050 								 */
4051 								    sctp_calculate_rto(stcb,
4052 								    asoc, tp1->whoTo,
4053 								    &tp1->sent_rcv_time,
4054 								    SCTP_RTT_FROM_DATA);
4055 								rto_ok = 0;
4056 							}
4057 							if (tp1->whoTo->rto_needed == 0) {
4058 								tp1->whoTo->rto_needed = 1;
4059 							}
4060 							tp1->do_rtt = 0;
4061 						}
4062 					}
4063 					/*
4064 					 * CMT: CUCv2 algorithm. From the
4065 					 * cumack'd TSNs, for each TSN being
4066 					 * acked for the first time, set the
4067 					 * following variables for the
4068 					 * corresp destination.
4069 					 * new_pseudo_cumack will trigger a
4070 					 * cwnd update.
4071 					 * find_(rtx_)pseudo_cumack will
4072 					 * trigger search for the next
4073 					 * expected (rtx-)pseudo-cumack.
4074 					 */
4075 					tp1->whoTo->new_pseudo_cumack = 1;
4076 					tp1->whoTo->find_pseudo_cumack = 1;
4077 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4078 
4079 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4080 						/* sa_ignore NO_NULL_CHK */
4081 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4082 					}
4083 				}
4084 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4085 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4086 				}
4087 				if (tp1->rec.data.chunk_was_revoked) {
4088 					/* deflate the cwnd */
4089 					tp1->whoTo->cwnd -= tp1->book_size;
4090 					tp1->rec.data.chunk_was_revoked = 0;
4091 				}
4092 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4093 					if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4094 						asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4095 #ifdef INVARIANTS
4096 					} else {
4097 						panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4098 #endif
4099 					}
4100 				}
4101 				if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4102 				    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4103 				    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4104 					asoc->trigger_reset = 1;
4105 				}
4106 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4107 				if (tp1->data) {
4108 					/* sa_ignore NO_NULL_CHK */
4109 					sctp_free_bufspace(stcb, asoc, tp1, 1);
4110 					sctp_m_freem(tp1->data);
4111 					tp1->data = NULL;
4112 				}
4113 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4114 					sctp_log_sack(asoc->last_acked_seq,
4115 					    cumack,
4116 					    tp1->rec.data.tsn,
4117 					    0,
4118 					    0,
4119 					    SCTP_LOG_FREE_SENT);
4120 				}
4121 				asoc->sent_queue_cnt--;
4122 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4123 			} else {
4124 				break;
4125 			}
4126 		}
4127 
4128 	}
4129 	/* sa_ignore NO_NULL_CHK */
4130 	if (stcb->sctp_socket) {
4131 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4132 		struct socket *so;
4133 
4134 #endif
4135 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4136 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4137 			/* sa_ignore NO_NULL_CHK */
4138 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4139 		}
4140 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4141 		so = SCTP_INP_SO(stcb->sctp_ep);
4142 		atomic_add_int(&stcb->asoc.refcnt, 1);
4143 		SCTP_TCB_UNLOCK(stcb);
4144 		SCTP_SOCKET_LOCK(so, 1);
4145 		SCTP_TCB_LOCK(stcb);
4146 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4147 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4148 			/* assoc was freed while we were unlocked */
4149 			SCTP_SOCKET_UNLOCK(so, 1);
4150 			return;
4151 		}
4152 #endif
4153 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4154 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4155 		SCTP_SOCKET_UNLOCK(so, 1);
4156 #endif
4157 	} else {
4158 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4159 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4160 		}
4161 	}
4162 
4163 	/* JRS - Use the congestion control given in the CC module */
4164 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4165 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4166 			if (net->net_ack2 > 0) {
4167 				/*
4168 				 * Karn's rule applies to clearing error
4169 				 * count; this is optional.
4170 				 */
4171 				net->error_count = 0;
4172 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4173 					/* addr came good */
4174 					net->dest_state |= SCTP_ADDR_REACHABLE;
4175 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4176 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4177 				}
4178 				if (net == stcb->asoc.primary_destination) {
4179 					if (stcb->asoc.alternate) {
4180 						/*
4181 						 * release the alternate,
4182 						 * primary is good
4183 						 */
4184 						sctp_free_remote_addr(stcb->asoc.alternate);
4185 						stcb->asoc.alternate = NULL;
4186 					}
4187 				}
4188 				if (net->dest_state & SCTP_ADDR_PF) {
4189 					net->dest_state &= ~SCTP_ADDR_PF;
4190 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4191 					    stcb->sctp_ep, stcb, net,
4192 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4193 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4194 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4195 					/* Done with this net */
4196 					net->net_ack = 0;
4197 				}
4198 				/* restore any doubled timers */
4199 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4200 				if (net->RTO < stcb->asoc.minrto) {
4201 					net->RTO = stcb->asoc.minrto;
4202 				}
4203 				if (net->RTO > stcb->asoc.maxrto) {
4204 					net->RTO = stcb->asoc.maxrto;
4205 				}
4206 			}
4207 		}
4208 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4209 	}
4210 	asoc->last_acked_seq = cumack;
4211 
4212 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4213 		/* nothing left in-flight */
4214 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4215 			net->flight_size = 0;
4216 			net->partial_bytes_acked = 0;
4217 		}
4218 		asoc->total_flight = 0;
4219 		asoc->total_flight_count = 0;
4220 	}
4221 	/* RWND update */
4222 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4223 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4224 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4225 		/* SWS sender side engages */
4226 		asoc->peers_rwnd = 0;
4227 	}
4228 	if (asoc->peers_rwnd > old_rwnd) {
4229 		win_probe_recovery = 1;
4230 	}
4231 	/* Now assure a timer where data is queued at */
4232 again:
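	/*
	 * Re-entry point: if the flight-size audit below repairs the
	 * accounting, we come back here once (guarded by done_once) to
	 * restart the timer checks.
	 */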
4233 	j = 0;
4234 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4235 		if (win_probe_recovery && (net->window_probe)) {
4236 			win_probe_recovered = 1;
4237 			/*
4238 			 * Find the first chunk that was used with the window
4239 			 * probe and clear its sent state.
4240 			 */
4241 			/* sa_ignore FREED_MEMORY */
4242 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4243 				if (tp1->window_probe) {
4244 					/* move back to data send queue */
4245 					sctp_window_probe_recovery(stcb, asoc, tp1);
4246 					break;
4247 				}
4248 			}
4249 		}
4250 		if (net->flight_size) {
4251 			j++;
4252 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4253 			if (net->window_probe) {
4254 				net->window_probe = 0;
4255 			}
4256 		} else {
4257 			if (net->window_probe) {
4258 				/*
4259 				 * In window probes we must assure a timer
4260 				 * is still running there
4261 				 */
4262 				net->window_probe = 0;
4263 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4264 					sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4265 				}
4266 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4267 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4268 				    stcb, net,
4269 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4270 			}
4271 		}
4272 	}
4273 	if ((j == 0) &&
4274 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4275 	    (asoc->sent_queue_retran_cnt == 0) &&
4276 	    (win_probe_recovered == 0) &&
4277 	    (done_once == 0)) {
4278 		/*
4279 		 * huh, this should not happen unless all packets are
4280 		 * PR-SCTP and marked to skip, of course.
4281 		 */
4282 		if (sctp_fs_audit(asoc)) {
4283 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4284 				net->flight_size = 0;
4285 			}
4286 			asoc->total_flight = 0;
4287 			asoc->total_flight_count = 0;
4288 			asoc->sent_queue_retran_cnt = 0;
4289 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4290 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4291 					sctp_flight_size_increase(tp1);
4292 					sctp_total_flight_increase(stcb, tp1);
4293 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4294 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4295 				}
4296 			}
4297 		}
4298 		done_once = 1;
4299 		goto again;
4300 	}
4301 	/**********************************/
4302 	/* Now what about shutdown issues */
4303 	/**********************************/
4304 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4305 		/* nothing left on send queue... consider done */
4306 		/* clean up */
4307 		if ((asoc->stream_queue_cnt == 1) &&
4308 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4309 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4310 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4311 			asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4312 		}
4313 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4314 		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4315 		    (asoc->stream_queue_cnt == 1) &&
4316 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4317 			struct mbuf *op_err;
4318 
4319 			*abort_now = 1;
4320 			/* XXX */
4321 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4322 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4323 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4324 			return;
4325 		}
4326 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4327 		    (asoc->stream_queue_cnt == 0)) {
4328 			struct sctp_nets *netp;
4329 
4330 			if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4331 			    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4332 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4333 			}
4334 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4335 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4336 			sctp_stop_timers_for_shutdown(stcb);
4337 			if (asoc->alternate) {
4338 				netp = asoc->alternate;
4339 			} else {
4340 				netp = asoc->primary_destination;
4341 			}
4342 			sctp_send_shutdown(stcb, netp);
4343 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4344 			    stcb->sctp_ep, stcb, netp);
4345 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4346 			    stcb->sctp_ep, stcb, netp);
4347 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4348 		    (asoc->stream_queue_cnt == 0)) {
4349 			struct sctp_nets *netp;
4350 
4351 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4352 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4353 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4354 			sctp_stop_timers_for_shutdown(stcb);
4355 			if (asoc->alternate) {
4356 				netp = asoc->alternate;
4357 			} else {
4358 				netp = asoc->primary_destination;
4359 			}
4360 			sctp_send_shutdown_ack(stcb, netp);
4361 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4362 			    stcb->sctp_ep, stcb, netp);
4363 		}
4364 	}
4365 	/*********************************************/
4366 	/* Here we perform PR-SCTP procedures        */
4367 	/* (section 4.2)                             */
4368 	/*********************************************/
4369 	/* C1. update advancedPeerAckPoint */
4370 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4371 		asoc->advanced_peer_ack_point = cumack;
4372 	}
4373 	/* PR-SCTP issues need to be addressed too */
4374 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4375 		struct sctp_tmit_chunk *lchk;
4376 		uint32_t old_adv_peer_ack_point;
4377 
4378 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4379 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4380 		/* C3. See if we need to send a Fwd-TSN */
4381 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4382 			/*
4383 			 * ISSUE with ECN, see FWD-TSN processing.
4384 			 */
4385 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4386 				send_forward_tsn(stcb, asoc);
4387 			} else if (lchk) {
4388 				/* try to FR fwd-tsn's that get lost too */
4389 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4390 					send_forward_tsn(stcb, asoc);
4391 				}
4392 			}
4393 		}
4394 		if (lchk) {
4395 			/* Assure a timer is up */
4396 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4397 			    stcb->sctp_ep, stcb, lchk->whoTo);
4398 		}
4399 	}
4400 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4401 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4402 		    rwnd,
4403 		    stcb->asoc.peers_rwnd,
4404 		    stcb->asoc.total_flight,
4405 		    stcb->asoc.total_output_queue_size);
4406 	}
4407 }
4408 
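/*
 * Full (slow-path) SACK handling: validate the cumulative ack, process the
 * gap-ack and NR gap-ack blocks and any duplicate TSN reports, drop acked
 * chunks from the sent queue, and then handle revocation, striking and
 * PR-SCTP procedures as outlined in the comment below.
 */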
4409 void
4410 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4411     struct sctp_tcb *stcb,
4412     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4413     int *abort_now, uint8_t flags,
4414     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4415 {
4416 	struct sctp_association *asoc;
4417 	struct sctp_tmit_chunk *tp1, *tp2;
4418 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4419 	uint16_t wake_him = 0;
4420 	uint32_t send_s = 0;
4421 	long j;
4422 	int accum_moved = 0;
4423 	int will_exit_fast_recovery = 0;
4424 	uint32_t a_rwnd, old_rwnd;
4425 	int win_probe_recovery = 0;
4426 	int win_probe_recovered = 0;
4427 	struct sctp_nets *net = NULL;
4428 	int done_once;
4429 	int rto_ok = 1;
4430 	uint8_t reneged_all = 0;
4431 	uint8_t cmt_dac_flag;
4432 
4433 	/*
4434 	 * we take any chance we can to service our queues since we cannot
4435 	 * get awoken when the socket is read from :<
4436 	 */
4437 	/*
4438 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4439 	 * old sack, if so discard. 2) If there is nothing left in the send
4440 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4441 	 * too, update any rwnd change and verify no timers are running.
4442 	 * then return. 3) Process any new consecutive data i.e. cum-ack
4443 	 * moved; process these first and note that it moved. 4) Process any
4444 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4445 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4446 	 * sync up flightsizes and things, stop all timers and also check
4447 	 * for shutdown_pending state. If so then go ahead and send off the
4448 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4449 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4450 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4451 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4452 	 * if in shutdown_recv state.
4453 	 */
4454 	SCTP_TCB_LOCK_ASSERT(stcb);
4455 	/* CMT DAC algo */
4456 	this_sack_lowest_newack = 0;
4457 	SCTP_STAT_INCR(sctps_slowpath_sack);
4458 	last_tsn = cum_ack;
4459 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4460 #ifdef SCTP_ASOCLOG_OF_TSNS
4461 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4462 	stcb->asoc.cumack_log_at++;
4463 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4464 		stcb->asoc.cumack_log_at = 0;
4465 	}
4466 #endif
4467 	a_rwnd = rwnd;
4468 
4469 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4470 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4471 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4472 	}
4473 	old_rwnd = stcb->asoc.peers_rwnd;
4474 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4475 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4476 		    stcb->asoc.overall_error_count,
4477 		    0,
4478 		    SCTP_FROM_SCTP_INDATA,
4479 		    __LINE__);
4480 	}
4481 	stcb->asoc.overall_error_count = 0;
4482 	asoc = &stcb->asoc;
4483 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4484 		sctp_log_sack(asoc->last_acked_seq,
4485 		    cum_ack,
4486 		    0,
4487 		    num_seg,
4488 		    num_dup,
4489 		    SCTP_LOG_NEW_SACK);
4490 	}
4491 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4492 		uint16_t i;
4493 		uint32_t *dupdata, dblock;
4494 
4495 		for (i = 0; i < num_dup; i++) {
4496 			dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4497 			    sizeof(uint32_t), (uint8_t *)&dblock);
4498 			if (dupdata == NULL) {
4499 				break;
4500 			}
4501 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4502 		}
4503 	}
4504 	/* reality check */
4505 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4506 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4507 		    sctpchunk_listhead);
4508 		send_s = tp1->rec.data.tsn + 1;
4509 	} else {
4510 		tp1 = NULL;
4511 		send_s = asoc->sending_seq;
4512 	}
4513 	if (SCTP_TSN_GE(cum_ack, send_s)) {
4514 		struct mbuf *op_err;
4515 		char msg[SCTP_DIAG_INFO_LEN];
4516 
4517 		/*
4518 		 * no way, we have not even sent this TSN out yet. Peer is
4519 		 * hopelessly messed up with us.
4520 		 */
4521 		SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4522 		    cum_ack, send_s);
4523 		if (tp1) {
4524 			SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4525 			    tp1->rec.data.tsn, (void *)tp1);
4526 		}
4527 hopeless_peer:
4528 		*abort_now = 1;
4529 		/* XXX */
4530 		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater than or equal to TSN %8.8x",
4531 		    cum_ack, send_s);
4532 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4533 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4534 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4535 		return;
4536 	}
4537 	/**********************/
4538 	/* 1) check the range */
4539 	/**********************/
4540 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4541 		/* acking something behind */
4542 		return;
4543 	}
4544 	/* update the Rwnd of the peer */
4545 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4546 	    TAILQ_EMPTY(&asoc->send_queue) &&
4547 	    (asoc->stream_queue_cnt == 0)) {
4548 		/* nothing left on send/sent and strmq */
4549 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4550 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4551 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4552 		}
4553 		asoc->peers_rwnd = a_rwnd;
4554 		if (asoc->sent_queue_retran_cnt) {
4555 			asoc->sent_queue_retran_cnt = 0;
4556 		}
4557 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4558 			/* SWS sender side engages */
4559 			asoc->peers_rwnd = 0;
4560 		}
4561 		/* stop any timers */
4562 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4563 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4564 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4565 			net->partial_bytes_acked = 0;
4566 			net->flight_size = 0;
4567 		}
4568 		asoc->total_flight = 0;
4569 		asoc->total_flight_count = 0;
4570 		return;
4571 	}
4572 	/*
4573 	 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4574 	 * things. The total byte count acked is tracked in netAckSz AND
4575 	 * netAck2 is used to track the total bytes acked that are
4576 	 * unambiguous and were never retransmitted. We track these on a per
4577 	 * destination address basis.
4578 	 */
4579 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4580 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4581 			/* Drag along the window_tsn for cwr's */
4582 			net->cwr_window_tsn = cum_ack;
4583 		}
4584 		net->prev_cwnd = net->cwnd;
4585 		net->net_ack = 0;
4586 		net->net_ack2 = 0;
4587 
4588 		/*
4589 		 * CMT: Reset CUC and Fast recovery algo variables before
4590 		 * SACK processing
4591 		 */
4592 		net->new_pseudo_cumack = 0;
4593 		net->will_exit_fast_recovery = 0;
4594 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4595 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4596 		}
4597 	}
4598 	/* process the new consecutive TSN first */
4599 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4600 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4601 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4602 				accum_moved = 1;
4603 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4604 					/*
4605 					 * If it is less than ACKED, it is
4606 					 * now no longer in flight. Higher
4607 					 * values may occur during marking.
4608 					 */
4609 					if ((tp1->whoTo->dest_state &
4610 					    SCTP_ADDR_UNCONFIRMED) &&
4611 					    (tp1->snd_count < 2)) {
4612 						/*
4613 						 * If there was no retran
4614 						 * and the address is
4615 						 * un-confirmed and we sent
4616 						 * there and are now
4617 						 * sacked, it's confirmed;
4618 						 * mark it so.
4619 						 */
4620 						tp1->whoTo->dest_state &=
4621 						    ~SCTP_ADDR_UNCONFIRMED;
4622 					}
4623 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4624 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4625 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4626 							    tp1->whoTo->flight_size,
4627 							    tp1->book_size,
4628 							    (uint32_t)(uintptr_t)tp1->whoTo,
4629 							    tp1->rec.data.tsn);
4630 						}
4631 						sctp_flight_size_decrease(tp1);
4632 						sctp_total_flight_decrease(stcb, tp1);
4633 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4634 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4635 							    tp1);
4636 						}
4637 					}
4638 					tp1->whoTo->net_ack += tp1->send_size;
4639 
4640 					/* CMT SFR and DAC algos */
4641 					this_sack_lowest_newack = tp1->rec.data.tsn;
4642 					tp1->whoTo->saw_newack = 1;
4643 
4644 					if (tp1->snd_count < 2) {
4645 						/*
4646 						 * True non-retransmitted
4647 						 * chunk
4648 						 */
4649 						tp1->whoTo->net_ack2 +=
4650 						    tp1->send_size;
4651 
4652 						/* update RTO too? */
4653 						if (tp1->do_rtt) {
4654 							if (rto_ok) {
4655 								tp1->whoTo->RTO =
4656 								    sctp_calculate_rto(stcb,
4657 								    asoc, tp1->whoTo,
4658 								    &tp1->sent_rcv_time,
4659 								    SCTP_RTT_FROM_DATA);
4660 								rto_ok = 0;
4661 							}
4662 							if (tp1->whoTo->rto_needed == 0) {
4663 								tp1->whoTo->rto_needed = 1;
4664 							}
4665 							tp1->do_rtt = 0;
4666 						}
4667 					}
4668 					/*
4669 					 * CMT: CUCv2 algorithm. From the
4670 					 * cumack'd TSNs, for each TSN being
4671 					 * acked for the first time, set the
4672 					 * following variables for the
4673 					 * corresp destination.
4674 					 * new_pseudo_cumack will trigger a
4675 					 * cwnd update.
4676 					 * find_(rtx_)pseudo_cumack will
4677 					 * trigger search for the next
4678 					 * expected (rtx-)pseudo-cumack.
4679 					 */
4680 					tp1->whoTo->new_pseudo_cumack = 1;
4681 					tp1->whoTo->find_pseudo_cumack = 1;
4682 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4683 
4684 
4685 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4686 						sctp_log_sack(asoc->last_acked_seq,
4687 						    cum_ack,
4688 						    tp1->rec.data.tsn,
4689 						    0,
4690 						    0,
4691 						    SCTP_LOG_TSN_ACKED);
4692 					}
4693 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4694 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4695 					}
4696 				}
4697 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4698 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4699 #ifdef SCTP_AUDITING_ENABLED
4700 					sctp_audit_log(0xB3,
4701 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4702 #endif
4703 				}
4704 				if (tp1->rec.data.chunk_was_revoked) {
4705 					/* deflate the cwnd */
4706 					tp1->whoTo->cwnd -= tp1->book_size;
4707 					tp1->rec.data.chunk_was_revoked = 0;
4708 				}
4709 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4710 					tp1->sent = SCTP_DATAGRAM_ACKED;
4711 				}
4712 			}
4713 		} else {
4714 			break;
4715 		}
4716 	}
4717 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4718 	/* always set this up to cum-ack */
4719 	asoc->this_sack_highest_gap = last_tsn;
4720 
4721 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4722 
4723 		/*
4724 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4725 		 * to be greater than the cumack. Also reset saw_newack to 0
4726 		 * for all dests.
4727 		 */
4728 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4729 			net->saw_newack = 0;
4730 			net->this_sack_highest_newack = last_tsn;
4731 		}
4732 
4733 		/*
4734 		 * thisSackHighestGap will increase while handling NEW
4735 		 * segments this_sack_highest_newack will increase while
4736 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4737 		 * used for CMT DAC algo. saw_newack will also change.
4738 		 */
4739 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4740 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4741 		    num_seg, num_nr_seg, &rto_ok)) {
4742 			wake_him++;
4743 		}
4744 		/*
4745 		 * validate the biggest_tsn_acked in the gap acks if strict
4746 		 * adherence is wanted.
4747 		 */
4748 		if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4749 			/*
4750 			 * peer is either confused or we are under attack.
4751 			 * We must abort.
4752 			 */
4753 			SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4754 			    biggest_tsn_acked, send_s);
4755 			goto hopeless_peer;
4756 		}
4757 	}
4758 	/*******************************************/
4759 	/* cancel ALL T3-send timer if accum moved */
4760 	/*******************************************/
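	/*
	 * With CMT the T3-send timer is stopped only on destinations whose
	 * pseudo-cumack advanced; without CMT it is stopped on every net
	 * once the cum-ack moved. Timers are restarted further below for
	 * any net that still has data in flight.
	 */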
4761 	if (asoc->sctp_cmt_on_off > 0) {
4762 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4763 			if (net->new_pseudo_cumack)
4764 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4765 				    stcb, net,
4766 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4768 		}
4769 	} else {
4770 		if (accum_moved) {
4771 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4772 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4773 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4774 			}
4775 		}
4776 	}
4777 	/********************************************/
4778 	/* drop the acked chunks from the sentqueue */
4779 	/********************************************/
4780 	asoc->last_acked_seq = cum_ack;
4781 
4782 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4783 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4784 			break;
4785 		}
4786 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4787 			if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4788 				asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4789 #ifdef INVARIANTS
4790 			} else {
4791 				panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4792 #endif
4793 			}
4794 		}
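		/*
		 * If this was the last chunk queued for a stream that has a
		 * reset pending, note that the pending reset can now go out.
		 */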
4795 		if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4796 		    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4797 		    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4798 			asoc->trigger_reset = 1;
4799 		}
4800 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4801 		if (PR_SCTP_ENABLED(tp1->flags)) {
4802 			if (asoc->pr_sctp_cnt != 0)
4803 				asoc->pr_sctp_cnt--;
4804 		}
4805 		asoc->sent_queue_cnt--;
4806 		if (tp1->data) {
4807 			/* sa_ignore NO_NULL_CHK */
4808 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4809 			sctp_m_freem(tp1->data);
4810 			tp1->data = NULL;
4811 			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4812 				asoc->sent_queue_cnt_removeable--;
4813 			}
4814 		}
4815 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4816 			sctp_log_sack(asoc->last_acked_seq,
4817 			    cum_ack,
4818 			    tp1->rec.data.tsn,
4819 			    0,
4820 			    0,
4821 			    SCTP_LOG_FREE_SENT);
4822 		}
4823 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4824 		wake_him++;
4825 	}
4826 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4827 #ifdef INVARIANTS
4828 		panic("Flight size is positive but should be 0");
4829 #else
4830 		SCTP_PRINTF("Warning: flight size should be 0 but is %d\n",
4831 		    asoc->total_flight);
4832 #endif
4833 		asoc->total_flight = 0;
4834 	}
4835 	/* sa_ignore NO_NULL_CHK */
4836 	if ((wake_him) && (stcb->sctp_socket)) {
4837 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4838 		struct socket *so;
4839 
4840 #endif
4841 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4842 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4843 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4844 		}
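		/*
		 * On platforms where the socket lock must be acquired before
		 * the TCB lock (e.g. the Apple port), hold a reference on
		 * the association, drop the TCB lock, take the socket lock,
		 * and re-acquire the TCB lock, re-checking that the
		 * association was not closed in the meantime.
		 */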
4845 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4846 		so = SCTP_INP_SO(stcb->sctp_ep);
4847 		atomic_add_int(&stcb->asoc.refcnt, 1);
4848 		SCTP_TCB_UNLOCK(stcb);
4849 		SCTP_SOCKET_LOCK(so, 1);
4850 		SCTP_TCB_LOCK(stcb);
4851 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4852 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4853 			/* assoc was freed while we were unlocked */
4854 			SCTP_SOCKET_UNLOCK(so, 1);
4855 			return;
4856 		}
4857 #endif
4858 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4859 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4860 		SCTP_SOCKET_UNLOCK(so, 1);
4861 #endif
4862 	} else {
4863 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4864 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4865 		}
4866 	}
4867 
4868 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4869 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4870 			/* Setup so we will exit RFC2582 fast recovery */
4871 			will_exit_fast_recovery = 1;
4872 		}
4873 	}
4874 	/*
4875 	 * Check for revoked fragments:
4876 	 *
4877 	 * - If the previous SACK had no frags, nothing can have been revoked.
4878 	 * - If it had frags and we now have frags (num_seg > 0), call
4879 	 *   sctp_check_for_revoked() to see whether the peer revoked some.
4880 	 * - Otherwise the peer revoked all ACKED fragments, since we had
4881 	 *   some before and now we have NONE.
4882 	 */
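	/*
	 * E.g., if an earlier SACK gap-acked TSN 7 while the cum-ack stayed
	 * at 5, and this SACK still carries cum-ack 5 but no longer reports
	 * 7, the peer has revoked 7; it must go back in flight and be
	 * eligible for retransmission.
	 */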
4883 
4884 	if (num_seg) {
4885 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4886 		asoc->saw_sack_with_frags = 1;
4887 	} else if (asoc->saw_sack_with_frags) {
4888 		int cnt_revoked = 0;
4889 
4890 		/* Peer revoked all datagrams marked or acked */
4891 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4892 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4893 				tp1->sent = SCTP_DATAGRAM_SENT;
4894 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4895 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4896 					    tp1->whoTo->flight_size,
4897 					    tp1->book_size,
4898 					    (uint32_t)(uintptr_t)tp1->whoTo,
4899 					    tp1->rec.data.tsn);
4900 				}
4901 				sctp_flight_size_increase(tp1);
4902 				sctp_total_flight_increase(stcb, tp1);
4903 				tp1->rec.data.chunk_was_revoked = 1;
4904 				/*
4905 				 * To ensure that this increase in
4906 				 * flightsize, which is artificial, does not
4907 				 * throttle the sender, we also increase the
4908 				 * cwnd artificially.
4909 				 */
4910 				tp1->whoTo->cwnd += tp1->book_size;
4911 				cnt_revoked++;
4912 			}
4913 		}
4914 		if (cnt_revoked) {
4915 			reneged_all = 1;
4916 		}
4917 		asoc->saw_sack_with_frags = 0;
4918 	}
4919 	if (num_nr_seg > 0)
4920 		asoc->saw_sack_with_nr_frags = 1;
4921 	else
4922 		asoc->saw_sack_with_nr_frags = 0;
4923 
4924 	/* JRS - Use the congestion control given in the CC module */
4925 	if (ecne_seen == 0) {
4926 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4927 			if (net->net_ack2 > 0) {
4928 				/*
4929 				 * Karn's rule applies to clearing the error
4930 				 * count; this is optional.
4931 				 */
4932 				net->error_count = 0;
4933 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4934 					/* addr came good */
4935 					net->dest_state |= SCTP_ADDR_REACHABLE;
4936 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4937 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4938 				}
4939 				if (net == stcb->asoc.primary_destination) {
4940 					if (stcb->asoc.alternate) {
4941 						/*
4942 						 * release the alternate,
4943 						 * primary is good
4944 						 */
4945 						sctp_free_remote_addr(stcb->asoc.alternate);
4946 						stcb->asoc.alternate = NULL;
4947 					}
4948 				}
4949 				if (net->dest_state & SCTP_ADDR_PF) {
4950 					net->dest_state &= ~SCTP_ADDR_PF;
4951 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4952 					    stcb->sctp_ep, stcb, net,
4953 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4954 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4955 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4956 					/* Done with this net */
4957 					net->net_ack = 0;
4958 				}
4959 				/* restore any doubled timers */
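				/*
				 * In effect RTO = SRTT + 4 * RTTVAR (lastsa
				 * and lastsv hold the scaled fixed-point
				 * values), clamped below to the configured
				 * [minrto, maxrto] range.
				 */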
4960 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4961 				if (net->RTO < stcb->asoc.minrto) {
4962 					net->RTO = stcb->asoc.minrto;
4963 				}
4964 				if (net->RTO > stcb->asoc.maxrto) {
4965 					net->RTO = stcb->asoc.maxrto;
4966 				}
4967 			}
4968 		}
4969 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4970 	}
4971 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4972 		/* nothing left in-flight */
4973 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4974 			/* stop all timers */
4975 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4976 			    stcb, net,
4977 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4978 			net->flight_size = 0;
4979 			net->partial_bytes_acked = 0;
4980 		}
4981 		asoc->total_flight = 0;
4982 		asoc->total_flight_count = 0;
4983 	}
4984 	/**********************************/
4985 	/* Now what about shutdown issues */
4986 	/**********************************/
4987 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4988 		/* nothing left on the send queue ... consider done */
4989 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4990 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4991 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4992 		}
4993 		asoc->peers_rwnd = a_rwnd;
4994 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4995 			/* SWS sender side engages */
4996 			asoc->peers_rwnd = 0;
4997 		}
4998 		/* clean up */
4999 		if ((asoc->stream_queue_cnt == 1) &&
5000 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5001 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5002 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
5003 			asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
5004 		}
5005 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5006 		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5007 		    (asoc->stream_queue_cnt == 1) &&
5008 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
5009 			struct mbuf *op_err;
5010 
5011 			*abort_now = 1;
5012 			/* XXX */
5013 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
5014 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
5015 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5016 			return;
5017 		}
5018 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5019 		    (asoc->stream_queue_cnt == 0)) {
5020 			struct sctp_nets *netp;
5021 
5022 			if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
5023 			    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5024 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5025 			}
5026 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
5027 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5028 			sctp_stop_timers_for_shutdown(stcb);
5029 			if (asoc->alternate) {
5030 				netp = asoc->alternate;
5031 			} else {
5032 				netp = asoc->primary_destination;
5033 			}
5034 			sctp_send_shutdown(stcb, netp);
5035 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5036 			    stcb->sctp_ep, stcb, netp);
5037 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5038 			    stcb->sctp_ep, stcb, netp);
5039 			return;
5040 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5041 		    (asoc->stream_queue_cnt == 0)) {
5042 			struct sctp_nets *netp;
5043 
5044 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5045 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
5046 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5047 			sctp_stop_timers_for_shutdown(stcb);
5048 			if (asoc->alternate) {
5049 				netp = asoc->alternate;
5050 			} else {
5051 				netp = asoc->primary_destination;
5052 			}
5053 			sctp_send_shutdown_ack(stcb, netp);
5054 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5055 			    stcb->sctp_ep, stcb, netp);
5056 			return;
5057 		}
5058 	}
5059 	/*
5060 	 * Now here we are going to recycle net_ack for a different use...
5061 	 * HEADS UP.
5062 	 */
5063 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5064 		net->net_ack = 0;
5065 	}
5066 
5067 	/*
5068 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5069 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5070 	 * automatically ensure that.
5071 	 */
5072 	if ((asoc->sctp_cmt_on_off > 0) &&
5073 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5074 	    (cmt_dac_flag == 0)) {
5075 		this_sack_lowest_newack = cum_ack;
5076 	}
5077 	if ((num_seg > 0) || (num_nr_seg > 0)) {
5078 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5079 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5080 	}
5081 	/* JRS - Use the congestion control given in the CC module */
5082 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5083 
5084 	/* Now are we exiting loss recovery ? */
5085 	if (will_exit_fast_recovery) {
5086 		/* Ok, we must exit fast recovery */
5087 		asoc->fast_retran_loss_recovery = 0;
5088 	}
5089 	if ((asoc->sat_t3_loss_recovery) &&
5090 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5091 		/* end satellite t3 loss recovery */
5092 		asoc->sat_t3_loss_recovery = 0;
5093 	}
5094 	/*
5095 	 * CMT Fast recovery
5096 	 */
5097 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5098 		if (net->will_exit_fast_recovery) {
5099 			/* Ok, we must exit fast recovery */
5100 			net->fast_retran_loss_recovery = 0;
5101 		}
5102 	}
5103 
5104 	/* Adjust and set the new rwnd value */
5105 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5106 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5107 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5108 	}
5109 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5110 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5111 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5112 		/* SWS sender side engages */
5113 		asoc->peers_rwnd = 0;
5114 	}
5115 	if (asoc->peers_rwnd > old_rwnd) {
5116 		win_probe_recovery = 1;
5117 	}
5118 	/*
5119 	 * Now we must setup so we have a timer up for anyone with
5120 	 * outstanding data.
5121 	 */
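	/*
	 * Each pass below: any net that still has data in flight must have
	 * a T3-send timer running (j counts such nets). If no net has
	 * flight but the sent queue is not empty and nothing is marked for
	 * retransmission, the flight-size accounting has drifted, so
	 * sctp_fs_audit() rebuilds it and we retry once (done_once).
	 */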
5122 	done_once = 0;
5123 again:
5124 	j = 0;
5125 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5126 		if (win_probe_recovery && (net->window_probe)) {
5127 			win_probe_recovered = 1;
5128 			/*-
5129 			 * Find the first chunk that was used for a
5130 			 * window probe and clear the event. Put it
5131 			 * back into the send queue as if it had not
5132 			 * been sent.
5133 			 */
5134 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5135 				if (tp1->window_probe) {
5136 					sctp_window_probe_recovery(stcb, asoc, tp1);
5137 					break;
5138 				}
5139 			}
5140 		}
5141 		if (net->flight_size) {
5142 			j++;
5143 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5144 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5145 				    stcb->sctp_ep, stcb, net);
5146 			}
5147 			if (net->window_probe) {
5148 				net->window_probe = 0;
5149 			}
5150 		} else {
5151 			if (net->window_probe) {
5152 				/*
5153 				 * For window probes we must ensure that a
5154 				 * timer is still running there.
5155 				 */
5156 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5157 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5158 					    stcb->sctp_ep, stcb, net);
5160 				}
5161 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5162 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5163 				    stcb, net,
5164 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5165 			}
5166 		}
5167 	}
5168 	if ((j == 0) &&
5169 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5170 	    (asoc->sent_queue_retran_cnt == 0) &&
5171 	    (win_probe_recovered == 0) &&
5172 	    (done_once == 0)) {
5173 		/*
5174 		 * Huh, this should not happen unless all packets are
5175 		 * PR-SCTP and marked to be skipped, of course.
5176 		 */
5177 		if (sctp_fs_audit(asoc)) {
5178 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5179 				net->flight_size = 0;
5180 			}
5181 			asoc->total_flight = 0;
5182 			asoc->total_flight_count = 0;
5183 			asoc->sent_queue_retran_cnt = 0;
5184 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5185 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5186 					sctp_flight_size_increase(tp1);
5187 					sctp_total_flight_increase(stcb, tp1);
5188 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5189 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5190 				}
5191 			}
5192 		}
5193 		done_once = 1;
5194 		goto again;
5195 	}
5196 	/*********************************************/
5197 	/* Here we perform PR-SCTP procedures        */
5198 	/* (section 4.2)                             */
5199 	/*********************************************/
5200 	/* C1. update advancedPeerAckPoint */
5201 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5202 		asoc->advanced_peer_ack_point = cum_ack;
5203 	}
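	/*
	 * E.g., with cum-ack 10 and PR-SCTP TSNs 11 and 12 abandoned, the
	 * ack point can advance to 12, and a FORWARD-TSN carrying 12 tells
	 * the receiver to skip ahead.
	 */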
5204 	/* C2. try to further move advancedPeerAckPoint ahead */
5205 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5206 		struct sctp_tmit_chunk *lchk;
5207 		uint32_t old_adv_peer_ack_point;
5208 
5209 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5210 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5211 		/* C3. See if we need to send a Fwd-TSN */
5212 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5213 			/*
5214 			 * ISSUE with ECN, see FWD-TSN processing.
5215 			 */
5216 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5217 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5218 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5219 				    old_adv_peer_ack_point);
5220 			}
5221 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5222 				send_forward_tsn(stcb, asoc);
5223 			} else if (lchk) {
5224 				/* try to FR fwd-tsn's that get lost too */
5225 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5226 					send_forward_tsn(stcb, asoc);
5227 				}
5228 			}
5229 		}
5230 		if (lchk) {
5231 			/* Assure a timer is up */
5232 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5233 			    stcb->sctp_ep, stcb, lchk->whoTo);
5234 		}
5235 	}
5236 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5237 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5238 		    a_rwnd,
5239 		    stcb->asoc.peers_rwnd,
5240 		    stcb->asoc.total_flight,
5241 		    stcb->asoc.total_output_queue_size);
5242 	}
5243 }
5244 
5245 void
5246 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5247 {
5248 	/* Copy cum-ack */
5249 	uint32_t cum_ack, a_rwnd;
5250 
5251 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5252 	/* Arrange so a_rwnd does NOT change */
5253 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
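	/*
	 * peers_rwnd was last computed as roughly a_rwnd minus the data in
	 * flight, so adding total_flight back reconstructs (approximately)
	 * the last advertised window.
	 */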
5254 
5255 	/* Now call the express sack handling */
5256 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5257 }
5258 
5259 static void
5260 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5261     struct sctp_stream_in *strmin)
5262 {
5263 	struct sctp_queued_to_read *control, *ncontrol;
5264 	struct sctp_association *asoc;
5265 	uint32_t mid;
5266 	int need_reasm_check = 0;
5267 
5268 	asoc = &stcb->asoc;
5269 	mid = strmin->last_mid_delivered;
5270 	/*
5271 	 * First deliver anything prior to and including the message ID
5272 	 * that came in.
5273 	 */
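	/*
	 * E.g., if the FORWARD-TSN moved last_mid_delivered to 5, complete
	 * queued messages with MID <= 5 are handed up here; the second pass
	 * below then delivers any messages that have become in-sequence
	 * (MID 6, 7, ...).
	 */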
5274 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5275 		if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5276 			/* this is deliverable now */
5277 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5278 				if (control->on_strm_q) {
5279 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5280 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5281 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5282 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5283 #ifdef INVARIANTS
5284 					} else {
5285 						panic("strmin: %p ctl: %p unknown %d",
5286 						    strmin, control, control->on_strm_q);
5287 #endif
5288 					}
5289 					control->on_strm_q = 0;
5290 				}
5291 				/* subtract pending on streams */
5292 				if (asoc->size_on_all_streams >= control->length) {
5293 					asoc->size_on_all_streams -= control->length;
5294 				} else {
5295 #ifdef INVARIANTS
5296 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5297 #else
5298 					asoc->size_on_all_streams = 0;
5299 #endif
5300 				}
5301 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5302 				/* deliver it to at least the delivery-q */
5303 				if (stcb->sctp_socket) {
5304 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5305 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5306 					    control,
5307 					    &stcb->sctp_socket->so_rcv,
5308 					    1, SCTP_READ_LOCK_HELD,
5309 					    SCTP_SO_NOT_LOCKED);
5310 				}
5311 			} else {
5312 				/* It's a fragmented message */
5313 				if (control->first_frag_seen) {
5314 					/*
5315 					 * Make it so this is next to
5316 					 * deliver; we restore it later.
5317 					 */
5318 					strmin->last_mid_delivered = control->mid - 1;
5319 					need_reasm_check = 1;
5320 					break;
5321 				}
5322 			}
5323 		} else {
5324 			/* no more delivery now. */
5325 			break;
5326 		}
5327 	}
5328 	if (need_reasm_check) {
5329 		int ret;
5330 
5331 		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5332 		if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5333 			/* Restore the next to deliver unless we are ahead */
5334 			strmin->last_mid_delivered = mid;
5335 		}
5336 		if (ret == 0) {
5337 			/* Left the front partial one on the queue */
5338 			return;
5339 		}
5340 		need_reasm_check = 0;
5341 	}
5342 	/*
5343 	 * Now we must deliver things in the queue the normal way, if any
5344 	 * are now ready.
5345 	 */
5346 	mid = strmin->last_mid_delivered + 1;
5347 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5348 		if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5349 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5350 				/* this is deliverable now */
5351 				if (control->on_strm_q) {
5352 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5353 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5354 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5355 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5356 #ifdef INVARIANTS
5357 					} else {
5358 						panic("strmin: %p ctl: %p unknown %d",
5359 						    strmin, control, control->on_strm_q);
5360 #endif
5361 					}
5362 					control->on_strm_q = 0;
5363 				}
5364 				/* subtract pending on streams */
5365 				if (asoc->size_on_all_streams >= control->length) {
5366 					asoc->size_on_all_streams -= control->length;
5367 				} else {
5368 #ifdef INVARIANTS
5369 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5370 #else
5371 					asoc->size_on_all_streams = 0;
5372 #endif
5373 				}
5374 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5375 				/* deliver it to at least the delivery-q */
5376 				strmin->last_mid_delivered = control->mid;
5377 				if (stcb->sctp_socket) {
5378 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5379 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5380 					    control,
5381 					    &stcb->sctp_socket->so_rcv, 1,
5382 					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5384 				}
5385 				mid = strmin->last_mid_delivered + 1;
5386 			} else {
5387 				/* It's a fragmented message */
5388 				if (control->first_frag_seen) {
5389 					/*
5390 					 * Make it so this is next to
5391 					 * deliver
5392 					 */
5393 					strmin->last_mid_delivered = control->mid - 1;
5394 					need_reasm_check = 1;
5395 					break;
5396 				}
5397 			}
5398 		} else {
5399 			break;
5400 		}
5401 	}
5402 	if (need_reasm_check) {
5403 		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5404 	}
5405 }
5406 
5409 static void
5410 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5411     struct sctp_association *asoc,
5412     uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
5413 {
5414 	struct sctp_queued_to_read *control;
5415 	struct sctp_stream_in *strm;
5416 	struct sctp_tmit_chunk *chk, *nchk;
5417 	int cnt_removed = 0;
5418 
5419 	/*
5420 	 * For now, large messages held on the stream reassembly queue that
5421 	 * are complete will be tossed too. We could in theory do more work
5422 	 * to spin through and stop after dumping one message, i.e. upon
5423 	 * seeing the start of a new message at the head, and call the
5424 	 * delivery function to see if it can be delivered. But for now we
5425 	 * just dump everything on the queue.
5426 	 */
5427 	strm = &asoc->strmin[stream];
5428 	control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
5429 	if (control == NULL) {
5430 		/* Not found */
5431 		return;
5432 	}
5433 	if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5434 		return;
5435 	}
5436 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5437 		/* Purge hanging chunks */
5438 		if (!asoc->idata_supported && (ordered == 0)) {
5439 			if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5440 				break;
5441 			}
5442 		}
5443 		cnt_removed++;
5444 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5445 		if (asoc->size_on_reasm_queue >= chk->send_size) {
5446 			asoc->size_on_reasm_queue -= chk->send_size;
5447 		} else {
5448 #ifdef INVARIANTS
5449 			panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5450 #else
5451 			asoc->size_on_reasm_queue = 0;
5452 #endif
5453 		}
5454 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5455 		if (chk->data) {
5456 			sctp_m_freem(chk->data);
5457 			chk->data = NULL;
5458 		}
5459 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5460 	}
5461 	if (!TAILQ_EMPTY(&control->reasm)) {
5462 		/* This has to be old data, unordered */
5463 		if (control->data) {
5464 			sctp_m_freem(control->data);
5465 			control->data = NULL;
5466 		}
5467 		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5468 		chk = TAILQ_FIRST(&control->reasm);
5469 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5470 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5471 			sctp_add_chk_to_control(control, strm, stcb, asoc,
5472 			    chk, SCTP_READ_LOCK_HELD);
5473 		}
5474 		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
5475 		return;
5476 	}
5477 	if (control->on_strm_q == SCTP_ON_ORDERED) {
5478 		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5479 		if (asoc->size_on_all_streams >= control->length) {
5480 			asoc->size_on_all_streams -= control->length;
5481 		} else {
5482 #ifdef INVARIANTS
5483 			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5484 #else
5485 			asoc->size_on_all_streams = 0;
5486 #endif
5487 		}
5488 		sctp_ucount_decr(asoc->cnt_on_all_streams);
5489 		control->on_strm_q = 0;
5490 	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5491 		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5492 		control->on_strm_q = 0;
5493 #ifdef INVARIANTS
5494 	} else if (control->on_strm_q) {
5495 		panic("strm: %p ctl: %p unknown %d",
5496 		    strm, control, control->on_strm_q);
5497 #endif
5498 	}
5499 	control->on_strm_q = 0;
5500 	if (control->on_read_q == 0) {
5501 		sctp_free_remote_addr(control->whoFrom);
5502 		if (control->data) {
5503 			sctp_m_freem(control->data);
5504 			control->data = NULL;
5505 		}
5506 		sctp_free_a_readq(stcb, control);
5507 	}
5508 }
5509 
5510 void
5511 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5512     struct sctp_forward_tsn_chunk *fwd,
5513     int *abort_flag, struct mbuf *m, int offset)
5514 {
5515 	/* The pr-sctp fwd tsn */
5516 	/*
5517 	 * Here we will perform all the data receiver side steps for
5518 	 * processing FwdTSN, as required by the PR-SCTP draft.
5519 	 *
5520 	 * Assume we get FwdTSN(x):
5521 	 * 1) update local cumTSN to x
5522 	 * 2) try to further advance cumTSN to x + others we have
5523 	 * 3) examine and update the re-ordering queues on pr-in-streams
5524 	 * 4) clean up the re-assembly queue
5525 	 * 5) send a SACK to report where we are
5526 	 */
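	/*
	 * E.g., with cumulative_tsn = 100, FwdTSN(104) in effect marks TSNs
	 * 101-104 as received and non-renegable, advances cumulative_tsn to
	 * 104, flushes the affected reordering/reassembly state, and a SACK
	 * then reports the new point (step 5 above).
	 */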
5527 	struct sctp_association *asoc;
5528 	uint32_t new_cum_tsn, gap;
5529 	unsigned int i, fwd_sz, m_size;
5530 	uint32_t str_seq;
5531 	struct sctp_stream_in *strm;
5532 	struct sctp_queued_to_read *control, *sv;
5533 
5534 	asoc = &stcb->asoc;
5535 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5536 		SCTPDBG(SCTP_DEBUG_INDATA1,
5537 		    "Bad size too small/big fwd-tsn\n");
5538 		return;
5539 	}
5540 	m_size = (stcb->asoc.mapping_array_size << 3);
5541 	/*************************************************************/
5542 	/* 1. Here we update local cumTSN and shift the bitmap array */
5543 	/*************************************************************/
5544 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5545 
5546 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5547 		/* Already got there ... */
5548 		return;
5549 	}
5550 	/*
5551 	 * Now we know the new TSN is more advanced; let's find the actual
5552 	 * gap.
5553 	 */
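	/*
	 * SCTP_CALC_TSN_TO_GAP yields the bit offset of new_cum_tsn
	 * relative to mapping_array_base_tsn, accounting for 32-bit
	 * serial-number wrap.
	 */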
5554 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5555 	asoc->cumulative_tsn = new_cum_tsn;
5556 	if (gap >= m_size) {
5557 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5558 			struct mbuf *op_err;
5559 			char msg[SCTP_DIAG_INFO_LEN];
5560 
5561 			/*
5562 			 * Out of range (beyond the rwnd of single-byte
5563 			 * chunks that I give out). This must be an attacker.
5564 			 */
5565 			*abort_flag = 1;
5566 			snprintf(msg, sizeof(msg),
5567 			    "New cum ack %8.8x too high, highest TSN %8.8x",
5568 			    new_cum_tsn, asoc->highest_tsn_inside_map);
5569 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5570 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5571 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5572 			return;
5573 		}
5574 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5575 
5576 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5577 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5578 		asoc->highest_tsn_inside_map = new_cum_tsn;
5579 
5580 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5581 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5582 
5583 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5584 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5585 		}
5586 	} else {
5587 		SCTP_TCB_LOCK_ASSERT(stcb);
5588 		for (i = 0; i <= gap; i++) {
5589 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5590 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5591 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5592 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5593 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5594 				}
5595 			}
5596 		}
5597 	}
5598 	/*************************************************************/
5599 	/* 2. Clean up the re-assembly queue                         */
5600 	/*************************************************************/
5601 
5602 	/* This is now done as part of clearing up the stream/seq */
5603 	if (asoc->idata_supported == 0) {
5604 		uint16_t sid;
5605 
5606 		/* Flush all the un-ordered data based on cum-tsn */
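		/*
		 * Old DATA chunks give unordered messages no sequence number
		 * usable here, so walk every incoming stream and purge
		 * unordered reassembly data up to the new cum-TSN.
		 */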
5607 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5608 		for (sid = 0; sid < asoc->streamincnt; sid++) {
5609 			sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
5610 		}
5611 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5612 	}
5613 	/*******************************************************/
5614 	/* 3. Update the PR-stream re-ordering queues and fix  */
5615 	/*    delivery issues as needed.                        */
5616 	/*******************************************************/
5617 	fwd_sz -= sizeof(*fwd);
5618 	if (m && fwd_sz) {
5619 		/* New method. */
5620 		unsigned int num_str;
5621 		uint32_t mid, cur_mid;
5622 		uint16_t sid;
5623 		uint16_t ordered, flags;
5624 		struct sctp_strseq *stseq, strseqbuf;
5625 		struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5626 
5627 		offset += sizeof(*fwd);
5628 
5629 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5630 		if (asoc->idata_supported) {
5631 			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5632 		} else {
5633 			num_str = fwd_sz / sizeof(struct sctp_strseq);
5634 		}
5635 		for (i = 0; i < num_str; i++) {
5636 			if (asoc->idata_supported) {
5637 				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5638 				    sizeof(struct sctp_strseq_mid),
5639 				    (uint8_t *)&strseqbuf_m);
5640 				offset += sizeof(struct sctp_strseq_mid);
5641 				if (stseq_m == NULL) {
5642 					break;
5643 				}
5644 				sid = ntohs(stseq_m->sid);
5645 				mid = ntohl(stseq_m->mid);
5646 				flags = ntohs(stseq_m->flags);
5647 				if (flags & PR_SCTP_UNORDERED_FLAG) {
5648 					ordered = 0;
5649 				} else {
5650 					ordered = 1;
5651 				}
5652 			} else {
5653 				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5654 				    sizeof(struct sctp_strseq),
5655 				    (uint8_t *)&strseqbuf);
5656 				offset += sizeof(struct sctp_strseq);
5657 				if (stseq == NULL) {
5658 					break;
5659 				}
5660 				sid = ntohs(stseq->sid);
5661 				mid = (uint32_t)ntohs(stseq->ssn);
5662 				ordered = 1;
5663 			}
5664 			/* Convert */
5665 
5666 			/* now process */
5667 
5668 			/*
5669 			 * Ok we now look for the stream/seq on the read
5670 			 * queue where its not all delivered. If we find it
5671 			 * we transmute the read entry into a PDI_ABORTED.
5672 			 */
5673 			if (sid >= asoc->streamincnt) {
5674 				/* screwed up streams, stop!  */
5675 				break;
5676 			}
5677 			if ((asoc->str_of_pdapi == sid) &&
5678 			    (asoc->ssn_of_pdapi == mid)) {
5679 				/*
5680 				 * If this is the one we were partially
5681 				 * delivering now then we no longer are.
5682 				 * Note this will change with the reassembly
5683 				 * re-write.
5684 				 */
5685 				asoc->fragmented_delivery_inprogress = 0;
5686 			}
5687 			strm = &asoc->strmin[sid];
5688 			for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
5689 				sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
5690 			}
5691 			TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5692 				if ((control->sinfo_stream == sid) &&
5693 				    (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
5694 					str_seq = (sid << 16) | (0x0000ffff & mid);
5695 					control->pdapi_aborted = 1;
5696 					sv = stcb->asoc.control_pdapi;
5697 					control->end_added = 1;
5698 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5699 						TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5700 						if (asoc->size_on_all_streams >= control->length) {
5701 							asoc->size_on_all_streams -= control->length;
5702 						} else {
5703 #ifdef INVARIANTS
5704 							panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5705 #else
5706 							asoc->size_on_all_streams = 0;
5707 #endif
5708 						}
5709 						sctp_ucount_decr(asoc->cnt_on_all_streams);
5710 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5711 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5712 #ifdef INVARIANTS
5713 					} else if (control->on_strm_q) {
5714 						panic("strm: %p ctl: %p unknown %d",
5715 						    strm, control, control->on_strm_q);
5716 #endif
5717 					}
5718 					control->on_strm_q = 0;
5719 					stcb->asoc.control_pdapi = control;
5720 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5721 					    stcb,
5722 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5723 					    (void *)&str_seq,
5724 					    SCTP_SO_NOT_LOCKED);
5725 					stcb->asoc.control_pdapi = sv;
5726 					break;
5727 				} else if ((control->sinfo_stream == sid) &&
5728 				    SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5729 					/* We are past our victim SSN */
5730 					break;
5731 				}
5732 			}
5733 			if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5734 				/* Update the sequence number */
5735 				strm->last_mid_delivered = mid;
5736 			}
5737 			/* now kick the stream the new way */
5738 			/* sa_ignore NO_NULL_CHK */
5739 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5740 		}
5741 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5742 	}
5743 	/*
5744 	 * Now slide things forward.
5745 	 */
5746 	sctp_slide_mapping_arrays(stcb);
5747 }
5748