1 /*	$KAME: sctputil.c,v 1.39 2005/06/16 20:54:06 jinmei Exp $	*/
2 /*	$NetBSD: sctputil.c,v 1.15 2019/08/13 19:55:40 rjs Exp $	*/
3 
4 /*
5  * Copyright (c) 2001, 2002, 2003, 2004 Cisco Systems, Inc.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *      This product includes software developed by Cisco Systems, Inc.
19  * 4. Neither the name of the project nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY CISCO SYSTEMS AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL CISCO SYSTEMS OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: sctputil.c,v 1.15 2019/08/13 19:55:40 rjs Exp $");
38 
39 #ifdef _KERNEL_OPT
40 #include "opt_inet.h"
41 #include "opt_ipsec.h"
42 #include "opt_sctp.h"
43 #endif /* _KERNEL_OPT */
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/malloc.h>
48 #include <sys/mbuf.h>
49 #include <sys/domain.h>
50 #include <sys/protosw.h>
51 #include <sys/socket.h>
52 #include <sys/socketvar.h>
53 #include <sys/mutex.h>
54 #include <sys/proc.h>
55 #include <sys/kernel.h>
56 #include <sys/sysctl.h>
57 
58 #include <sys/callout.h>
59 
60 #include <net/route.h>
61 
62 #ifdef INET6
63 #include <sys/domain.h>
64 #endif
65 
66 #include <machine/limits.h>
67 
68 #include <net/if.h>
69 #include <net/if_types.h>
70 #include <net/route.h>
71 
72 #include <netinet/in.h>
73 #include <netinet/in_systm.h>
74 #include <netinet/ip.h>
75 #include <netinet/in_pcb.h>
76 #include <netinet/in_var.h>
77 #include <netinet/ip_var.h>
78 
79 #ifdef INET6
80 #include <netinet/ip6.h>
81 #include <netinet6/ip6_var.h>
82 #include <netinet6/scope6_var.h>
83 #include <netinet6/in6_pcb.h>
84 
85 #endif /* INET6 */
86 
87 #include <netinet/sctp_pcb.h>
88 
89 #ifdef IPSEC
90 #include <netipsec/ipsec.h>
91 #include <netipsec/key.h>
92 #endif /* IPSEC */
93 
94 #include <netinet/sctputil.h>
95 #include <netinet/sctp_var.h>
96 #ifdef INET6
97 #include <netinet6/sctp6_var.h>
98 #endif
99 #include <netinet/sctp_header.h>
100 #include <netinet/sctp_output.h>
101 #include <netinet/sctp_hashdriver.h>
102 #include <netinet/sctp_uio.h>
103 #include <netinet/sctp_timer.h>
104 #include <netinet/sctp_crc32.h>
105 #include <netinet/sctp_indata.h>	/* for sctp_deliver_data() */
106 #define NUMBER_OF_MTU_SIZES 18
107 
108 #ifdef SCTP_DEBUG
109 extern u_int32_t sctp_debug_on;
110 #endif
111 
112 #ifdef SCTP_STAT_LOGGING
113 int sctp_cwnd_log_at=0;
114 int sctp_cwnd_log_rolled=0;
115 struct sctp_cwnd_log sctp_clog[SCTP_STAT_LOG_SIZE];
116 
117 void sctp_clr_stat_log(void)
118 {
119 	sctp_cwnd_log_at=0;
120 	sctp_cwnd_log_rolled=0;
121 }
122 
123 void
124 sctp_log_strm_del_alt(u_int32_t tsn, u_int16_t sseq, int from)
125 {
126 
127 	sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
128 	sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_STRM;
129 	sctp_clog[sctp_cwnd_log_at].x.strlog.n_tsn = tsn;
130 	sctp_clog[sctp_cwnd_log_at].x.strlog.n_sseq = sseq;
131 	sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = 0;
132 	sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = 0;
133 	sctp_cwnd_log_at++;
134 	if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
135 		sctp_cwnd_log_at = 0;
136 		sctp_cwnd_log_rolled = 1;
137 	}
138 
139 }
140 
141 void
142 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
143 {
144 
145 	sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
146 	sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_MAP;
147 	sctp_clog[sctp_cwnd_log_at].x.map.base = map;
148 	sctp_clog[sctp_cwnd_log_at].x.map.cum = cum;
149 	sctp_clog[sctp_cwnd_log_at].x.map.high = high;
150 	sctp_cwnd_log_at++;
151 	if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
152 		sctp_cwnd_log_at = 0;
153 		sctp_cwnd_log_rolled = 1;
154 	}
155 }
156 
157 void
158 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
159     int from)
160 {
161 
162 	sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
163 	sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_FR;
164 	sctp_clog[sctp_cwnd_log_at].x.fr.largest_tsn = biggest_tsn;
165 	sctp_clog[sctp_cwnd_log_at].x.fr.largest_new_tsn = biggest_new_tsn;
166 	sctp_clog[sctp_cwnd_log_at].x.fr.tsn = tsn;
167 	sctp_cwnd_log_at++;
168 	if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
169 		sctp_cwnd_log_at = 0;
170 		sctp_cwnd_log_rolled = 1;
171 	}
172 }
173 
174 void
175 sctp_log_strm_del(struct sctp_tmit_chunk *chk, struct sctp_tmit_chunk *poschk,
176     int from)
177 {
178 
179 	if (chk == NULL) {
180 		printf("Gak log of NULL?\n");
181 		return;
182 	}
183 	sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
184 	sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_STRM;
185 	sctp_clog[sctp_cwnd_log_at].x.strlog.n_tsn = chk->rec.data.TSN_seq;
186 	sctp_clog[sctp_cwnd_log_at].x.strlog.n_sseq = chk->rec.data.stream_seq;
187 	if (poschk != NULL) {
188 		sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn =
189 		    poschk->rec.data.TSN_seq;
190 		sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq =
191 		    poschk->rec.data.stream_seq;
192 	} else {
193 		sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = 0;
194 		sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = 0;
195 	}
196 	sctp_cwnd_log_at++;
197 	if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
198 		sctp_cwnd_log_at = 0;
199 		sctp_cwnd_log_rolled = 1;
200 	}
201 }
202 
203 void
204 sctp_log_cwnd(struct sctp_nets *net, int augment, uint8_t from)
205 {
206 
207 	sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
208 	sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_CWND;
209 	sctp_clog[sctp_cwnd_log_at].x.cwnd.net = net;
210 	sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_new_value = net->cwnd;
211 	sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size;
212 	sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_augment = augment;
213 	sctp_cwnd_log_at++;
214 	if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
215 		sctp_cwnd_log_at = 0;
216 		sctp_cwnd_log_rolled = 1;
217 	}
218 }
219 
220 void
221 sctp_log_maxburst(struct sctp_nets *net, int error, int burst, uint8_t from)
222 {
223 	sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
224 	sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_MAXBURST;
225 	sctp_clog[sctp_cwnd_log_at].x.cwnd.net = net;
226 	sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_new_value = error;
227 	sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size;
228 	sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_augment = burst;
229 	sctp_cwnd_log_at++;
230 	if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
231 		sctp_cwnd_log_at = 0;
232 		sctp_cwnd_log_rolled = 1;
233 	}
234 }
235 
236 void
237 sctp_log_rwnd(uint8_t from, u_int32_t peers_rwnd , u_int32_t snd_size, u_int32_t overhead)
238 {
239 	sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
240 	sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_RWND;
241 	sctp_clog[sctp_cwnd_log_at].x.rwnd.rwnd = peers_rwnd;
242 	sctp_clog[sctp_cwnd_log_at].x.rwnd.send_size = snd_size;
243 	sctp_clog[sctp_cwnd_log_at].x.rwnd.overhead = overhead;
244 	sctp_clog[sctp_cwnd_log_at].x.rwnd.new_rwnd = 0;
245 	sctp_cwnd_log_at++;
246 	if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
247 		sctp_cwnd_log_at = 0;
248 		sctp_cwnd_log_rolled = 1;
249 	}
250 }
251 
252 void
253 sctp_log_rwnd_set(uint8_t from, u_int32_t peers_rwnd , u_int32_t flight_size, u_int32_t overhead, u_int32_t a_rwndval)
254 {
255 	sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
256 	sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_RWND;
257 	sctp_clog[sctp_cwnd_log_at].x.rwnd.rwnd = peers_rwnd;
258 	sctp_clog[sctp_cwnd_log_at].x.rwnd.send_size = flight_size;
259 	sctp_clog[sctp_cwnd_log_at].x.rwnd.overhead = overhead;
260 	sctp_clog[sctp_cwnd_log_at].x.rwnd.new_rwnd = a_rwndval;
261 	sctp_cwnd_log_at++;
262 	if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
263 		sctp_cwnd_log_at = 0;
264 		sctp_cwnd_log_rolled = 1;
265 	}
266 }
267 
268 void
269 sctp_log_mbcnt(uint8_t from, u_int32_t total_oq , u_int32_t book, u_int32_t total_mbcnt_q, u_int32_t mbcnt)
270 {
271 	sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
272 	sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_MBCNT;
273 	sctp_clog[sctp_cwnd_log_at].x.mbcnt.total_queue_size = total_oq;
274 	sctp_clog[sctp_cwnd_log_at].x.mbcnt.size_change  = book;
275 	sctp_clog[sctp_cwnd_log_at].x.mbcnt.total_queue_mb_size = total_mbcnt_q;
276 	sctp_clog[sctp_cwnd_log_at].x.mbcnt.mbcnt_change = mbcnt;
277 	sctp_cwnd_log_at++;
278 	if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
279 		sctp_cwnd_log_at = 0;
280 		sctp_cwnd_log_rolled = 1;
281 	}
282 }
283 
284 void
285 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc)
286 {
287 
288 	sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
289 	sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_BLOCK;
290 	sctp_clog[sctp_cwnd_log_at].x.blk.maxmb = (u_int16_t)(so->so_snd.sb_mbmax/1024);
291 	sctp_clog[sctp_cwnd_log_at].x.blk.onmb = asoc->total_output_mbuf_queue_size;
292 	sctp_clog[sctp_cwnd_log_at].x.blk.maxsb = (u_int16_t)(so->so_snd.sb_hiwat/1024);
293 	sctp_clog[sctp_cwnd_log_at].x.blk.onsb = asoc->total_output_queue_size;
294 	sctp_clog[sctp_cwnd_log_at].x.blk.send_sent_qcnt = (u_int16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
295 	sctp_clog[sctp_cwnd_log_at].x.blk.stream_qcnt = (u_int16_t)asoc->stream_queue_cnt;
296 	sctp_cwnd_log_at++;
297 	if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
298 		sctp_cwnd_log_at = 0;
299 		sctp_cwnd_log_rolled = 1;
300 	}
301 }
302 
303 int
304 sctp_fill_stat_log(struct mbuf *m)
305 {
306 	struct sctp_cwnd_log_req *req;
307 	int size_limit, num, i, at, cnt_out=0;
308 
309 	if (m == NULL)
310 		return (EINVAL);
311 
312 	size_limit = (m->m_len - sizeof(struct sctp_cwnd_log_req));
313 	if (size_limit < sizeof(struct sctp_cwnd_log)) {
314 		return (EINVAL);
315 	}
316 	req = mtod(m, struct sctp_cwnd_log_req *);
317 	num = size_limit/sizeof(struct sctp_cwnd_log);
318 	if (sctp_cwnd_log_rolled) {
319 		req->num_in_log = SCTP_STAT_LOG_SIZE;
320 	} else {
321 		req->num_in_log = sctp_cwnd_log_at;
322 		/* if the log has not rolled, we don't
323 		 * let you have old data.
324 		 */
325  		if (req->end_at > sctp_cwnd_log_at) {
326 			req->end_at = sctp_cwnd_log_at;
327 		}
328 	}
329 	if ((num < SCTP_STAT_LOG_SIZE) &&
330 	    ((sctp_cwnd_log_rolled) || (sctp_cwnd_log_at > num))) {
331 		/* we can't return all of it */
332 		if (((req->start_at == 0) && (req->end_at == 0)) ||
333 		    (req->start_at >= SCTP_STAT_LOG_SIZE) ||
334 		    (req->end_at >= SCTP_STAT_LOG_SIZE)) {
335 			/* No user request or user is wacked. */
336 			req->num_ret = num;
337 			req->end_at = sctp_cwnd_log_at - 1;
338 			if ((sctp_cwnd_log_at - num) < 0) {
339 				int cc;
340 				cc = num - sctp_cwnd_log_at;
341 				req->start_at = SCTP_STAT_LOG_SIZE - cc;
342 			} else {
343 				req->start_at = sctp_cwnd_log_at - num;
344 			}
345 		} else {
346 			/* a user request */
347 			int cc;
348 			if (req->start_at > req->end_at) {
349 				cc = (SCTP_STAT_LOG_SIZE - req->start_at) +
350 				    (req->end_at + 1);
351 			} else {
352 
353 				cc = req->end_at - req->start_at;
354 			}
355 			if (cc < num) {
356 				num = cc;
357 			}
358 			req->num_ret = num;
359 		}
360 	} else {
361 		/* We can return all  of it */
362 		req->start_at = 0;
363 		req->end_at = sctp_cwnd_log_at - 1;
364 		req->num_ret = sctp_cwnd_log_at;
365 	}
366 	for (i = 0, at = req->start_at; i < req->num_ret; i++) {
367 		req->log[i] = sctp_clog[at];
368 		cnt_out++;
369 		at++;
370 		if (at >= SCTP_STAT_LOG_SIZE)
371 			at = 0;
372 	}
373 	m->m_len = (cnt_out * sizeof(struct sctp_cwnd_log)) + sizeof(struct sctp_cwnd_log_req);
374 	return (0);
375 }
376 
377 #endif
378 
379 #ifdef SCTP_AUDITING_ENABLED
380 u_int8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
381 static int sctp_audit_indx = 0;
382 
383 static
384 void sctp_print_audit_report(void)
385 {
386 	int i;
387 	int cnt;
388 	cnt = 0;
389 	for (i=sctp_audit_indx;i<SCTP_AUDIT_SIZE;i++) {
390 		if ((sctp_audit_data[i][0] == 0xe0) &&
391 		    (sctp_audit_data[i][1] == 0x01)) {
392 			cnt = 0;
393 			printf("\n");
394 		} else if (sctp_audit_data[i][0] == 0xf0) {
395 			cnt = 0;
396 			printf("\n");
397 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
398 		    (sctp_audit_data[i][1] == 0x01)) {
399 			printf("\n");
400 			cnt = 0;
401 		}
402 		printf("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
403 		    (uint32_t)sctp_audit_data[i][1]);
404 		cnt++;
405 		if ((cnt % 14) == 0)
406 			printf("\n");
407 	}
408 	for (i=0;i<sctp_audit_indx;i++) {
409 		if ((sctp_audit_data[i][0] == 0xe0) &&
410 		    (sctp_audit_data[i][1] == 0x01)) {
411 			cnt = 0;
412 			printf("\n");
413 		} else if (sctp_audit_data[i][0] == 0xf0) {
414 			cnt = 0;
415 			printf("\n");
416 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
417 			 (sctp_audit_data[i][1] == 0x01)) {
418 			printf("\n");
419 			cnt = 0;
420 		}
421 		printf("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
422 		    (uint32_t)sctp_audit_data[i][1]);
423 		cnt++;
424 		if ((cnt % 14) == 0)
425 			printf("\n");
426 	}
427 	printf("\n");
428 }
429 
430 void sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
431     struct sctp_nets *net)
432 {
433 	int resend_cnt, tot_out, rep, tot_book_cnt;
434 	struct sctp_nets *lnet;
435 	struct sctp_tmit_chunk *chk;
436 
437 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
438 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
439 	sctp_audit_indx++;
440 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
441 		sctp_audit_indx = 0;
442 	}
443 	if (inp == NULL) {
444 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
445 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
446 		sctp_audit_indx++;
447 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
448 			sctp_audit_indx = 0;
449 		}
450 		return;
451 	}
452 	if (stcb == NULL) {
453 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
454 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
455 		sctp_audit_indx++;
456 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
457 			sctp_audit_indx = 0;
458 		}
459 		return;
460 	}
461 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
462 	sctp_audit_data[sctp_audit_indx][1] =
463 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
464 	sctp_audit_indx++;
465 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
466 		sctp_audit_indx = 0;
467 	}
468 	rep = 0;
469 	tot_book_cnt = 0;
470 	resend_cnt = tot_out = 0;
471 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
472 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
473 			resend_cnt++;
474 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
475 			tot_out += chk->book_size;
476 			tot_book_cnt++;
477 		}
478 	}
479 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
480 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
481 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
482 		sctp_audit_indx++;
483 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
484 			sctp_audit_indx = 0;
485 		}
486 		printf("resend_cnt:%d asoc-tot:%d\n",
487 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
488 		rep = 1;
489 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
490 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
491 		sctp_audit_data[sctp_audit_indx][1] =
492 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
493 		sctp_audit_indx++;
494 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
495 			sctp_audit_indx = 0;
496 		}
497 	}
498 	if (tot_out != stcb->asoc.total_flight) {
499 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
500 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
501 		sctp_audit_indx++;
502 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
503 			sctp_audit_indx = 0;
504 		}
505 		rep = 1;
506 		printf("tot_flt:%d asoc_tot:%d\n", tot_out,
507 		    (int)stcb->asoc.total_flight);
508 		stcb->asoc.total_flight = tot_out;
509 	}
510 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
511 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
512 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
513 		sctp_audit_indx++;
514 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
515 			sctp_audit_indx = 0;
516 		}
517 		rep = 1;
518 		printf("tot_flt_book:%d\n", tot_book_cnt);
519 
520 		stcb->asoc.total_flight_count = tot_book_cnt;
521 	}
522 	tot_out = 0;
523 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
524 		tot_out += lnet->flight_size;
525 	}
526 	if (tot_out != stcb->asoc.total_flight) {
527 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
528 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
529 		sctp_audit_indx++;
530 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
531 			sctp_audit_indx = 0;
532 		}
533 		rep = 1;
534 		printf("real flight:%d net total was %d\n",
535 		    stcb->asoc.total_flight, tot_out);
536 		/* now corrective action */
537 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
538 			tot_out = 0;
539 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
540 				if ((chk->whoTo == lnet) &&
541 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
542 					tot_out += chk->book_size;
543 				}
544 			}
545 			if (lnet->flight_size != tot_out) {
546 				printf("net:%p flight was %d corrected to %d\n",
547 				    lnet, lnet->flight_size, tot_out);
548 				lnet->flight_size = tot_out;
549 			}
550 
551 		}
552 	}
553 
554 	if (rep) {
555 		sctp_print_audit_report();
556 	}
557 }
558 
559 void
560 sctp_audit_log(u_int8_t ev, u_int8_t fd)
561 {
562 	sctp_audit_data[sctp_audit_indx][0] = ev;
563 	sctp_audit_data[sctp_audit_indx][1] = fd;
564 	sctp_audit_indx++;
565 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
566 		sctp_audit_indx = 0;
567 	}
568 }
569 
570 #endif
571 
572 /*
573  * a list of sizes based on typical MTUs, used only if the next hop
574  * size is not returned.
575  */
576 static int sctp_mtu_sizes[] = {
577 	68,
578 	296,
579 	508,
580 	512,
581 	544,
582 	576,
583 	1006,
584 	1492,
585 	1500,
586 	1536,
587 	2002,
588 	2048,
589 	4352,
590 	4464,
591 	8166,
592 	17914,
593 	32000,
594 	65535
595 };
596 
597 int
598 find_next_best_mtu(int totsz)
599 {
600 	int i, perfer;
601 	/*
602 	 * if we are in here we must find the next best fit based on the
603 	 * size of the dg that failed to be sent.
604 	 */
605 	perfer = 0;
606 	for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
607 		if (totsz < sctp_mtu_sizes[i]) {
608 			perfer = i - 1;
609 			if (perfer < 0)
610 				perfer = 0;
611 			break;
612 		}
613 	}
614 	return (sctp_mtu_sizes[perfer]);
615 }
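/*
 * Worked example (editorial note, not part of the original source): a
 * datagram of 1400 bytes that failed to send falls between the 1006 and
 * 1492 entries of sctp_mtu_sizes[], so find_next_best_mtu(1400) returns
 * 1006, the next smaller "typical" MTU to retry with.
 */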
616 
617 void
618 sctp_fill_random_store(struct sctp_pcb *m)
619 {
620 	/*
621 	 * Here we use MD5/SHA-1 to hash our good random numbers together
622 	 * with our counter. The result becomes our new good random numbers
623 	 * and we then set up to give these out. Note that we do no locking
624 	 * to protect this. This is ok, since if competing folks call
625 	 * this we will get more gobbledygook in the random store, which
626 	 * is what we want. There is a danger that two callers will use
627 	 * the same random numbers, but that's ok too since that
628 	 * is random as well :->
629 	 */
630 	m->store_at = 0;
631 	sctp_hash_digest((char *)m->random_numbers, sizeof(m->random_numbers),
632 			 (char *)&m->random_counter, sizeof(m->random_counter),
633 			 (char *)m->random_store);
634 	m->random_counter++;
635 }
636 
637 uint32_t
638 sctp_select_initial_TSN(struct sctp_pcb *m)
639 {
640 	/*
641 	 * A true implementation should use random selection process to
642 	 * get the initial stream sequence number, using RFC1750 as a
643 	 * good guideline
644 	 */
645 	u_long x, *xp;
646 	uint8_t *p;
647 
648 	if (m->initial_sequence_debug != 0) {
649 		u_int32_t ret;
650 		ret = m->initial_sequence_debug;
651 		m->initial_sequence_debug++;
652 		return (ret);
653 	}
654 	if ((m->store_at+sizeof(u_long)) > SCTP_SIGNATURE_SIZE) {
655 		/* Refill the random store */
656 		sctp_fill_random_store(m);
657 	}
658 	p = &m->random_store[(int)m->store_at];
659 	xp = (u_long *)p;
660 	x = *xp;
661 	m->store_at += sizeof(u_long);
662 	return (x);
663 }
664 
665 u_int32_t sctp_select_a_tag(struct sctp_inpcb *m)
666 {
667 	u_long x, not_done;
668 	struct timeval now;
669 
670 	SCTP_GETTIME_TIMEVAL(&now);
671 	not_done = 1;
672 	while (not_done) {
673 		x = sctp_select_initial_TSN(&m->sctp_ep);
674 		if (x == 0) {
675 			/* we never use 0 */
676 			continue;
677 		}
678 		if (sctp_is_vtag_good(m, x, &now)) {
679 			not_done = 0;
680 		}
681 	}
682 	return (x);
683 }
684 
685 
686 int
687 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_association *asoc,
688 	       int for_a_init, uint32_t override_tag )
689 {
690 	/*
691 	 * Anything set to zero is taken care of by the allocation
692 	 * routine's bzero
693 	 */
694 
695 	/*
696 	 * Up front select what scoping to apply on addresses I tell my peer.
697 	 * Not sure what to do with these right now, we will need to come up
698 	 * with a way to set them. We may need to pass them through from the
699 	 * caller in the sctp_aloc_assoc() function.
700 	 */
701 	int i;
702 	/* init all variables to a known value.*/
703 	asoc->state = SCTP_STATE_INUSE;
704 	asoc->max_burst = m->sctp_ep.max_burst;
705 	asoc->heart_beat_delay = m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT];
706 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
707 
708 	if (override_tag) {
709 		asoc->my_vtag = override_tag;
710 	} else {
711 		asoc->my_vtag = sctp_select_a_tag(m);
712 	}
713 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
714 		sctp_select_initial_TSN(&m->sctp_ep);
715 	asoc->t3timeout_highest_marked = asoc->asconf_seq_out;
716 	/* we are optimistic here */
717 	asoc->peer_supports_asconf = 1;
718 	asoc->peer_supports_asconf_setprim = 1;
719 	asoc->peer_supports_pktdrop = 1;
720 
721 	asoc->sent_queue_retran_cnt = 0;
722 	/* This will need to be adjusted */
723 	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
724 	asoc->last_acked_seq = asoc->init_seq_number - 1;
725 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
726 	asoc->asconf_seq_in = asoc->last_acked_seq;
727 
728 	/* here we are different, we hold the next one we expect */
729 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
730 
731 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
732 	asoc->initial_rto = m->sctp_ep.initial_rto;
733 
734 	asoc->max_init_times = m->sctp_ep.max_init_times;
735 	asoc->max_send_times = m->sctp_ep.max_send_times;
736 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
737 
738 	/* ECN Nonce initialization */
739 	asoc->ecn_nonce_allowed = 0;
740 	asoc->receiver_nonce_sum = 1;
741 	asoc->nonce_sum_expect_base = 1;
742 	asoc->nonce_sum_check = 1;
743 	asoc->nonce_resync_tsn = 0;
744 	asoc->nonce_wait_for_ecne = 0;
745 	asoc->nonce_wait_tsn = 0;
746 
747 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
748 		struct in6pcb *inp6;
749 
750 
751 		/* It's a V6 socket */
752 		inp6 = (struct in6pcb *)m;
753 		asoc->ipv6_addr_legal = 1;
754 		/* Now look at the binding flag to see if V4 will be legal */
755 	if (
756 #if defined(__OpenBSD__)
757 		(0) /* we always do dual bind */
758 #elif defined (__NetBSD__)
759 		(inp6->in6p_flags & IN6P_IPV6_V6ONLY)
760 #else
761 		(inp6->inp_flags & IN6P_IPV6_V6ONLY)
762 #endif
763 	     == 0) {
764 			asoc->ipv4_addr_legal = 1;
765 		} else {
766 			/* V4 addresses are NOT legal on the association */
767 			asoc->ipv4_addr_legal = 0;
768 		}
769 	} else {
770 		/* It's a V4 socket, no V6 */
771 		asoc->ipv4_addr_legal = 1;
772 		asoc->ipv6_addr_legal = 0;
773 	}
774 
775 
776 	asoc->my_rwnd = uimax(m->sctp_socket->so_rcv.sb_hiwat, SCTP_MINIMAL_RWND);
777 	asoc->peers_rwnd = m->sctp_socket->so_rcv.sb_hiwat;
778 
779 	asoc->smallest_mtu = m->sctp_frag_point;
780 	asoc->minrto = m->sctp_ep.sctp_minrto;
781 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
782 
783 	LIST_INIT(&asoc->sctp_local_addr_list);
784 	TAILQ_INIT(&asoc->nets);
785 	TAILQ_INIT(&asoc->pending_reply_queue);
786 	asoc->last_asconf_ack_sent = NULL;
787 	/* Setup to fill the hb random cache at first HB */
788 	asoc->hb_random_idx = 4;
789 
790 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
791 
792 	/*
793 	 * Now the stream parameters, here we allocate space for all
794 	 * streams that we request by default.
795 	 */
796 	asoc->streamoutcnt = asoc->pre_open_streams =
797 	    m->sctp_ep.pre_open_stream_count;
798 	asoc->strmout = malloc(asoc->streamoutcnt *
799 	    sizeof(struct sctp_stream_out), M_PCB, M_NOWAIT);
800 	if (asoc->strmout == NULL) {
801 		/* big trouble no memory */
802 		return (ENOMEM);
803 	}
804 	for (i = 0; i < asoc->streamoutcnt; i++) {
805 		/*
806 		 * inbound side must be set to 0xffff,
807 		 * also NOTE when we get the INIT-ACK back (for INIT sender)
808 		 * we MUST reduce the count (streamoutcnt) but first check
809 		 * if we sent to any of the upper streams that were dropped
810 		 * (if some were). Those that were dropped must be notified
811 		 * to the upper layer as failed to send.
812 		 */
813 		asoc->strmout[i].next_sequence_sent = 0x0;
814 		TAILQ_INIT(&asoc->strmout[i].outqueue);
815 		asoc->strmout[i].stream_no = i;
816 		asoc->strmout[i].next_spoke.tqe_next = 0;
817 		asoc->strmout[i].next_spoke.tqe_prev = 0;
818 	}
819 	/* Now the mapping array */
820 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
821 	asoc->mapping_array = malloc(asoc->mapping_array_size,
822 	       M_PCB, M_NOWAIT);
823 	if (asoc->mapping_array == NULL) {
824 		free(asoc->strmout, M_PCB);
825 		return (ENOMEM);
826 	}
827 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
828 	/* Now the init of the other outqueues */
829 	TAILQ_INIT(&asoc->out_wheel);
830 	TAILQ_INIT(&asoc->control_send_queue);
831 	TAILQ_INIT(&asoc->send_queue);
832 	TAILQ_INIT(&asoc->sent_queue);
833 	TAILQ_INIT(&asoc->reasmqueue);
834 	TAILQ_INIT(&asoc->delivery_queue);
835 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
836 
837 	TAILQ_INIT(&asoc->asconf_queue);
838 	return (0);
839 }
840 
841 int
842 sctp_expand_mapping_array(struct sctp_association *asoc)
843 {
844 	/* mapping array needs to grow */
845 	u_int8_t *new_array;
846 	uint16_t new_size, old_size;
847 
848 	old_size = asoc->mapping_array_size;
849 	new_size = old_size + SCTP_MAPPING_ARRAY_INCR;
850 	new_array = malloc(new_size, M_PCB, M_NOWAIT);
851 	if (new_array == NULL) {
852 		/* can't get more, forget it */
853 		printf("No memory for expansion of SCTP mapping array %d\n",
854 		       new_size);
855 		return (-1);
856 	}
857 	memcpy(new_array, asoc->mapping_array, old_size);
858 	memset(new_array + old_size, 0, SCTP_MAPPING_ARRAY_INCR);
859 	free(asoc->mapping_array, M_PCB);
860 	asoc->mapping_array = new_array;
861 	asoc->mapping_array_size = new_size;
862 	return (0);
863 }
864 
865 static void
866 sctp_timeout_handler(void *t)
867 {
868 	struct sctp_inpcb *inp;
869 	struct sctp_tcb *stcb;
870 	struct sctp_nets *net;
871 	struct sctp_timer *tmr;
872 	int did_output;
873 
874 	mutex_enter(softnet_lock);
875 	tmr = (struct sctp_timer *)t;
876 	inp = (struct sctp_inpcb *)tmr->ep;
877 	stcb = (struct sctp_tcb *)tmr->tcb;
878 	net = (struct sctp_nets *)tmr->net;
879 	did_output = 1;
880 
881 #ifdef SCTP_AUDITING_ENABLED
882 	sctp_audit_log(0xF0, (u_int8_t)tmr->type);
883 	sctp_auditing(3, inp, stcb, net);
884 #endif
885 	sctp_pegs[SCTP_TIMERS_EXP]++;
886 
887 	if (inp == NULL) {
		mutex_exit(softnet_lock);
888 		return;
889 	}
890 
891 	SCTP_INP_WLOCK(inp);
892 	if (inp->sctp_socket == 0) {
893 		mutex_exit(softnet_lock);
894 		SCTP_INP_WUNLOCK(inp);
895 		return;
896 	}
897 	if (stcb) {
898 		if (stcb->asoc.state == 0) {
899 			mutex_exit(softnet_lock);
900 			SCTP_INP_WUNLOCK(inp);
901 			return;
902 		}
903 	}
904 #ifdef SCTP_DEBUG
905 	if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
906 		printf("Timer type %d goes off\n", tmr->type);
907 	}
908 #endif /* SCTP_DEBUG */
909 #ifndef __NetBSD__
910 	if (!callout_active(&tmr->timer)) {
911 		SCTP_INP_WUNLOCK(inp);
912 		return;
913 	}
914 #endif
915 	if (stcb) {
916 		SCTP_TCB_LOCK(stcb);
917 	}
918 	SCTP_INP_INCR_REF(inp);
919 	SCTP_INP_WUNLOCK(inp);
920 
921 	switch (tmr->type) {
922 	case SCTP_TIMER_TYPE_ITERATOR:
923 	{
924 		struct sctp_iterator *it;
925 		it = (struct sctp_iterator *)inp;
926 		sctp_iterator_timer(it);
927 	}
928 	break;
929 	/* call the handler for the appropriate timer type */
930 	case SCTP_TIMER_TYPE_SEND:
931 		sctp_pegs[SCTP_TMIT_TIMER]++;
932 		stcb->asoc.num_send_timers_up--;
933 		if (stcb->asoc.num_send_timers_up < 0) {
934 			stcb->asoc.num_send_timers_up = 0;
935 		}
936 		if (sctp_t3rxt_timer(inp, stcb, net)) {
937 			/* no need to unlock on tcb, it's gone */
938 
939 			goto out_decr;
940 		}
941 #ifdef SCTP_AUDITING_ENABLED
942 		sctp_auditing(4, inp, stcb, net);
943 #endif
944 		sctp_chunk_output(inp, stcb, 1);
945 		if ((stcb->asoc.num_send_timers_up == 0) &&
946 		    (stcb->asoc.sent_queue_cnt > 0)
947 			) {
948 			struct sctp_tmit_chunk *chk;
949 			/*
950 			 * safeguard. If there are some on the sent queue
951 			 * somewhere but no timers running, something is
952 			 * wrong... so we start a timer on the first chunk
953 			 * on the send queue on whatever net it is sent to.
954 			 */
955 			sctp_pegs[SCTP_T3_SAFEGRD]++;
956 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
957 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
958 					 chk->whoTo);
959 		}
960 		break;
961 	case SCTP_TIMER_TYPE_INIT:
962 		if (sctp_t1init_timer(inp, stcb, net)) {
963 			/* no need to unlock on tcb, it's gone */
964 			goto out_decr;
965 		}
966 		/* We do output but not here */
967 		did_output = 0;
968 		break;
969 	case SCTP_TIMER_TYPE_RECV:
970 		sctp_pegs[SCTP_RECV_TIMER]++;
971 		sctp_send_sack(stcb);
972 #ifdef SCTP_AUDITING_ENABLED
973 		sctp_auditing(4, inp, stcb, net);
974 #endif
975 		sctp_chunk_output(inp, stcb, 4);
976 		break;
977 	case SCTP_TIMER_TYPE_SHUTDOWN:
978 		if (sctp_shutdown_timer(inp, stcb, net) ) {
979 			/* no need to unlock on tcb, it's gone */
980 			goto out_decr;
981 		}
982 #ifdef SCTP_AUDITING_ENABLED
983 		sctp_auditing(4, inp, stcb, net);
984 #endif
985 		sctp_chunk_output(inp, stcb, 5);
986 		break;
987 	case SCTP_TIMER_TYPE_HEARTBEAT:
988 		if (sctp_heartbeat_timer(inp, stcb, net)) {
989 			/* no need to unlock on tcb, it's gone */
990 			goto out_decr;
991 		}
992 #ifdef SCTP_AUDITING_ENABLED
993 		sctp_auditing(4, inp, stcb, net);
994 #endif
995 		sctp_chunk_output(inp, stcb, 6);
996 		break;
997 	case SCTP_TIMER_TYPE_COOKIE:
998 		if (sctp_cookie_timer(inp, stcb, net)) {
999 			/* no need to unlock on tcb, it's gone */
1000 			goto out_decr;
1001 		}
1002 #ifdef SCTP_AUDITING_ENABLED
1003 		sctp_auditing(4, inp, stcb, net);
1004 #endif
1005 		sctp_chunk_output(inp, stcb, 1);
1006 		break;
1007 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1008 	{
1009 		struct timeval tv;
1010 		int i, secret;
1011 		SCTP_GETTIME_TIMEVAL(&tv);
1012 		SCTP_INP_WLOCK(inp);
1013 		inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1014 		inp->sctp_ep.last_secret_number =
1015 			inp->sctp_ep.current_secret_number;
1016 		inp->sctp_ep.current_secret_number++;
1017 		if (inp->sctp_ep.current_secret_number >=
1018 		    SCTP_HOW_MANY_SECRETS) {
1019 			inp->sctp_ep.current_secret_number = 0;
1020 		}
1021 		secret = (int)inp->sctp_ep.current_secret_number;
1022 		for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1023 			inp->sctp_ep.secret_key[secret][i] =
1024 				sctp_select_initial_TSN(&inp->sctp_ep);
1025 		}
1026 		SCTP_INP_WUNLOCK(inp);
1027 		sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1028 	}
1029 	did_output = 0;
1030 	break;
1031 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1032 		sctp_pathmtu_timer(inp, stcb, net);
1033 		did_output = 0;
1034 		break;
1035 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1036 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1037 			/* no need to unlock on tcb, it's gone */
1038 			goto out_decr;
1039 		}
1040 #ifdef SCTP_AUDITING_ENABLED
1041 		sctp_auditing(4, inp, stcb, net);
1042 #endif
1043 		sctp_chunk_output(inp, stcb, 7);
1044 		break;
1045 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1046 		sctp_abort_an_association(inp, stcb,
1047 					  SCTP_SHUTDOWN_GUARD_EXPIRES, NULL);
1048 		/* no need to unlock on tcb, it's gone */
1049 		goto out_decr;
1050 		break;
1051 
1052 	case SCTP_TIMER_TYPE_STRRESET:
1053 		if (sctp_strreset_timer(inp, stcb, net)) {
1054 			/* no need to unlock on tcb, it's gone */
1055 			goto out_decr;
1056 		}
1057 		sctp_chunk_output(inp, stcb, 9);
1058 		break;
1059 
1060 	case SCTP_TIMER_TYPE_ASCONF:
1061 		if (sctp_asconf_timer(inp, stcb, net)) {
1062 			/* no need to unlock on tcb, it's gone */
1063 			goto out_decr;
1064 		}
1065 #ifdef SCTP_AUDITING_ENABLED
1066 		sctp_auditing(4, inp, stcb, net);
1067 #endif
1068 		sctp_chunk_output(inp, stcb, 8);
1069 		break;
1070 
1071 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1072 		sctp_autoclose_timer(inp, stcb, net);
1073 		sctp_chunk_output(inp, stcb, 10);
1074 		did_output = 0;
1075 		break;
1076 	case SCTP_TIMER_TYPE_INPKILL:
1077 		/* special case, take away our
1078 		 * increment since WE are the killer
1079 		 */
1080 		SCTP_INP_WLOCK(inp);
1081 		SCTP_INP_DECR_REF(inp);
1082 		SCTP_INP_WUNLOCK(inp);
1083 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL);
1084 		sctp_inpcb_free(inp, 1);
1085 		goto out_no_decr;
1086 		break;
1087 	default:
1088 #ifdef SCTP_DEBUG
1089 		if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1090 			printf("sctp_timeout_handler:unknown timer %d\n",
1091 			       tmr->type);
1092 		}
1093 #endif /* SCTP_DEBUG */
1094 		break;
1095 	};
1096 #ifdef SCTP_AUDITING_ENABLED
1097 	sctp_audit_log(0xF1, (u_int8_t)tmr->type);
1098 	sctp_auditing(5, inp, stcb, net);
1099 #endif
1100 	if (did_output) {
1101 		/*
1102 		 * Now we need to clean up the control chunk chain if an
1103 		 * ECNE is on it. It must be marked as UNSENT again so the next
1104 		 * call will continue to send it until such time that we get
1105 		 * a CWR, to remove it. It is, however, unlikely that we
1106 		 * will find an ECN echo on the chain.
1107 		 */
1108 		sctp_fix_ecn_echo(&stcb->asoc);
1109 	}
1110 	if (stcb) {
1111 		SCTP_TCB_UNLOCK(stcb);
1112 	}
1113  out_decr:
1114 	SCTP_INP_WLOCK(inp);
1115 	SCTP_INP_DECR_REF(inp);
1116 	SCTP_INP_WUNLOCK(inp);
1117 
1118  out_no_decr:
1119 
1120 	mutex_exit(softnet_lock);
1121 }
1122 
1123 int
1124 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1125     struct sctp_nets *net)
1126 {
1127 	int to_ticks;
1128 	struct sctp_timer *tmr;
1129 
1130 	if (inp == NULL)
1131 		return (EFAULT);
1132 
1133 	to_ticks = 0;
1134 
1135 	tmr = NULL;
1136 	switch (t_type) {
1137 	case SCTP_TIMER_TYPE_ITERATOR:
1138 	{
1139 		struct sctp_iterator *it;
1140 		it = (struct sctp_iterator *)inp;
1141 		tmr = &it->tmr;
1142 		to_ticks = SCTP_ITERATOR_TICKS;
1143 	}
1144 	break;
1145 	case SCTP_TIMER_TYPE_SEND:
1146 		/* Here we use the RTO timer */
1147 	{
1148 		int rto_val;
1149 		if ((stcb == NULL) || (net == NULL)) {
1150 			return (EFAULT);
1151 		}
1152 		tmr = &net->rxt_timer;
1153 		if (net->RTO == 0) {
1154 			rto_val = stcb->asoc.initial_rto;
1155 		} else {
1156 			rto_val = net->RTO;
1157 		}
1158 		to_ticks = MSEC_TO_TICKS(rto_val);
1159 	}
1160 	break;
1161 	case SCTP_TIMER_TYPE_INIT:
1162 		/*
1163 		 * Here we use the INIT timer default
1164 		 * usually about 1 minute.
1165 		 */
1166 		if ((stcb == NULL) || (net == NULL)) {
1167 			return (EFAULT);
1168 		}
1169 		tmr = &net->rxt_timer;
1170 		if (net->RTO == 0) {
1171 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1172 		} else {
1173 			to_ticks = MSEC_TO_TICKS(net->RTO);
1174 		}
1175 		break;
1176 	case SCTP_TIMER_TYPE_RECV:
1177 		/*
1178 		 * Here we use the Delayed-Ack timer value from the inp
1179 		 * usually about 200ms.
1180 		 */
1181 		if (stcb == NULL) {
1182 			return (EFAULT);
1183 		}
1184 		tmr = &stcb->asoc.dack_timer;
1185 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV];
1186 		break;
1187 	case SCTP_TIMER_TYPE_SHUTDOWN:
1188 		/* Here we use the RTO of the destination. */
1189 		if ((stcb == NULL) || (net == NULL)) {
1190 			return (EFAULT);
1191 		}
1192 
1193 		if (net->RTO == 0) {
1194 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1195 		} else {
1196 			to_ticks = MSEC_TO_TICKS(net->RTO);
1197 		}
1198 		tmr = &net->rxt_timer;
1199 		break;
1200 	case SCTP_TIMER_TYPE_HEARTBEAT:
1201 		/*
1202 		 * the net is used here so that we can add in the RTO,
1203 		 * even though we use a different timer. We also add the
1204 		 * HB timer PLUS a random jitter.
1205 		 */
1206 		if (stcb == NULL) {
1207 			return (EFAULT);
1208 		}
1209 		{
1210 			uint32_t rndval;
1211 			uint8_t this_random;
1212 			int cnt_of_unconf=0;
1213 			struct sctp_nets *lnet;
1214 
1215 			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
1216 				if (lnet->dest_state & SCTP_ADDR_UNCONFIRMED) {
1217 					cnt_of_unconf++;
1218 				}
1219 			}
1220 #ifdef SCTP_DEBUG
1221 			if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1222 				printf("HB timer to start unconfirmed:%d hb_delay:%d\n",
1223 				       cnt_of_unconf, stcb->asoc.heart_beat_delay);
1224 			}
1225 #endif
1226 			if (stcb->asoc.hb_random_idx > 3) {
1227 				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
1228 				memcpy(stcb->asoc.hb_random_values, &rndval,
1229 				       sizeof(stcb->asoc.hb_random_values));
1230 				this_random = stcb->asoc.hb_random_values[0];
1231 				stcb->asoc.hb_random_idx = 0;
1232 				stcb->asoc.hb_ect_randombit = 0;
1233 			} else {
1234 				this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
1235 				stcb->asoc.hb_random_idx++;
1236 				stcb->asoc.hb_ect_randombit = 0;
1237 			}
1238 			/*
1239 			 * this_random will be 0 - 255 ms
1240 			 * RTO is in ms.
1241 			 */
1242 			if ((stcb->asoc.heart_beat_delay == 0) &&
1243 			    (cnt_of_unconf == 0)) {
1244 				/* no HB on this inp after confirmations */
1245 				return (0);
1246 			}
1247 			if (net) {
1248 				int delay;
1249 				delay = stcb->asoc.heart_beat_delay;
1250 				TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
1251 					if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1252 					    ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
1253 					    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
1254 					    delay = 0;
1255 					}
1256 				}
1257 				if (net->RTO == 0) {
1258 					/* Never been checked */
1259 					to_ticks = this_random + stcb->asoc.initial_rto + delay;
1260 				} else {
1261 					/* set rto_val to the ms */
1262 					to_ticks = delay + net->RTO + this_random;
1263 				}
1264 			} else {
1265 				if (cnt_of_unconf) {
1266 					to_ticks = this_random + stcb->asoc.initial_rto;
1267 				} else {
1268 					to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
1269 				}
1270 			}
1271 			/*
1272 			 * Now we must convert the to_ticks that are now in
1273 			 * ms to ticks.
1274 			 */
1275 			to_ticks *= hz;
1276 			to_ticks /= 1000;
1277 #ifdef SCTP_DEBUG
1278 			if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1279 				printf("Timer to expire in %d ticks\n", to_ticks);
1280 			}
1281 #endif
1282 			tmr = &stcb->asoc.hb_timer;
1283 		}
1284 		break;
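		/*
		 * Worked example (editorial note, values assumed rather than
		 * taken from the source): with net->RTO = 3000 ms,
		 * delay = 30000 ms, this_random = 200 and hz = 100, the
		 * arithmetic above gives
		 * to_ticks = (200 + 3000 + 30000) * 100 / 1000 = 3320 ticks,
		 * i.e. the heartbeat callout fires roughly 33.2 seconds later.
		 */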
1285 	case SCTP_TIMER_TYPE_COOKIE:
1286 		/*
1287 		 * Here we can use the RTO timer from the network since
1288 		 * one RTT was complete. If a retransmission happened then we will
1289 		 * be using the RTO initial value.
1290 		 */
1291 		if ((stcb == NULL) || (net == NULL)) {
1292 			return (EFAULT);
1293 		}
1294 		if (net->RTO == 0) {
1295 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1296 		} else {
1297 			to_ticks = MSEC_TO_TICKS(net->RTO);
1298 		}
1299 		tmr = &net->rxt_timer;
1300 		break;
1301 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1302 		/*
1303 		 * nothing needed but the endpoint here
1304 		 * usually about 60 minutes.
1305 		 */
1306 		tmr = &inp->sctp_ep.signature_change;
1307 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
1308 		break;
1309 	case SCTP_TIMER_TYPE_INPKILL:
1310 		/*
1311 		 * The inp is set up to die. We re-use the
1312 		 * signature_change timer since that has
1313 		 * stopped and we are in the GONE state.
1314 		 */
1315 		tmr = &inp->sctp_ep.signature_change;
1316 		to_ticks = (SCTP_INP_KILL_TIMEOUT * hz) / 1000;
1317 		break;
1318 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1319 		/*
1320 		 * Here we use the value found in the EP for PMTU
1321 		 * usually about 10 minutes.
1322 		 */
1323 		if (stcb == NULL) {
1324 			return (EFAULT);
1325 		}
1326 		if (net == NULL) {
1327 			return (EFAULT);
1328 		}
1329 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
1330 		tmr = &net->pmtu_timer;
1331 		break;
1332 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1333 		/* Here we use the RTO of the destination */
1334 		if ((stcb == NULL) || (net == NULL)) {
1335 			return (EFAULT);
1336 		}
1337 		if (net->RTO == 0) {
1338 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1339 		} else {
1340 			to_ticks = MSEC_TO_TICKS(net->RTO);
1341 		}
1342 		tmr = &net->rxt_timer;
1343 		break;
1344 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1345 		/*
1346 		 * Here we use the endpoint's shutdown guard timer,
1347 		 * usually about 3 minutes.
1348 		 */
1349 		if (stcb == NULL) {
1350 			return (EFAULT);
1351 		}
1352 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
1353 		tmr = &stcb->asoc.shut_guard_timer;
1354 		break;
1355 	case SCTP_TIMER_TYPE_STRRESET:
1356 		/*
1357 		 * Here the timer comes from the inp
1358 		 * but its value is from the RTO.
1359 		 */
1360 		if ((stcb == NULL) || (net == NULL)) {
1361 			return (EFAULT);
1362 		}
1363 		if (net->RTO == 0) {
1364 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1365 		} else {
1366 			to_ticks = MSEC_TO_TICKS(net->RTO);
1367 		}
1368 		tmr = &stcb->asoc.strreset_timer;
1369 		break;
1370 
1371 	case SCTP_TIMER_TYPE_ASCONF:
1372 		/*
1373 		 * Here the timer comes from the inp
1374 		 * but its value is from the RTO.
1375 		 */
1376 		if ((stcb == NULL) || (net == NULL)) {
1377 			return (EFAULT);
1378 		}
1379 		if (net->RTO == 0) {
1380 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1381 		} else {
1382 			to_ticks = MSEC_TO_TICKS(net->RTO);
1383 		}
1384 		tmr = &stcb->asoc.asconf_timer;
1385 		break;
1386 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1387 		if (stcb == NULL) {
1388 			return (EFAULT);
1389 		}
1390 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
1391 			/* Really an error since stcb is NOT set to autoclose */
1392 			return (0);
1393 		}
1394 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
1395 		tmr = &stcb->asoc.autoclose_timer;
1396 		break;
1397 	default:
1398 #ifdef SCTP_DEBUG
1399 		if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1400 			printf("sctp_timer_start:Unknown timer type %d\n",
1401 			       t_type);
1402 		}
1403 #endif /* SCTP_DEBUG */
1404 		return (EFAULT);
1405 		break;
1406 	};
1407 	if ((to_ticks <= 0) || (tmr == NULL)) {
1408 #ifdef SCTP_DEBUG
1409 		if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1410 			printf("sctp_timer_start:%d:software error to_ticks:%d tmr:%p not set ??\n",
1411 			       t_type, to_ticks, tmr);
1412 		}
1413 #endif /* SCTP_DEBUG */
1414 		return (EFAULT);
1415 	}
1416 	if (callout_pending(&tmr->timer)) {
1417 		/*
1418 		 * we do NOT allow you to have it already running.
1419 		 * if it is we leave the current one up unchanged
1420 		 */
1421 		return (EALREADY);
1422 	}
1423 	/* At this point we can proceed */
1424 	if (t_type == SCTP_TIMER_TYPE_SEND) {
1425 		stcb->asoc.num_send_timers_up++;
1426 	}
1427 	tmr->type = t_type;
1428 	tmr->ep = (void *)inp;
1429 	tmr->tcb = (void *)stcb;
1430 	tmr->net = (void *)net;
1431 	callout_reset(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
1432 	return (0);
1433 }
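/*
 * Hedged usage sketch (editorial illustration, not from the original
 * source): a caller typically arms the per-destination retransmission
 * timer with
 *
 *	(void)sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
 *
 * and later removes it with sctp_timer_stop(SCTP_TIMER_TYPE_SEND, ...).
 * A second start while the callout is still pending returns EALREADY and
 * leaves the running timer untouched, which is why callers generally
 * ignore the return value.
 */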
1434 
1435 int
1436 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1437 		struct sctp_nets *net)
1438 {
1439 	struct sctp_timer *tmr;
1440 
1441 	if (inp == NULL)
1442 		return (EFAULT);
1443 
1444 	tmr = NULL;
1445 	switch (t_type) {
1446 	case SCTP_TIMER_TYPE_ITERATOR:
1447 	{
1448 		struct sctp_iterator *it;
1449 		it = (struct sctp_iterator *)inp;
1450 		tmr = &it->tmr;
1451 	}
1452 	break;
1453 	case SCTP_TIMER_TYPE_SEND:
1454 		if ((stcb == NULL) || (net == NULL)) {
1455 			return (EFAULT);
1456 		}
1457 		tmr = &net->rxt_timer;
1458 		break;
1459 	case SCTP_TIMER_TYPE_INIT:
1460 		if ((stcb == NULL) || (net == NULL)) {
1461 			return (EFAULT);
1462 		}
1463 		tmr = &net->rxt_timer;
1464 		break;
1465 	case SCTP_TIMER_TYPE_RECV:
1466 		if (stcb == NULL) {
1467 			return (EFAULT);
1468 		}
1469 		tmr = &stcb->asoc.dack_timer;
1470 		break;
1471 	case SCTP_TIMER_TYPE_SHUTDOWN:
1472 		if ((stcb == NULL) || (net == NULL)) {
1473 			return (EFAULT);
1474 		}
1475 		tmr = &net->rxt_timer;
1476 		break;
1477 	case SCTP_TIMER_TYPE_HEARTBEAT:
1478 		if (stcb == NULL) {
1479 			return (EFAULT);
1480 		}
1481 		tmr = &stcb->asoc.hb_timer;
1482 		break;
1483 	case SCTP_TIMER_TYPE_COOKIE:
1484 		if ((stcb == NULL) || (net == NULL)) {
1485 			return (EFAULT);
1486 		}
1487 		tmr = &net->rxt_timer;
1488 		break;
1489 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1490 		/* nothing needed but the endpoint here */
1491 		tmr = &inp->sctp_ep.signature_change;
1492 		/* We re-use the newcookie timer for
1493 		 * the INP kill timer. We must ensure
1494 		 * that we do not kill it by accident.
1495 		 */
1496 		break;
1497 	case SCTP_TIMER_TYPE_INPKILL:
1498 		/*
1499 		 * The inp is set up to die. We re-use the
1500 		 * signature_change timer since that has
1501 		 * stopped and we are in the GONE state.
1502 		 */
1503 		tmr = &inp->sctp_ep.signature_change;
1504 		break;
1505 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1506 		if (stcb == NULL) {
1507 			return (EFAULT);
1508 		}
1509 		if (net == NULL) {
1510 			return (EFAULT);
1511 		}
1512 		tmr = &net->pmtu_timer;
1513 		break;
1514 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1515 		if ((stcb == NULL) || (net == NULL)) {
1516 			return (EFAULT);
1517 		}
1518 		tmr = &net->rxt_timer;
1519 		break;
1520 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1521 		if (stcb == NULL) {
1522 			return (EFAULT);
1523 		}
1524 		tmr = &stcb->asoc.shut_guard_timer;
1525 		break;
1526 	case SCTP_TIMER_TYPE_STRRESET:
1527 		if (stcb == NULL) {
1528 			return (EFAULT);
1529 		}
1530 		tmr = &stcb->asoc.strreset_timer;
1531 		break;
1532 	case SCTP_TIMER_TYPE_ASCONF:
1533 		if (stcb == NULL) {
1534 			return (EFAULT);
1535 		}
1536 		tmr = &stcb->asoc.asconf_timer;
1537 		break;
1538 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1539 		if (stcb == NULL) {
1540 			return (EFAULT);
1541 		}
1542 		tmr = &stcb->asoc.autoclose_timer;
1543 		break;
1544 	default:
1545 #ifdef SCTP_DEBUG
1546 		if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1547 			printf("sctp_timer_stop:Unknown timer type %d\n",
1548 			       t_type);
1549 		}
1550 #endif /* SCTP_DEBUG */
1551 		break;
1552 	};
1553 	if (tmr == NULL)
1554 		return (EFAULT);
1555 
1556 	if ((tmr->type != t_type) && tmr->type) {
1557 		/*
1558 		 * Ok we have a timer that is under joint use. Cookie timer
1559 		 * Ok, we have a timer that is under joint use; the cookie timer,
1560 		 * perhaps, shares it with the SEND timer. We therefore are NOT
1561 		 * return.
1562 		 */
1563 		return (0);
1564 	}
1565 	if (t_type == SCTP_TIMER_TYPE_SEND) {
1566 		stcb->asoc.num_send_timers_up--;
1567 		if (stcb->asoc.num_send_timers_up < 0) {
1568 			stcb->asoc.num_send_timers_up = 0;
1569 		}
1570 	}
1571 	callout_stop(&tmr->timer);
1572 	return (0);
1573 }
1574 
1575 u_int32_t
1576 sctp_calculate_len(struct mbuf *m)
1577 {
1578 	u_int32_t tlen=0;
1579 	struct mbuf *at;
1580 	at = m;
1581 	while (at) {
1582 		tlen += at->m_len;
1583 		at = at->m_next;
1584 	}
1585 	return (tlen);
1586 }
1587 
1588 uint32_t
1589 sctp_calculate_sum(struct mbuf *m, int32_t *pktlen, uint32_t offset)
1590 {
1591 	/*
1592 	 * given an mbuf chain with a packet header, offset by 'offset'
1593 	 * pointing at an sctphdr (with csum set to 0), go through
1594 	 * the chain of m_next's and calculate the SCTP checksum.
1595 	 * This is CRC32c.
1596 	 * As a side bonus, also calculate the total length
1597 	 * of the mbuf chain.
1598 	 * Note: if offset is greater than the total mbuf length,
1599 	 * checksum=1, pktlen=0 is returned (i.e. no real error code)
1600 	 */
1601 	int32_t tlen=0;
1602 	uint32_t base = 0xffffffff;
1603 	struct mbuf *at;
1604 	at = m;
1605 	/* find the correct mbuf and offset into mbuf */
1606 	while ((at != NULL) && (offset > (uint32_t)at->m_len)) {
1607 		offset -= at->m_len;	/* update remaining offset left */
1608 		at = at->m_next;
1609 	}
1610 
1611 	while (at != NULL) {
1612 		base = update_crc32(base, at->m_data + offset,
1613 		    at->m_len - offset);
1614 		tlen += at->m_len - offset;
1615 		/* we only offset once into the first mbuf */
1616 		if (offset) {
1617 			offset = 0;
1618 		}
1619 		at = at->m_next;
1620 	}
1621 	if (pktlen != NULL) {
1622 		*pktlen = tlen;
1623 	}
1624 	/* CRC-32c */
1625 	base = sctp_csum_finalize(base);
1626 	return (base);
1627 }
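/*
 * Hedged usage sketch (editorial illustration; 'iphlen' and the locals
 * below are hypothetical, not taken from this file): a transmit path
 * with the SCTP common header 'iphlen' bytes into the chain would zero
 * the checksum field first, as the comment above requires, and then
 * store the CRC32c result.
 */
#if 0	/* illustrative only, never compiled */
	struct sctphdr *sh;
	int32_t pktlen;

	sh = (struct sctphdr *)(mtod(m, vaddr_t) + iphlen);
	sh->checksum = 0;
	sh->checksum = sctp_calculate_sum(m, &pktlen, iphlen);
#endif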
1628 
1629 void
1630 sctp_mtu_size_reset(struct sctp_inpcb *inp,
1631 		    struct sctp_association *asoc, u_long mtu)
1632 {
1633 	/*
1634 	 * Reset the P-MTU size on this association. This involves changing
1635 	 * the asoc MTU and going through ANY chunk whose size plus overhead
1636 	 * is larger than mtu, to allow the DF flag to be cleared.
1637 	 */
1638 	struct sctp_tmit_chunk *chk;
1639 	struct sctp_stream_out *strm;
1640 	unsigned int eff_mtu, ovh;
1641 	asoc->smallest_mtu = mtu;
1642 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1643 		ovh = SCTP_MIN_OVERHEAD;
1644 	} else {
1645 		ovh = SCTP_MIN_V4_OVERHEAD;
1646 	}
1647 	eff_mtu = mtu - ovh;
1648 	/* Now mark any chunks that need to let IP fragment */
1649 	TAILQ_FOREACH(strm, &asoc->out_wheel, next_spoke) {
1650 		TAILQ_FOREACH(chk, &strm->outqueue, sctp_next) {
1651 			if (chk->send_size > eff_mtu) {
1652 				chk->flags &= ~SCTP_DONT_FRAGMENT;
1653 				chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
1654 			}
1655 		}
1656 	}
1657 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
1658 		if (chk->send_size > eff_mtu) {
1659 			chk->flags &= ~SCTP_DONT_FRAGMENT;
1660 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
1661 		}
1662 	}
1663 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
1664 		if (chk->send_size > eff_mtu) {
1665 			chk->flags &= ~SCTP_DONT_FRAGMENT;
1666 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
1667 		}
1668 	}
1669 }
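/*
 * Editorial example (not part of the original source): if the path MTU of
 * an IPv4-only association drops to 576, eff_mtu becomes 576 minus
 * SCTP_MIN_V4_OVERHEAD, and every chunk already queued with a send_size
 * above that is re-flagged CHUNK_FLAGS_FRAGMENT_OK so IP may fragment it
 * rather than having it stuck behind the DF bit.
 */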
1670 
1671 
1672 /*
1673  * given an association and starting time of the current RTT period
1674  * return RTO in number of msecs
1675  * net should point to the current network
1676  */
1677 u_int32_t
1678 sctp_calculate_rto(struct sctp_tcb *stcb,
1679 		   struct sctp_association *asoc,
1680 		   struct sctp_nets *net,
1681 		   struct timeval *old)
1682 {
1683 	/*
1684 	 * given an association and the starting time of the current RTT
1685 	 * period (in 'old') return RTO in number of msecs.
1686 	 */
1687 	int calc_time = 0;
1688 	unsigned int new_rto = 0;
1689 	int first_measure = 0;
1690 	struct timeval now;
1691 
1692 	/************************/
1693 	/* 1. calculate new RTT */
1694 	/************************/
1695 	/* get the current time */
1696 	SCTP_GETTIME_TIMEVAL(&now);
1697 	/* compute the RTT value */
1698 	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
1699 		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
1700 		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
1701 			calc_time += (((u_long)now.tv_usec -
1702 				       (u_long)old->tv_usec)/1000);
1703 		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
1704 			/* Borrow 1,000ms from current calculation */
1705 			calc_time -= 1000;
1706 			/* Add in the slop over */
1707 			calc_time += ((int)now.tv_usec/1000);
1708 			/* Add in the pre-second ms's */
1709 			calc_time += (((int)1000000 - (int)old->tv_usec)/1000);
1710 		}
1711 	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
1712 		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
1713 			calc_time = ((u_long)now.tv_usec -
1714 				     (u_long)old->tv_usec)/1000;
1715 		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
1716 			/* impossible .. garbage in nothing out */
1717 			return (((net->lastsa >> 2) + net->lastsv) >> 1);
1718 		} else {
1719 			/* impossible .. garbage in nothing out */
1720 			return (((net->lastsa >> 2) + net->lastsv) >> 1);
1721 		}
1722 	} else {
1723 		/* Clock wrapped? */
1724 		return (((net->lastsa >> 2) + net->lastsv) >> 1);
1725 	}
1726 	/***************************/
1727 	/* 2. update RTTVAR & SRTT */
1728 	/***************************/
1729 #if 0
1730 	/* Floating point version, per Section 5.3.1 C2/C3 in SCTP: */
1731 	if (net->lastsv || net->lastsa) {
1732 		/* per Section 5.3.1 C3 in SCTP */
1733 		net->lastsv = (int)		/* RTTVAR */
1734 		    (((double)(1.0 - 0.25) * (double)net->lastsv) +
1735 		    (double)(0.25 * (double)abs(net->lastsa - calc_time)));
1736 		net->lastsa = (int)		/* SRTT */
1737 		    (((double)(1.0 - 0.125) * (double)net->lastsa) +
1738 		    (double)(0.125 * (double)calc_time));
1739 	} else {
1740 		/* the first RTT calculation, per C2 Section 5.3.1 */
1741 		net->lastsa = calc_time;	/* SRTT */
1742 		net->lastsv = calc_time / 2;	/* RTTVAR */
1743 	}
1744 	/* if RTTVAR goes to 0, set it to the clock granularity */
1745 	if (net->lastsv == 0) {
1746 		net->lastsv = SCTP_CLOCK_GRANULARITY;
1747 	}
1748 	new_rto = net->lastsa + 4 * net->lastsv;
1749 #endif
1750 	/* this is Van Jacobson's integer version */
1751 	if (net->RTO) {
1752 		calc_time -= (net->lastsa >> 3);
1753 		net->lastsa += calc_time;
1754 		if (calc_time < 0) {
1755 			calc_time = -calc_time;
1756 		}
1757 		calc_time -= (net->lastsv >> 2);
1758 		net->lastsv += calc_time;
1759 		if (net->lastsv == 0) {
1760 			net->lastsv = SCTP_CLOCK_GRANULARITY;
1761 		}
1762 	} else {
1763 		/* First RTO measurement */
1764 		net->lastsa = calc_time;
1765 		net->lastsv = calc_time >> 1;
1766 		first_measure = 1;
1767 	}
1768 	new_rto = ((net->lastsa >> 2) + net->lastsv) >> 1;
1769 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
1770 	    (stcb->asoc.sat_network_lockout == 0)) {
1771 		stcb->asoc.sat_network = 1;
1772 	} else 	if ((!first_measure) && stcb->asoc.sat_network) {
1773 		stcb->asoc.sat_network = 0;
1774 		stcb->asoc.sat_network_lockout = 1;
1775 	}
1776 	/* bound it, per C6/C7 in Section 5.3.1 */
1777 	if (new_rto < stcb->asoc.minrto) {
1778 		new_rto = stcb->asoc.minrto;
1779 	}
1780 	if (new_rto > stcb->asoc.maxrto) {
1781 		new_rto = stcb->asoc.maxrto;
1782 	}
1783 	/* we now return the smoothed RTO */
1784 	return ((u_int32_t)new_rto);
1785 }
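/*
 * Worked trace of the integer smoothing above (editorial note, values
 * assumed for illustration): on the first measurement with
 * calc_time = 100 ms, lastsa becomes 100 and lastsv becomes 50, so
 * new_rto = ((100 >> 2) + 50) >> 1 = 37 ms before the minrto/maxrto
 * clamp; with a minrto of, say, 1000 ms this first estimate is raised
 * to that floor.
 */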
1786 
1787 
1788 /*
1789  * return a pointer to a contiguous piece of data from the given
1790  * mbuf chain starting at 'off' for 'len' bytes.  If the desired
1791  * piece spans more than one mbuf, a copy is made at 'ptr'.
1792  * caller must ensure that the buffer size is >= 'len'
1793  * returns NULL if there aren't 'len' bytes in the chain.
1794  */
1795 void *
1796 sctp_m_getptr(struct mbuf *m, int off, int len, u_int8_t *in_ptr)
1797 {
1798 	uint32_t count;
1799 	uint8_t *ptr;
1800 	ptr = in_ptr;
1801 	if ((off < 0) || (len <= 0))
1802 		return (NULL);
1803 
1804 	/* find the desired start location */
1805 	while ((m != NULL) && (off > 0)) {
1806 		if (off < m->m_len)
1807 			break;
1808 		off -= m->m_len;
1809 		m = m->m_next;
1810 	}
1811 	if (m == NULL)
1812 		return (NULL);
1813 
1814 	/* is the current mbuf large enough (eg. contiguous)? */
1815 	if ((m->m_len - off) >= len) {
1816 		return ((void *)(mtod(m, vaddr_t) + off));
1817 	} else {
1818 		/* else, it spans more than one mbuf, so save a temp copy... */
1819 		while ((m != NULL) && (len > 0)) {
1820 			count = uimin(m->m_len - off, len);
1821 			memcpy(ptr, (void *)(mtod(m, vaddr_t) + off), count);
1822 			len -= count;
1823 			ptr += count;
1824 			off = 0;
1825 			m = m->m_next;
1826 		}
1827 		if ((m == NULL) && (len > 0))
1828 			return (NULL);
1829 		else
1830 			return ((void *)in_ptr);
1831 	}
1832 }
1833 
1834 
1835 struct sctp_paramhdr *
1836 sctp_get_next_param(struct mbuf *m,
1837 		    int offset,
1838 		    struct sctp_paramhdr *pull,
1839 		    int pull_limit)
1840 {
1841 	/* This just provides a typed signature to Peter's Pull routine */
1842 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
1843     	    (u_int8_t *)pull));
1844 }
1845 
1846 
1847 int
1848 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
1849 {
1850 	/*
1851 	 * add padlen bytes of zero-filled padding to the end of the mbuf.
1852 	 * If padlen is > 3 this routine will fail.
1853 	 */
1854 	u_int8_t *dp;
1855 	int i;
1856 	if (padlen > 3) {
1857 		return (ENOBUFS);
1858 	}
1859 	if (M_TRAILINGSPACE(m)) {
1860 		/*
1861 		 * The easy way.
1862 		 * We hope the majority of the time we hit here :)
1863 		 */
1864 		dp = (u_int8_t *)(mtod(m, vaddr_t) + m->m_len);
1865 		m->m_len += padlen;
1866 	} else {
1867 		/* Hard way we must grow the mbuf */
1868 		struct mbuf *tmp;
1869 		MGET(tmp, M_DONTWAIT, MT_DATA);
1870 		if (tmp == NULL) {
1871 			/* Out of space GAK! we are in big trouble. */
1872 			return (ENOSPC);
1873 		}
1874 		/* setup and insert in middle */
1875 		tmp->m_next = m->m_next;
1876 		tmp->m_len = padlen;
1877 		m->m_next = tmp;
1878 		dp = mtod(tmp, u_int8_t *);
1879 	}
1880 	/* zero out the pad */
1881 	for (i = 0; i < padlen; i++) {
1882 		*dp = 0;
1883 		dp++;
1884 	}
1885 	return (0);
1886 }
1887 
1888 int
1889 sctp_pad_lastmbuf(struct mbuf *m, int padval)
1890 {
1891 	/* find the last mbuf in chain and pad it */
1892 	struct mbuf *m_at;
1893 	m_at = m;
1894 	while (m_at) {
1895 		if (m_at->m_next == NULL) {
1896 			return (sctp_add_pad_tombuf(m_at, padval));
1897 		}
1898 		m_at = m_at->m_next;
1899 	}
1900 	return (EFAULT);
1901 }
1902 
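/*
 * The sctp_notify_*() helpers below share one pattern: build the
 * notification in an mbuf flagged M_NOTIFICATION, rewrite the peer's
 * primary address into the form the application expects (v4-mapped v6
 * when needed, embedded scope stripped), append the result to the socket
 * receive buffer with sbappendaddr_nocheck(), and wake any readers.
 */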
1903 static void
1904 sctp_notify_assoc_change(u_int32_t event, struct sctp_tcb *stcb,
1905     u_int32_t error)
1906 {
1907 	struct mbuf *m_notify;
1908 	struct sctp_assoc_change *sac;
1909 	const struct sockaddr *to;
1910 	struct sockaddr_in6 sin6, lsa6;
1911 
1912 #ifdef SCTP_DEBUG
1913 	printf("notify: %d\n", event);
1914 #endif
1915 	/*
1916 	 * First, if we are going down, dump everything we
1917 	 * can to the socket rcv queue.
1918 	 */
1919 	if ((event == SCTP_SHUTDOWN_COMP) || (event == SCTP_COMM_LOST)) {
1920 		sctp_deliver_data(stcb, &stcb->asoc, NULL, 0);
1921 	}
1922 
1923 	/*
1924 	 * For TCP model AND UDP connected sockets we will send
1925 	 * an error up when an ABORT comes in.
1926 	 */
1927 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1928 	     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1929 	    (event == SCTP_COMM_LOST)) {
1930 		stcb->sctp_socket->so_error = ECONNRESET;
1931 		/* Wake ANY sleepers */
1932 		sowwakeup(stcb->sctp_socket);
1933 		sorwakeup(stcb->sctp_socket);
1934 	}
1935 #if 0
1936 	if ((event == SCTP_COMM_UP) &&
1937 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
1938  	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
1939 		 soisconnected(stcb->sctp_socket);
1940 	}
1941 #endif
1942 	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
1943 		/* event not enabled */
1944 		return;
1945 	}
1946 	MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
1947 	if (m_notify == NULL)
1948 		/* no space left */
1949 		return;
1950 	m_notify->m_len = 0;
1951 
1952 	sac = mtod(m_notify, struct sctp_assoc_change *);
1953 	sac->sac_type = SCTP_ASSOC_CHANGE;
1954 	sac->sac_flags = 0;
1955 	sac->sac_length = sizeof(struct sctp_assoc_change);
1956 	sac->sac_state = event;
1957 	sac->sac_error = error;
1958 	/* XXX verify these stream counts */
1959 	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
1960 	sac->sac_inbound_streams = stcb->asoc.streamincnt;
1961 	sac->sac_assoc_id = sctp_get_associd(stcb);
1962 
1963 	m_notify->m_flags |= M_EOR | M_NOTIFICATION;
1964 	m_notify->m_pkthdr.len = sizeof(struct sctp_assoc_change);
1965 	m_reset_rcvif(m_notify);
1966 	m_notify->m_len = sizeof(struct sctp_assoc_change);
1967 	m_notify->m_next = NULL;
1968 
1969 	/* append to socket */
1970 	to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
1971 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
1972 	    to->sa_family == AF_INET) {
1973 		const struct sockaddr_in *sin;
1974 
1975 		sin = (const struct sockaddr_in *)to;
1976 		in6_sin_2_v4mapsin6(sin, &sin6);
1977 		to = (struct sockaddr *)&sin6;
1978 	}
1979 	/* check and strip embedded scope junk */
1980 	to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
1981 						   &lsa6);
1982 	/*
1983 	 * We need to always notify comm changes.
1984 	 * if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
1985 	 * 	sctp_m_freem(m_notify);
1986 	 *	return;
1987 	 * }
1988 	*/
1989 	SCTP_TCB_UNLOCK(stcb);
1990 	SCTP_INP_WLOCK(stcb->sctp_ep);
1991 	SCTP_TCB_LOCK(stcb);
1992 	if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv,
1993 	    to, m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
1994 		/* not enough room */
1995 		sctp_m_freem(m_notify);
1996 		SCTP_INP_WUNLOCK(stcb->sctp_ep);
1997 		return;
1998 	}
1999 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2000 	   ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2001 		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2002 			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2003 		}
2004 	} else {
2005 		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2006 	}
2007 	SCTP_INP_WUNLOCK(stcb->sctp_ep);
2008 	/* Wake up any sleeper */
2009 	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2010 	sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
2011 }
2012 
2013 static void
2014 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2015     const struct sockaddr *sa, uint32_t error)
2016 {
2017 	struct mbuf *m_notify;
2018 	struct sctp_paddr_change *spc;
2019 	const struct sockaddr *to;
2020 	struct sockaddr_in6 sin6, lsa6;
2021 
2022 	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVPADDREVNT))
2023 		/* event not enabled */
2024 		return;
2025 
2026 	MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
2027 	if (m_notify == NULL)
2028 		return;
2029 	m_notify->m_len = 0;
2030 
2031 	MCLGET(m_notify, M_DONTWAIT);
2032 	if ((m_notify->m_flags & M_EXT) != M_EXT) {
2033 		sctp_m_freem(m_notify);
2034 		return;
2035 	}
2036 
2037 	spc = mtod(m_notify, struct sctp_paddr_change *);
2038 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2039 	spc->spc_flags = 0;
2040 	spc->spc_length = sizeof(struct sctp_paddr_change);
2041 	if (sa->sa_family == AF_INET) {
2042 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2043 	} else {
2044 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2045 	}
2046 	spc->spc_state = state;
2047 	spc->spc_error = error;
2048 	spc->spc_assoc_id = sctp_get_associd(stcb);
2049 
2050 	m_notify->m_flags |= M_EOR | M_NOTIFICATION;
2051 	m_notify->m_pkthdr.len = sizeof(struct sctp_paddr_change);
2052 	m_reset_rcvif(m_notify);
2053 	m_notify->m_len = sizeof(struct sctp_paddr_change);
2054 	m_notify->m_next = NULL;
2055 
2056 	to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
2057 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2058 	    to->sa_family == AF_INET) {
2059 		const struct sockaddr_in *sin;
2060 
2061 		sin = (const struct sockaddr_in *)to;
2062 		in6_sin_2_v4mapsin6(sin, &sin6);
2063 		to = (struct sockaddr *)&sin6;
2064 	}
2065 	/* check and strip embedded scope junk */
2066 	to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
2067 	    &lsa6);
2068 
2069 	if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2070 		sctp_m_freem(m_notify);
2071 		return;
2072 	}
2073 	/* append to socket */
2074 	SCTP_TCB_UNLOCK(stcb);
2075 	SCTP_INP_WLOCK(stcb->sctp_ep);
2076 	SCTP_TCB_LOCK(stcb);
2077 	if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
2078 	    m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2079 		/* not enough room */
2080 		sctp_m_freem(m_notify);
2081 		SCTP_INP_WUNLOCK(stcb->sctp_ep);
2082 		return;
2083 	}
2084 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2085 	   ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2086 		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2087 			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2088 		}
2089 	} else {
2090 		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2091 	}
2092 	SCTP_INP_WUNLOCK(stcb->sctp_ep);
2093 	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2094 }
2095 
2096 
2097 static void
2098 sctp_notify_send_failed(struct sctp_tcb *stcb, u_int32_t error,
2099 			struct sctp_tmit_chunk *chk)
2100 {
2101 	struct mbuf *m_notify;
2102 	struct sctp_send_failed *ssf;
2103 	struct sockaddr_in6 sin6, lsa6;
2104 	const struct sockaddr *to;
2105 	int length;
2106 
2107 	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVSENDFAILEVNT))
2108 		/* event not enabled */
2109 		return;
2110 
2111 	length = sizeof(struct sctp_send_failed) + chk->send_size;
2112 	MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
2113 	if (m_notify == NULL)
2114 		/* no space left */
2115 		return;
2116 	m_notify->m_len = 0;
2117 	ssf = mtod(m_notify, struct sctp_send_failed *);
2118 	ssf->ssf_type = SCTP_SEND_FAILED;
2119 	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
2120 		ssf->ssf_flags = SCTP_DATA_UNSENT;
2121 	else
2122 		ssf->ssf_flags = SCTP_DATA_SENT;
2123 	ssf->ssf_length = length;
2124 	ssf->ssf_error = error;
2125 	/* not exactly what the user sent in, but should be close :) */
2126 	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
2127 	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
2128 	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
2129 	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
2130 	ssf->ssf_info.sinfo_context = chk->rec.data.context;
2131 	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
2132 	ssf->ssf_assoc_id = sctp_get_associd(stcb);
2133 	m_notify->m_next = chk->data;
2134 	if (m_notify->m_next == NULL)
2135 		m_notify->m_flags |= M_EOR | M_NOTIFICATION;
2136 	else {
2137 		struct mbuf *m;
2138 		m_notify->m_flags |= M_NOTIFICATION;
2139 		m = m_notify;
2140 		while (m->m_next != NULL)
2141 			m = m->m_next;
2142 		m->m_flags |= M_EOR;
2143 	}
2144 	m_notify->m_pkthdr.len = length;
2145 	m_reset_rcvif(m_notify);
2146 	m_notify->m_len = sizeof(struct sctp_send_failed);
2147 
2148 	/* Steal off the mbuf */
2149 	chk->data = NULL;
2150 	to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
2151 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2152 	    to->sa_family == AF_INET) {
2153 		const struct sockaddr_in *sin;
2154 
2155 		sin = satocsin(to);
2156 		in6_sin_2_v4mapsin6(sin, &sin6);
2157 		to = (struct sockaddr *)&sin6;
2158 	}
2159 	/* check and strip embedded scope junk */
2160 	to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
2161 						   &lsa6);
2162 
2163 	if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2164 		sctp_m_freem(m_notify);
2165 		return;
2166 	}
2167 
2168 	/* append to socket */
2169 	SCTP_TCB_UNLOCK(stcb);
2170 	SCTP_INP_WLOCK(stcb->sctp_ep);
2171 	SCTP_TCB_LOCK(stcb);
2172 	if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
2173 	    m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2174 		/* not enough room */
2175 		sctp_m_freem(m_notify);
2176 		SCTP_INP_WUNLOCK(stcb->sctp_ep);
2177 		return;
2178 	}
2179 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2180 	   ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2181 		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2182 			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2183 		}
2184 	} else {
2185 		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2186 	}
2187 	SCTP_INP_WUNLOCK(stcb->sctp_ep);
2188 	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2189 }
2190 
2191 static void
2192 sctp_notify_adaption_layer(struct sctp_tcb *stcb,
2193 			   u_int32_t error)
2194 {
2195 	struct mbuf *m_notify;
2196 	struct sctp_adaption_event *sai;
2197 	struct sockaddr_in6 sin6, lsa6;
2198 	const struct sockaddr *to;
2199 
2200 	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_ADAPTIONEVNT))
2201 		/* event not enabled */
2202 		return;
2203 
2204 	MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
2205 	if (m_notify == NULL)
2206 		/* no space left */
2207 		return;
2208 	m_notify->m_len = 0;
2209 	sai = mtod(m_notify, struct sctp_adaption_event *);
2210 	sai->sai_type = SCTP_ADAPTION_INDICATION;
2211 	sai->sai_flags = 0;
2212 	sai->sai_length = sizeof(struct sctp_adaption_event);
2213 	sai->sai_adaption_ind = error;
2214 	sai->sai_assoc_id = sctp_get_associd(stcb);
2215 
2216 	m_notify->m_flags |= M_EOR | M_NOTIFICATION;
2217 	m_notify->m_pkthdr.len = sizeof(struct sctp_adaption_event);
2218 	m_reset_rcvif(m_notify);
2219 	m_notify->m_len = sizeof(struct sctp_adaption_event);
2220 	m_notify->m_next = NULL;
2221 
2222 	to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
2223 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2224 	    (to->sa_family == AF_INET)) {
2225 		const struct sockaddr_in *sin;
2226 
2227 		sin = satocsin(to);
2228 		in6_sin_2_v4mapsin6(sin, &sin6);
2229 		to = (struct sockaddr *)&sin6;
2230 	}
2231 	/* check and strip embedded scope junk */
2232 	to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
2233 						   &lsa6);
2234 	if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2235 		sctp_m_freem(m_notify);
2236 		return;
2237 	}
2238 	/* append to socket */
2239 	SCTP_TCB_UNLOCK(stcb);
2240 	SCTP_INP_WLOCK(stcb->sctp_ep);
2241 	SCTP_TCB_LOCK(stcb);
2242 	if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
2243 	    m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2244 		/* not enough room */
2245 		sctp_m_freem(m_notify);
2246 		SCTP_INP_WUNLOCK(stcb->sctp_ep);
2247 		return;
2248 	}
2249 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2250 	   ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2251 		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2252 			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2253 		}
2254 	} else {
2255 		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2256 	}
2257 	SCTP_INP_WUNLOCK(stcb->sctp_ep);
2258 	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2259 }
2260 
2261 static void
2262 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb,
2263 					u_int32_t error)
2264 {
2265 	struct mbuf *m_notify;
2266 	struct sctp_pdapi_event *pdapi;
2267 	struct sockaddr_in6 sin6, lsa6;
2268 	const struct sockaddr *to;
2269 
2270 	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_PDAPIEVNT))
2271 		/* event not enabled */
2272 		return;
2273 
2274 	MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
2275 	if (m_notify == NULL)
2276 		/* no space left */
2277 		return;
2278 	m_notify->m_len = 0;
2279 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
2280 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
2281 	pdapi->pdapi_flags = 0;
2282 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
2283 	pdapi->pdapi_indication = error;
2284 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
2285 
2286 	m_notify->m_flags |= M_EOR | M_NOTIFICATION;
2287 	m_notify->m_pkthdr.len = sizeof(struct sctp_pdapi_event);
2288 	m_reset_rcvif(m_notify);
2289 	m_notify->m_len = sizeof(struct sctp_pdapi_event);
2290 	m_notify->m_next = NULL;
2291 
2292 	to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
2293 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2294 	    (to->sa_family == AF_INET)) {
2295 		const struct sockaddr_in *sin;
2296 
2297 		sin = satocsin(to);
2298 		in6_sin_2_v4mapsin6(sin, &sin6);
2299 		to = (struct sockaddr *)&sin6;
2300 	}
2301 	/* check and strip embedded scope junk */
2302 	to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
2303 						   &lsa6);
2304 	if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2305 		sctp_m_freem(m_notify);
2306 		return;
2307 	}
2308 	/* append to socket */
2309 	SCTP_TCB_UNLOCK(stcb);
2310 	SCTP_INP_WLOCK(stcb->sctp_ep);
2311 	SCTP_TCB_LOCK(stcb);
2312 	if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
2313 	    m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2314 		/* not enough room */
2315 		sctp_m_freem(m_notify);
2316 		SCTP_INP_WUNLOCK(stcb->sctp_ep);
2317 		return;
2318 	}
2319 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2320 	   ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2321 		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2322 			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2323 		}
2324 	} else {
2325 		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2326 	}
2327 	SCTP_INP_WUNLOCK(stcb->sctp_ep);
2328 	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2329 }
2330 
2331 static void
2332 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
2333 {
2334 	struct mbuf *m_notify;
2335 	struct sctp_shutdown_event *sse;
2336 	struct sockaddr_in6 sin6, lsa6;
2337 	const struct sockaddr *to;
2338 
2339 	/*
2340 	 * For TCP model AND UDP connected sockets we will send
2341 	 * an error up when a SHUTDOWN completes
2342 	 */
2343 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2344 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
2345 		/* mark socket closed for read/write and wakeup! */
2346 		socantrcvmore(stcb->sctp_socket);
2347 		socantsendmore(stcb->sctp_socket);
2348 	}
2349 
2350 	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
2351 		/* event not enabled */
2352 		return;
2353 
2354 	MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
2355 	if (m_notify == NULL)
2356 		/* no space left */
2357 		return;
2358 	m_notify->m_len = 0;
2359 	sse = mtod(m_notify, struct sctp_shutdown_event *);
2360 	sse->sse_type = SCTP_SHUTDOWN_EVENT;
2361 	sse->sse_flags = 0;
2362 	sse->sse_length = sizeof(struct sctp_shutdown_event);
2363 	sse->sse_assoc_id = sctp_get_associd(stcb);
2364 
2365 	m_notify->m_flags |= M_EOR | M_NOTIFICATION;
2366 	m_notify->m_pkthdr.len = sizeof(struct sctp_shutdown_event);
2367 	m_reset_rcvif(m_notify);
2368 	m_notify->m_len = sizeof(struct sctp_shutdown_event);
2369 	m_notify->m_next = NULL;
2370 
2371 	to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
2372 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2373 	    to->sa_family == AF_INET) {
2374 		const struct sockaddr_in *sin;
2375 
2376 		sin = satocsin(to);
2377 		in6_sin_2_v4mapsin6(sin, &sin6);
2378 		to = (struct sockaddr *)&sin6;
2379 	}
2380 	/* check and strip embedded scope junk */
2381 	to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
2382 	    &lsa6);
2383 	if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2384 		sctp_m_freem(m_notify);
2385 		return;
2386 	}
2387 	/* append to socket */
2388 	SCTP_TCB_UNLOCK(stcb);
2389 	SCTP_INP_WLOCK(stcb->sctp_ep);
2390 	SCTP_TCB_LOCK(stcb);
2391 	if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
2392 	    m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2393 		/* not enough room */
2394 		sctp_m_freem(m_notify);
2395 		SCTP_INP_WUNLOCK(stcb->sctp_ep);
2396 		return;
2397 	}
2398 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2399 	   ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2400 		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2401 			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2402 		}
2403 	} else {
2404 		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2405 	}
2406 	SCTP_INP_WUNLOCK(stcb->sctp_ep);
2407 	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2408 }
2409 
2410 static void
2411 sctp_notify_stream_reset(struct sctp_tcb *stcb,
2412     int number_entries, uint16_t *list, int flag)
2413 {
2414 	struct mbuf *m_notify;
2415 	struct sctp_stream_reset_event *strreset;
2416 	struct sockaddr_in6 sin6, lsa6;
2417 	const struct sockaddr *to;
2418 	int len;
2419 
2420 	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_STREAM_RESETEVNT))
2421 		/* event not enabled */
2422 		return;
2423 
2424 	MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
2425 	if (m_notify == NULL)
2426 		/* no space left */
2427 		return;
2428 	m_notify->m_len = 0;
2429 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
2430 	if (len > M_TRAILINGSPACE(m_notify)) {
2431 		MCLGET(m_notify, M_WAIT);
2432 	}
2433 	if (m_notify == NULL)
2434 		/* no clusters */
2435 		return;
2436 
2437 	if (len > M_TRAILINGSPACE(m_notify)) {
2438 		/* never enough room */
2439 		m_freem(m_notify);
2440 		return;
2441 	}
2442 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
2443 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
2444 	if (number_entries == 0) {
2445 		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
2446 	} else {
2447 		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
2448 	}
2449 	strreset->strreset_length = len;
2450 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
2451 	if (number_entries) {
2452 		int i;
2453 		for (i = 0; i < number_entries; i++) {
2454 			strreset->strreset_list[i] = list[i];
2455 		}
2456 	}
2457 	m_notify->m_flags |= M_EOR | M_NOTIFICATION;
2458 	m_notify->m_pkthdr.len = len;
2459 	m_reset_rcvif(m_notify);
2460 	m_notify->m_len = len;
2461 	m_notify->m_next = NULL;
2462 	if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2463 		/* no space */
2464 		sctp_m_freem(m_notify);
2465 		return;
2466 	}
2467 	to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
2468 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2469 	    to->sa_family == AF_INET) {
2470 		const struct sockaddr_in *sin;
2471 
2472 		sin = satocsin(to);
2473 		in6_sin_2_v4mapsin6(sin, &sin6);
2474 		to = (struct sockaddr *)&sin6;
2475 	}
2476 	/* check and strip embedded scope junk */
2477 	to = (const struct sockaddr *) sctp_recover_scope((const struct sockaddr_in6 *)to,
2478 	    &lsa6);
2479 	/* append to socket */
2480 	SCTP_TCB_UNLOCK(stcb);
2481 	SCTP_INP_WLOCK(stcb->sctp_ep);
2482 	SCTP_TCB_LOCK(stcb);
2483 	if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
2484 	    m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2485 		/* not enough room */
2486 		sctp_m_freem(m_notify);
2487 		SCTP_INP_WUNLOCK(stcb->sctp_ep);
2488 		return;
2489 	}
2490 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2491 	   ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2492 		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2493 			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2494 		}
2495 	} else {
2496 		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2497 	}
2498 	SCTP_INP_WUNLOCK(stcb->sctp_ep);
2499 	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2500 }
2501 
2502 
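/*
 * Central dispatcher: map an internal SCTP_NOTIFY_* code onto the
 * matching notification helper above, unless the socket is already
 * gone or closed.
 */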
2503 void
2504 sctp_ulp_notify(u_int32_t notification, struct sctp_tcb *stcb,
2505 		u_int32_t error, void *data)
2506 {
2507 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2508 		/* No notifications up when we are in a no socket state */
2509 		return;
2510 	}
2511 	if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2512 		/* Can't send up to a closed socket any notifications */
2513 		return;
2514 	}
2515 	switch (notification) {
2516 	case SCTP_NOTIFY_ASSOC_UP:
2517 		sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error);
2518 		break;
2519 	case SCTP_NOTIFY_ASSOC_DOWN:
2520 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error);
2521 		break;
2522 	case SCTP_NOTIFY_INTERFACE_DOWN:
2523 	{
2524 		struct sctp_nets *net;
2525 		net = (struct sctp_nets *)data;
2526 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
2527 		    rtcache_getdst(&net->ro), error);
2528 		break;
2529 	}
2530 	case SCTP_NOTIFY_INTERFACE_UP:
2531 	{
2532 		struct sctp_nets *net;
2533 		net = (struct sctp_nets *)data;
2534 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
2535 		    rtcache_getdst(&net->ro), error);
2536 		break;
2537 	}
2538 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
2539 	{
2540 		struct sctp_nets *net;
2541 		net = (struct sctp_nets *)data;
2542 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
2543 		    rtcache_getdst(&net->ro), error);
2544 		break;
2545 	}
2546 	case SCTP_NOTIFY_DG_FAIL:
2547 		sctp_notify_send_failed(stcb, error,
2548 		    (struct sctp_tmit_chunk *)data);
2549 		break;
2550 	case SCTP_NOTIFY_ADAPTION_INDICATION:
2551 		/* Here the error is the adaption indication */
2552 		sctp_notify_adaption_layer(stcb, error);
2553 		break;
2554 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
2555 		sctp_notify_partial_delivery_indication(stcb, error);
2556 		break;
2557 	case SCTP_NOTIFY_STRDATA_ERR:
2558 		break;
2559 	case SCTP_NOTIFY_ASSOC_ABORTED:
2560 		sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error);
2561 		break;
2562 	case SCTP_NOTIFY_PEER_OPENED_STREAM:
2563 		break;
2564 	case SCTP_NOTIFY_STREAM_OPENED_OK:
2565 		break;
2566 	case SCTP_NOTIFY_ASSOC_RESTART:
2567 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error);
2568 		break;
2569 	case SCTP_NOTIFY_HB_RESP:
2570 		break;
2571 	case SCTP_NOTIFY_STR_RESET_SEND:
2572 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STRRESET_OUTBOUND_STR);
2573 		break;
2574 	case SCTP_NOTIFY_STR_RESET_RECV:
2575 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STRRESET_INBOUND_STR);
2576 		break;
2577 	case SCTP_NOTIFY_ASCONF_ADD_IP:
2578 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
2579 		    error);
2580 		break;
2581 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
2582 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
2583 		    error);
2584 		break;
2585 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
2586 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
2587 		    error);
2588 		break;
2589 	case SCTP_NOTIFY_ASCONF_SUCCESS:
2590 		break;
2591 	case SCTP_NOTIFY_ASCONF_FAILED:
2592 		break;
2593 	case SCTP_NOTIFY_PEER_SHUTDOWN:
2594 		sctp_notify_shutdown_event(stcb);
2595 		break;
2596 	default:
2597 #ifdef SCTP_DEBUG
2598 		if (sctp_debug_on & SCTP_DEBUG_UTIL1) {
2599 			printf("NOTIFY: unknown notification %xh (%u)\n",
2600 			    notification, notification);
2601 		}
2602 #endif /* SCTP_DEBUG */
2603 		break;
2604 	} /* end switch */
2605 }
2606 
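/*
 * Walk the stream output wheel and the send and sent queues, reporting
 * every queued chunk to the ULP as a send failure and freeing it.
 * Used when the association is being torn down.
 */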
2607 void
2608 sctp_report_all_outbound(struct sctp_tcb *stcb)
2609 {
2610 	struct sctp_association *asoc;
2611 	struct sctp_stream_out *outs;
2612 	struct sctp_tmit_chunk *chk;
2613 
2614 	asoc = &stcb->asoc;
2615 
2616 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2617 		return;
2618 	}
2619 	/* now go through all the gunk, freeing chunks */
2620 	TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
2621 		/* now clean up any chunks here */
2622 		chk = TAILQ_FIRST(&outs->outqueue);
2623 		while (chk) {
2624 			stcb->asoc.stream_queue_cnt--;
2625 			TAILQ_REMOVE(&outs->outqueue, chk, sctp_next);
2626 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
2627 			    SCTP_NOTIFY_DATAGRAM_UNSENT, chk);
2628 			if (chk->data) {
2629 				sctp_m_freem(chk->data);
2630 				chk->data = NULL;
2631 			}
2632 			if (chk->whoTo)
2633 				sctp_free_remote_addr(chk->whoTo);
2634 			chk->whoTo = NULL;
2635 			chk->asoc = NULL;
2636 			/* Free the chunk */
2637 			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
2638 			sctppcbinfo.ipi_count_chunk--;
2639 			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
2640 				panic("Chunk count is negative");
2641 			}
2642 			sctppcbinfo.ipi_gencnt_chunk++;
2643 			chk = TAILQ_FIRST(&outs->outqueue);
2644 		}
2645 	}
2646 	/* pending send queue SHOULD be empty */
2647 	if (!TAILQ_EMPTY(&asoc->send_queue)) {
2648 		chk = TAILQ_FIRST(&asoc->send_queue);
2649 		while (chk) {
2650 			TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
2651 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, chk);
2652 			if (chk->data) {
2653 				sctp_m_freem(chk->data);
2654 				chk->data = NULL;
2655 			}
2656 			if (chk->whoTo)
2657 				sctp_free_remote_addr(chk->whoTo);
2658 			chk->whoTo = NULL;
2659 			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
2660 			sctppcbinfo.ipi_count_chunk--;
2661 			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
2662 				panic("Chunk count is negative");
2663 			}
2664 			sctppcbinfo.ipi_gencnt_chunk++;
2665 			chk = TAILQ_FIRST(&asoc->send_queue);
2666 		}
2667 	}
2668 	/* sent queue SHOULD be empty */
2669 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
2670 		chk = TAILQ_FIRST(&asoc->sent_queue);
2671 		while (chk) {
2672 			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
2673 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
2674 			    SCTP_NOTIFY_DATAGRAM_SENT, chk);
2675 			if (chk->data) {
2676 				sctp_m_freem(chk->data);
2677 				chk->data = NULL;
2678 			}
2679 			if (chk->whoTo)
2680 				sctp_free_remote_addr(chk->whoTo);
2681 			chk->whoTo = NULL;
2682 			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
2683 			sctppcbinfo.ipi_count_chunk--;
2684 			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
2685 				panic("Chunk count is negative");
2686 			}
2687 			sctppcbinfo.ipi_gencnt_chunk++;
2688 			chk = TAILQ_FIRST(&asoc->sent_queue);
2689 		}
2690 	}
2691 }
2692 
2693 void
2694 sctp_abort_notification(struct sctp_tcb *stcb, int error)
2695 {
2696 
2697 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2698 		return;
2699 	}
2700 	/* Tell them we lost the asoc */
2701 	sctp_report_all_outbound(stcb);
2702 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL);
2703 }
2704 
2705 void
2706 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2707     struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err)
2708 {
2709 	u_int32_t vtag;
2710 
2711 	vtag = 0;
2712 	if (stcb != NULL) {
2713 		/* We have a TCB to abort, send notification too */
2714 		vtag = stcb->asoc.peer_vtag;
2715 		sctp_abort_notification(stcb, 0);
2716 	}
2717 	sctp_send_abort(m, iphlen, sh, vtag, op_err);
2718 	if (stcb != NULL) {
2719 		/* Ok, now lets free it */
2720 		sctp_free_assoc(inp, stcb);
2721 	} else {
2722 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2723 			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
2724 				sctp_inpcb_free(inp, 1);
2725 			}
2726 		}
2727 	}
2728 }
2729 
2730 void
2731 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2732     int error, struct mbuf *op_err)
2733 {
2734 
2735 	if (stcb == NULL) {
2736 		/* Got to have a TCB */
2737 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2738 			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
2739 				sctp_inpcb_free(inp, 1);
2740 			}
2741 		}
2742 		return;
2743 	}
2744 	/* notify the ulp */
2745 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
2746 		sctp_abort_notification(stcb, error);
2747 	/* notify the peer */
2748 	sctp_send_abort_tcb(stcb, op_err);
2749 	/* now free the asoc */
2750 	sctp_free_assoc(inp, stcb);
2751 }
2752 
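/*
 * Handle an "out of the blue" packet, one that matches no known
 * association: scan its chunks and, unless it carries something we must
 * not answer (PACKET DROPPED, ABORT, SHUTDOWN COMPLETE) or a SHUTDOWN
 * ACK (which gets a SHUTDOWN COMPLETE instead), respond with an ABORT.
 */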
2753 void
2754 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
2755     struct sctp_inpcb *inp, struct mbuf *op_err)
2756 {
2757 	struct sctp_chunkhdr *ch, chunk_buf;
2758 	unsigned int chk_length;
2759 
2760 	/* if the endpoint is going away and has no associations left, free it */
2761 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
2762 		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
2763 			sctp_inpcb_free(inp, 1);
2764 		}
2765 	}
2766 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
2767 	    sizeof(*ch), (u_int8_t *)&chunk_buf);
2768 	while (ch != NULL) {
2769 		chk_length = ntohs(ch->chunk_length);
2770 		if (chk_length < sizeof(*ch)) {
2771 			/* break to abort land */
2772 			break;
2773 		}
2774 		switch (ch->chunk_type) {
2775 		case SCTP_PACKET_DROPPED:
2776 			/* we don't respond to pkt-dropped */
2777 			return;
2778 		case SCTP_ABORT_ASSOCIATION:
2779 			/* we don't respond with an ABORT to an ABORT */
2780 			return;
2781 		case SCTP_SHUTDOWN_COMPLETE:
2782 			/*
2783 			 * we ignore it since we are not waiting for it
2784 			 * and the peer is gone
2785 			 */
2786 			return;
2787 		case SCTP_SHUTDOWN_ACK:
2788 			sctp_send_shutdown_complete2(m, iphlen, sh);
2789 			return;
2790 		default:
2791 			break;
2792 		}
2793 		offset += SCTP_SIZE32(chk_length);
2794 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
2795 		    sizeof(*ch), (u_int8_t *)&chunk_buf);
2796 	}
2797 	sctp_send_abort(m, iphlen, sh, 0, op_err);
2798 }
2799 
2800 /*
2801  * check the inbound datagram to make sure there is not an abort
2802  * inside it; if there is, return 1, else return 0.
2803  */
2804 int
2805 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, int *vtagfill)
2806 {
2807 	struct sctp_chunkhdr *ch;
2808 	struct sctp_init_chunk *init_chk, chunk_buf;
2809 	int offset;
2810 	unsigned int chk_length;
2811 
2812 	offset = iphlen + sizeof(struct sctphdr);
2813 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
2814 	    (u_int8_t *)&chunk_buf);
2815 	while (ch != NULL) {
2816 		chk_length = ntohs(ch->chunk_length);
2817 		if (chk_length < sizeof(*ch)) {
2818 			/* packet is probably corrupt */
2819 			break;
2820 		}
2821 		/* we seem to be ok, is it an abort? */
2822 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
2823 			/* yep, tell them */
2824 			return (1);
2825 		}
2826 		if (ch->chunk_type == SCTP_INITIATION) {
2827 			/* need to update the Vtag */
2828 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
2829 			    offset, sizeof(*init_chk), (u_int8_t *)&chunk_buf);
2830 			if (init_chk != NULL) {
2831 				*vtagfill = ntohl(init_chk->init.initiate_tag);
2832 			}
2833 		}
2834 		/* Nope, move to the next chunk */
2835 		offset += SCTP_SIZE32(chk_length);
2836 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
2837 		    sizeof(*ch), (u_int8_t *)&chunk_buf);
2838 	}
2839 	return (0);
2840 }
2841 
2842 /*
2843  * currently (2/02), ifa_addr embeds the scope_id and doesn't
2844  * have sin6_scope_id set (i.e. it's 0),
2845  * so create this function to compare link-local scopes
2846  */
2847 uint32_t
2848 sctp_is_same_scope(const struct sockaddr_in6 *addr1, const struct sockaddr_in6 *addr2)
2849 {
2850 	struct sockaddr_in6 a, b;
2851 
2852 	/* save copies */
2853 	a = *addr1;
2854 	b = *addr2;
2855 
2856 	if (a.sin6_scope_id == 0)
2857 		if (sa6_recoverscope(&a)) {
2858 			/* can't get scope, so can't match */
2859 			return (0);
2860 		}
2861 	if (b.sin6_scope_id == 0)
2862 		if (sa6_recoverscope(&b)) {
2863 			/* can't get scope, so can't match */
2864 			return (0);
2865 		}
2866 	if (a.sin6_scope_id != b.sin6_scope_id)
2867 		return (0);
2868 
2869 	return (1);
2870 }
2871 
2872 /*
2873  * returns a sockaddr_in6 with embedded scope recovered and removed
2874  */
2875 const struct sockaddr_in6 *
2876 sctp_recover_scope(const struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
2877 {
2878 	const struct sockaddr_in6 *newaddr;
2879 
2880 	newaddr = addr;
2881 	/* check and strip embedded scope junk */
2882 	if (addr->sin6_family == AF_INET6) {
2883 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
2884 			if (addr->sin6_scope_id == 0) {
2885 				*store = *addr;
2886 				if (sa6_recoverscope(store) == 0) {
2887 					/* use the recovered scope */
2888 					newaddr = store;
2889 				}
2890 				/* else, return the original "to" addr */
2891 			}
2892 		}
2893 	}
2894 	return (newaddr);
2895 }
2896 
2897 /*
2898  * are the two addresses the same?  currently a "scopeless" check
2899  * returns: 1 if same, 0 if not
2900  */
2901 int
2902 sctp_cmpaddr(const struct sockaddr *sa1, const struct sockaddr *sa2)
2903 {
2904 
2905 	/* must be valid */
2906 	if (sa1 == NULL || sa2 == NULL)
2907 		return (0);
2908 
2909 	/* must be the same family */
2910 	if (sa1->sa_family != sa2->sa_family)
2911 		return (0);
2912 
2913 	if (sa1->sa_family == AF_INET6) {
2914 		/* IPv6 addresses */
2915 		const struct sockaddr_in6 *sin6_1, *sin6_2;
2916 
2917 		sin6_1 = (const struct sockaddr_in6 *)sa1;
2918 		sin6_2 = (const struct sockaddr_in6 *)sa2;
2919 		return (SCTP6_ARE_ADDR_EQUAL(&sin6_1->sin6_addr,
2920 		    &sin6_2->sin6_addr));
2921 	} else if (sa1->sa_family == AF_INET) {
2922 		/* IPv4 addresses */
2923 		const struct sockaddr_in *sin_1, *sin_2;
2924 
2925 		sin_1 = (const struct sockaddr_in *)sa1;
2926 		sin_2 = (const struct sockaddr_in *)sa2;
2927 		return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
2928 	} else {
2929 		/* we don't do these... */
2930 		return (0);
2931 	}
2932 }
2933 
2934 void
2935 sctp_print_address(const struct sockaddr *sa)
2936 {
2937 	char ip6buf[INET6_ADDRSTRLEN];
2938 
2939 	if (sa->sa_family == AF_INET6) {
2940 		const struct sockaddr_in6 *sin6;
2941 		sin6 = (const struct sockaddr_in6 *)sa;
2942 		printf("IPv6 address: %s:%d scope:%u\n",
2943 		    IN6_PRINT(ip6buf, &sin6->sin6_addr), ntohs(sin6->sin6_port),
2944 		    sin6->sin6_scope_id);
2945 	} else if (sa->sa_family == AF_INET) {
2946 		const struct sockaddr_in *sin;
2947 		sin = (const struct sockaddr_in *)sa;
2948 		printf("IPv4 address: %s:%d\n", inet_ntoa(sin->sin_addr),
2949 		    ntohs(sin->sin_port));
2950 	} else {
2951 		printf("?\n");
2952 	}
2953 }
2954 
2955 void
2956 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
2957 {
2958 	if (iph->ip_v == IPVERSION) {
2959 		struct sockaddr_in lsa, fsa;
2960 
2961 		memset(&lsa, 0, sizeof(lsa));
2962 		lsa.sin_len = sizeof(lsa);
2963 		lsa.sin_family = AF_INET;
2964 		lsa.sin_addr = iph->ip_src;
2965 		lsa.sin_port = sh->src_port;
2966 		memset(&fsa, 0, sizeof(fsa));
2967 		fsa.sin_len = sizeof(fsa);
2968 		fsa.sin_family = AF_INET;
2969 		fsa.sin_addr = iph->ip_dst;
2970 		fsa.sin_port = sh->dest_port;
2971 		printf("src: ");
2972 		sctp_print_address((struct sockaddr *)&lsa);
2973 		printf("dest: ");
2974 		sctp_print_address((struct sockaddr *)&fsa);
2975 	} else if (iph->ip_v == (IPV6_VERSION >> 4)) {
2976 		struct ip6_hdr *ip6;
2977 		struct sockaddr_in6 lsa6, fsa6;
2978 
2979 		ip6 = (struct ip6_hdr *)iph;
2980 		memset(&lsa6, 0, sizeof(lsa6));
2981 		lsa6.sin6_len = sizeof(lsa6);
2982 		lsa6.sin6_family = AF_INET6;
2983 		lsa6.sin6_addr = ip6->ip6_src;
2984 		lsa6.sin6_port = sh->src_port;
2985 		memset(&fsa6, 0, sizeof(fsa6));
2986 		fsa6.sin6_len = sizeof(fsa6);
2987 		fsa6.sin6_family = AF_INET6;
2988 		fsa6.sin6_addr = ip6->ip6_dst;
2989 		fsa6.sin6_port = sh->dest_port;
2990 		printf("src: ");
2991 		sctp_print_address((struct sockaddr *)&lsa6);
2992 		printf("dest: ");
2993 		sctp_print_address((struct sockaddr *)&fsa6);
2994 	}
2995 }
2996 
2997 #if defined(__FreeBSD__) || defined(__APPLE__)
2998 
2999 /* cloned from uipc_socket.c */
3000 
3001 #define SCTP_SBLINKRECORD(sb, m0) do {					\
3002 	if ((sb)->sb_lastrecord != NULL)				\
3003 		(sb)->sb_lastrecord->m_nextpkt = (m0);			\
3004 	else								\
3005 		(sb)->sb_mb = (m0);					\
3006 	(sb)->sb_lastrecord = (m0);					\
3007 } while (/*CONSTCOND*/0)
3008 #endif
3009 
3010 
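/*
 * sbappendaddr() variant that skips the usual receive-space check and
 * stashes the association's vtag in m_pkthdr.csum_data (csum on OpenBSD)
 * so a record can later be matched back to its association.  Separate
 * implementations follow for each supported platform.
 */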
3011 int
3012 sbappendaddr_nocheck(struct sockbuf *sb, const struct sockaddr *asa,
3013 	struct mbuf *m0, struct mbuf *control,
3014 	u_int32_t tag, struct sctp_inpcb *inp)
3015 {
3016 #ifdef __NetBSD__
3017 	struct mbuf *m, *n;
3018 
3019 	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
3020 		panic("sbappendaddr_nocheck");
3021 
3022 	m0->m_pkthdr.csum_data = (int)tag;
3023 
3024 	for (n = control; n; n = n->m_next) {
3025 		if (n->m_next == 0)	/* keep pointer to last control buf */
3026 			break;
3027 	}
3028 	if (((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) == 0) ||
3029 	    ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)== 0)) {
3030 		MGETHDR(m, M_DONTWAIT, MT_SONAME);
3031 		if (m == 0)
3032 			return (0);
3033 
3034 		m->m_len = asa->sa_len;
3035 		memcpy(mtod(m, void *), (const void *)asa, asa->sa_len);
3036 	} else {
3037 		m = NULL;
3038 	}
3039 	if (n) {
3040 		n->m_next = m0;		/* concatenate data to control */
3041 	} else {
3042 		control = m0;
3043 	}
3044 	if (m)
3045 		m->m_next = control;
3046 	else
3047 		m = control;
3048 	m->m_pkthdr.csum_data = tag;
3049 
3050 	for (n = m; n; n = n->m_next)
3051 		sballoc(sb, n);
3052 	if ((n = sb->sb_mb) != NULL) {
3053 		if ((n->m_nextpkt != inp->sb_last_mpkt) && (n->m_nextpkt == NULL)) {
3054 			inp->sb_last_mpkt = NULL;
3055 		}
3056 		if (inp->sb_last_mpkt)
3057 			inp->sb_last_mpkt->m_nextpkt = m;
3058  		else {
3059 			while (n->m_nextpkt) {
3060 				n = n->m_nextpkt;
3061 			}
3062 			n->m_nextpkt = m;
3063 		}
3064 		inp->sb_last_mpkt = m;
3065 	} else {
3066 		inp->sb_last_mpkt = sb->sb_mb = m;
3067 		inp->sctp_vtag_first = tag;
3068 	}
3069 	return (1);
3070 #endif
3071 #if defined(__FreeBSD__) || defined(__APPLE__)
3072 	struct mbuf *m, *n, *nlast;
3073 	int cnt = 0;
3074 
3075 	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
3076 		panic("sbappendaddr_nocheck");
3077 
3078 	for (n = control; n; n = n->m_next) {
3079 		if (n->m_next == 0)	/* get pointer to last control buf */
3080 			break;
3081 	}
3082 	if (((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) == 0) ||
3083 	    ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)== 0)) {
3084 		if (asa->sa_len > MHLEN)
3085 			return (0);
3086  try_again:
3087 		MGETHDR(m, M_DONTWAIT, MT_SONAME);
3088 		if (m == 0)
3089 			return (0);
3090 		m->m_len = 0;
3091 		/* safety */
3092 		if (m == m0) {
3093 			printf("Duplicate mbuf allocated %p in and mget returned %p?\n",
3094 			       m0, m);
3095 			if (cnt) {
3096 				panic("more than once");
3097 			}
3098 			cnt++;
3099 			goto try_again;
3100 		}
3101 		m->m_len = asa->sa_len;
3102 		bcopy((void *)asa, mtod(m, void *), asa->sa_len);
3103 	}
3104 	else {
3105 		m = NULL;
3106 	}
3107 	if (n)
3108 		n->m_next = m0;		/* concatenate data to control */
3109 	else
3110 		control = m0;
3111 	if (m)
3112 		m->m_next = control;
3113 	else
3114 		m = control;
3115 	m->m_pkthdr.csum_data = (int)tag;
3116 
3117 	for (n = m; n; n = n->m_next)
3118 		sballoc(sb, n);
3119 	nlast = n;
3120 	if (sb->sb_mb == NULL) {
3121 		inp->sctp_vtag_first = tag;
3122 	}
3123 
3124 #ifdef __FreeBSD__
3125 	if (sb->sb_mb == NULL)
3126 		inp->sctp_vtag_first = tag;
3127 	SCTP_SBLINKRECORD(sb, m);
3128 	sb->sb_mbtail = nlast;
3129 #else
3130 	if ((n = sb->sb_mb) != NULL) {
3131 		if ((n->m_nextpkt != inp->sb_last_mpkt) && (n->m_nextpkt == NULL)) {
3132 			inp->sb_last_mpkt = NULL;
3133 		}
3134 		if (inp->sb_last_mpkt)
3135 			inp->sb_last_mpkt->m_nextpkt = m;
3136  		else {
3137 			while (n->m_nextpkt) {
3138 				n = n->m_nextpkt;
3139 			}
3140 			n->m_nextpkt = m;
3141 		}
3142 		inp->sb_last_mpkt = m;
3143 	} else {
3144 		inp->sb_last_mpkt = sb->sb_mb = m;
3145 		inp->sctp_vtag_first = tag;
3146 	}
3147 #endif
3148 	return (1);
3149 #endif
3150 #ifdef __OpenBSD__
3151 	struct mbuf *m, *n;
3152 
3153 	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
3154 		panic("sbappendaddr_nocheck");
3155 	m0->m_pkthdr.csum = (int)tag;
3156 	for (n = control; n; n = n->m_next) {
3157 		if (n->m_next == 0)	/* keep pointer to last control buf */
3158 			break;
3159 	}
3160 	if (((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) == 0) ||
3161 	    ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)== 0)) {
3162 		if (asa->sa_len > MHLEN)
3163 			return (0);
3164 		MGETHDR(m, M_DONTWAIT, MT_SONAME);
3165 		if (m == 0)
3166 			return (0);
3167 		m->m_len = asa->sa_len;
3168 		bcopy((void *)asa, mtod(m, void *), asa->sa_len);
3169 	} else {
3170 		m = NULL;
3171 	}
3172 	if (n)
3173 		n->m_next = m0;		/* concatenate data to control */
3174 	else
3175 		control = m0;
3176 
3177 	m->m_pkthdr.csum = (int)tag;
3178 	m->m_next = control;
3179 	for (n = m; n; n = n->m_next)
3180 		sballoc(sb, n);
3181 	if ((n = sb->sb_mb) != NULL) {
3182 		if ((n->m_nextpkt != inp->sb_last_mpkt) && (n->m_nextpkt == NULL)) {
3183 			inp->sb_last_mpkt = NULL;
3184 		}
3185 		if (inp->sb_last_mpkt)
3186 			inp->sb_last_mpkt->m_nextpkt = m;
3187  		else {
3188 			while (n->m_nextpkt) {
3189 				n = n->m_nextpkt;
3190 			}
3191 			n->m_nextpkt = m;
3192 		}
3193 		inp->sb_last_mpkt = m;
3194 	} else {
3195 		inp->sb_last_mpkt = sb->sb_mb = m;
3196 		inp->sctp_vtag_first = tag;
3197 	}
3198 	return (1);
3199 #endif
3200 }
3201 
3202 /*************HOLD THIS COMMENT FOR PATCH FILE OF
3203  *************ALTERNATE ROUTING CODE
3204  */
3205 
3206 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
3207  *************ALTERNATE ROUTING CODE
3208  */
3209 
3210 struct mbuf *
3211 sctp_generate_invmanparam(int err)
3212 {
3213 	/* Return an mbuf with an invalid mandatory parameter */
3214 	struct mbuf *m;
3215 
3216 	MGET(m, M_DONTWAIT, MT_DATA);
3217 	if (m) {
3218 		struct sctp_paramhdr *ph;
3219 		m->m_len = sizeof(struct sctp_paramhdr);
3220 		ph = mtod(m, struct sctp_paramhdr *);
3221 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
3222 		ph->param_type = htons(err);
3223 	}
3224 	return (m);
3225 }
3226 
3227 static int
3228 sctp_should_be_moved(struct mbuf *this, struct sctp_association *asoc)
3229 {
3230 	struct mbuf *m;
3231 	/*
3232 	 * given an mbuf chain, look through it finding
3233 	 * the M_PKTHDR and return 1 if it belongs to
3234 	 * the association given. We tell this by
3235 	 * a kludge where we stuff the my_vtag of the asoc
3236 	 * into the m->m_pkthdr.csum_data/csum field.
3237 	 */
3238 	m = this;
3239 	while (m) {
3240 		if (m->m_flags & M_PKTHDR) {
3241 			/* check it */
3242 #if defined(__OpenBSD__)
3243 			if ((u_int32_t)m->m_pkthdr.csum == asoc->my_vtag)
3244 #else
3245 			if ((u_int32_t)m->m_pkthdr.csum_data == asoc->my_vtag)
3246 #endif
3247 			{
3248 				/* Yep */
3249 				return (1);
3250 			}
3251 		}
3252 		m = m->m_next;
3253 	}
3254 	return (0);
3255 }
3256 
3257 u_int32_t
3258 sctp_get_first_vtag_from_sb(struct socket *so)
3259 {
3260 	struct mbuf *this, *at;
3261 	u_int32_t retval;
3262 
3263 	retval = 0;
3264 	if (so->so_rcv.sb_mb) {
3265 		/* grubbing time */
3266 		this = so->so_rcv.sb_mb;
3267 		while (this) {
3268 			at = this;
3269 			/* get to the m_pkthdr */
3270 			while (at) {
3271 				if (at->m_flags & M_PKTHDR)
3272 					break;
3273 				else {
3274 					at = at->m_next;
3275 				}
3276 			}
3277 			/* now, do we have an m_pkthdr? */
3278 			if (at && (at->m_flags & M_PKTHDR)) {
3279 				/* check it */
3280 #if defined(__OpenBSD__)
3281 				if ((u_int32_t)at->m_pkthdr.csum != 0)
3282 #else
3283 				if ((u_int32_t)at->m_pkthdr.csum_data != 0)
3284 #endif
3285 				{
3286 					/* its the one */
3287 #if defined(__OpenBSD__)
3288 					retval = (u_int32_t)at->m_pkthdr.csum;
3289 #else
3290 					retval =
3291 					    (u_int32_t)at->m_pkthdr.csum_data;
3292 #endif
3293 					break;
3294 				}
3295 			}
3296 			this = this->m_nextpkt;
3297 		}
3298 
3299 	}
3300 	return (retval);
3301 
3302 }
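
/*
 * Move every record in the old socket's receive buffer that belongs to
 * the given association over to the new socket, fixing the buffer
 * accounting on both sides and re-deriving sctp_vtag_first when the
 * head record was moved.
 */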
3303 void
3304 sctp_grub_through_socket_buffer(struct sctp_inpcb *inp, struct socket *old,
3305     struct socket *new, struct sctp_tcb *stcb)
3306 {
3307 	struct mbuf **put, **take, *next, *this;
3308 	struct sockbuf *old_sb, *new_sb;
3309 	struct sctp_association *asoc;
3310 	int moved_top = 0;
3311 
3312 	asoc = &stcb->asoc;
3313 	old_sb = &old->so_rcv;
3314 	new_sb = &new->so_rcv;
3315 	if (old_sb->sb_mb == NULL) {
3316 		/* Nothing to move */
3317 		return;
3318 	}
3319 
3320 	if (inp->sctp_vtag_first == asoc->my_vtag) {
3321 		/* First one must be moved */
3322 		struct mbuf *mm;
3323 		for (mm = old_sb->sb_mb; mm; mm = mm->m_next) {
3324 			/*
3325 			 * Go down the chain and fix
3326 			 * the space allocation of the
3327 			 * two sockets.
3328 			 */
3329 			sbfree(old_sb, mm);
3330 			sballoc(new_sb, mm);
3331 		}
3332 		new_sb->sb_mb = old_sb->sb_mb;
3333 		old_sb->sb_mb = new_sb->sb_mb->m_nextpkt;
3334 		new_sb->sb_mb->m_nextpkt = NULL;
3335 		put = &new_sb->sb_mb->m_nextpkt;
3336 		moved_top = 1;
3337 	} else {
3338 		put = &new_sb->sb_mb;
3339 	}
3340 
3341 	take = &old_sb->sb_mb;
3342 	next = old_sb->sb_mb;
3343 	while (next) {
3344 		this = next;
3345 		/* position for the next one */
3346 		next = this->m_nextpkt;
3347 		/* check the tag of this packet */
3348 		if (sctp_should_be_moved(this, asoc)) {
3349 			/* yes this needs to be moved */
3350 			struct mbuf *mm;
3351 			*take = this->m_nextpkt;
3352 			this->m_nextpkt = NULL;
3353 			*put = this;
3354 			for (mm = this; mm; mm = mm->m_next) {
3355 				/*
3356 				 * Go down the chain and fix
3357 				 * the space allocation of the
3358 				 * two sockets.
3359 				 */
3360 				sbfree(old_sb, mm);
3361 				sballoc(new_sb, mm);
3362 			}
3363 			put = &this->m_nextpkt;
3364 
3365 		} else {
3366 			/* no, advance our take point. */
3367 			take = &this->m_nextpkt;
3368 		}
3369 	}
3370 	if (moved_top) {
3371 		/*
3372 		 * Ok, so now we must re-position vtag_first to
3373 		 * match the new first one since we moved the
3374 		 * mbuf at the top.
3375 		 */
3376 		inp->sctp_vtag_first = sctp_get_first_vtag_from_sb(old);
3377 	}
3378 }
3379 
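/*
 * Give back the output-queue byte and mbuf accounting held by a queued
 * chunk, both on the association totals and, for TCP-model sockets, on
 * the socket send buffer counters.
 */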
3380 void
3381 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
3382     struct sctp_tmit_chunk *tp1)
3383 {
3384 	if (tp1->data == NULL) {
3385 		return;
3386 	}
3387 #ifdef SCTP_MBCNT_LOGGING
3388 	sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
3389 		       asoc->total_output_queue_size,
3390 		       tp1->book_size,
3391 		       asoc->total_output_mbuf_queue_size,
3392 		       tp1->mbcnt);
3393 #endif
3394 	if (asoc->total_output_queue_size >= tp1->book_size) {
3395 		asoc->total_output_queue_size -= tp1->book_size;
3396 	} else {
3397 		asoc->total_output_queue_size = 0;
3398 	}
3399 
3400 	/* Now free the mbuf */
3401 	if (asoc->total_output_mbuf_queue_size >= tp1->mbcnt) {
3402 		asoc->total_output_mbuf_queue_size -= tp1->mbcnt;
3403 	} else {
3404 		asoc->total_output_mbuf_queue_size = 0;
3405 	}
3406 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3407 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3408 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
3409 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
3410 		} else {
3411 			stcb->sctp_socket->so_snd.sb_cc = 0;
3412 
3413 		}
3414 		if (stcb->sctp_socket->so_snd.sb_mbcnt >= tp1->mbcnt) {
3415 			stcb->sctp_socket->so_snd.sb_mbcnt -= tp1->mbcnt;
3416 		} else {
3417 			stcb->sctp_socket->so_snd.sb_mbcnt = 0;
3418 		}
3419 	}
3420 }
3421 
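/*
 * PR-SCTP: give up on a partially reliable message.  Mark each fragment
 * for FORWARD-TSN skipping, notify the ULP, free the data and return the
 * number of book-kept bytes released.  When the fragments straddle the
 * send and sent queues, recurse into the send queue as well.
 */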
3422 int
3423 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
3424     int reason, struct sctpchunk_listhead *queue)
3425 {
3426 	int ret_sz = 0;
3427 	int notdone;
3428 	uint8_t foundeom = 0;
3429 
3430 	do {
3431 		ret_sz += tp1->book_size;
3432 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
3433 		if (tp1->data) {
3434 			sctp_free_bufspace(stcb, &stcb->asoc, tp1);
3435 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1);
3436 			sctp_m_freem(tp1->data);
3437 			tp1->data = NULL;
3438 			sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
3439 		}
3440 		if (tp1->flags & SCTP_PR_SCTP_BUFFER) {
3441 			stcb->asoc.sent_queue_cnt_removeable--;
3442 		}
3443 		if (queue == &stcb->asoc.send_queue) {
3444 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
3445 			/* on to the sent queue */
3446 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
3447 			    sctp_next);
3448 			stcb->asoc.sent_queue_cnt++;
3449 		}
3450 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
3451 		    SCTP_DATA_NOT_FRAG) {
3452 			/* not frag'ed, we are done */
3453 			notdone = 0;
3454 			foundeom = 1;
3455 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
3456 			/* end of frag, we are done */
3457 			notdone = 0;
3458 			foundeom = 1;
3459 		} else {
3460 			/* It's a begin or middle piece; we must mark all of it */
3461 			notdone = 1;
3462 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3463 		}
3464 	} while (tp1 && notdone);
3465 	if ((foundeom == 0) && (queue == &stcb->asoc.sent_queue)) {
3466 		/*
3467 		 * The multi-part message was scattered
3468 		 * across the send and sent queue.
3469 		 */
3470 		tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3471 		/*
3472 		 * recurse through the send_queue too, starting at the
3473 		 * beginning.
3474 		 */
3475 		if (tp1) {
3476 			ret_sz += sctp_release_pr_sctp_chunk(stcb, tp1, reason,
3477 			    &stcb->asoc.send_queue);
3478 		} else {
3479 			printf("hmm, nothing on the send queue and no EOM?\n");
3480 		}
3481 	}
3482 	return (ret_sz);
3483 }
3484 
3485 /*
3486  * checks to see if the given address, sa, is one that is currently
3487  * known by the kernel
3488  * note: can't distinguish the same address on multiple interfaces and
3489  *       doesn't handle multiple addresses with different zone/scope id's
3490  * note: ifa_ifwithaddr() compares the entire sockaddr struct
3491  */
3492 struct ifaddr *
3493 sctp_find_ifa_by_addr(struct sockaddr *sa)
3494 {
3495 	struct ifnet *ifn;
3496 	struct ifaddr *ifa;
3497 	int s;
3498 
3499 	/* go through all our known interfaces */
3500 	s = pserialize_read_enter();
3501 	IFNET_READER_FOREACH(ifn) {
3502 		/* go through each interface addresses */
3503 		IFADDR_READER_FOREACH(ifa, ifn) {
3504 			/* correct family? */
3505 			if (ifa->ifa_addr->sa_family != sa->sa_family)
3506 				continue;
3507 
3508 #ifdef INET6
3509 			if (ifa->ifa_addr->sa_family == AF_INET6) {
3510 				/* IPv6 address */
3511 				struct sockaddr_in6 *sin1, *sin2, sin6_tmp;
3512 				sin1 = (struct sockaddr_in6 *)ifa->ifa_addr;
3513 				if (IN6_IS_SCOPE_LINKLOCAL(&sin1->sin6_addr)) {
3514 					/* create a copy and clear scope */
3515 					memcpy(&sin6_tmp, sin1,
3516 					    sizeof(struct sockaddr_in6));
3517 					sin1 = &sin6_tmp;
3518 					in6_clearscope(&sin1->sin6_addr);
3519 				}
3520 				sin2 = (struct sockaddr_in6 *)sa;
3521 				if (memcmp(&sin1->sin6_addr, &sin2->sin6_addr,
3522 					   sizeof(struct in6_addr)) == 0) {
3523 					/* found it */
3524 					pserialize_read_exit(s);
3525 					return (ifa);
3526 				}
3527 			} else
3528 #endif
3529 			if (ifa->ifa_addr->sa_family == AF_INET) {
3530 				/* IPv4 address */
3531 				struct sockaddr_in *sin1, *sin2;
3532 				sin1 = (struct sockaddr_in *)ifa->ifa_addr;
3533 				sin2 = (struct sockaddr_in *)sa;
3534 				if (sin1->sin_addr.s_addr ==
3535 				    sin2->sin_addr.s_addr) {
3536 					/* found it */
3537 					pserialize_read_exit(s);
3538 					return (ifa);
3539 				}
3540 			}
3541 			/* else, not AF_INET or AF_INET6, so skip */
3542 		} /* end foreach ifa */
3543 	} /* end foreach ifn */
3544 	pserialize_read_exit(s);
3545 
3546 	/* not found! */
3547 	return (NULL);
3548 }
3549 
3550 
3551 #ifdef __APPLE__
3552 /*
3553  * here we hack in a fix for Apple's m_copym for the case where the first mbuf
3554  * in the chain is a M_PKTHDR and the length is zero
3555  */
3556 static void
3557 sctp_pkthdr_fix(struct mbuf *m)
3558 {
3559 	struct mbuf *m_nxt;
3560 
3561 	if ((m->m_flags & M_PKTHDR) == 0) {
3562 		/* not a PKTHDR */
3563 		return;
3564 	}
3565 
3566 	if (m->m_len != 0) {
3567 		/* not a zero length PKTHDR mbuf */
3568 		return;
3569 	}
3570 
3571 	/* let's move a word into the first mbuf... yes, ugly! */
3572 	m_nxt = m->m_next;
3573 	if (m_nxt == NULL) {
3574 		/* umm... not a very useful mbuf chain... */
3575 		return;
3576 	}
3577 	if ((size_t)m_nxt->m_len > sizeof(long)) {
3578 		/* move over a long */
3579 		bcopy(mtod(m_nxt, void *), mtod(m, void *), sizeof(long));
3580 		/* update mbuf data pointers and lengths */
3581 		m->m_len += sizeof(long);
3582 		m_nxt->m_data += sizeof(long);
3583 		m_nxt->m_len -= sizeof(long);
3584 	}
3585 }
3586 
3587 inline struct mbuf *
3588 sctp_m_copym(struct mbuf *m, int off, int len, int wait)
3589 {
3590 	sctp_pkthdr_fix(m);
3591 	return (m_copym(m, off, len, wait));
3592 }
3593 #endif /* __APPLE__ */
3594