xref: /netbsd-src/sys/netinet/sctputil.c (revision 95b39c65ca575fb40c6bb7083e0eb7ec28eabef1)
1 /*	$KAME: sctputil.c,v 1.39 2005/06/16 20:54:06 jinmei Exp $	*/
2 /*	$NetBSD: sctputil.c,v 1.1 2015/10/13 21:28:35 rjs Exp $	*/
3 
4 /*
5  * Copyright (c) 2001, 2002, 2003, 2004 Cisco Systems, Inc.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *      This product includes software developed by Cisco Systems, Inc.
19  * 4. Neither the name of the project nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY CISCO SYSTEMS AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL CISCO SYSTEMS OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: sctputil.c,v 1.1 2015/10/13 21:28:35 rjs Exp $");
38 
39 #ifdef _KERNEL_OPT
40 #include "opt_inet.h"
41 #include "opt_ipsec.h"
42 #include "opt_sctp.h"
43 #endif /* _KERNEL_OPT */
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/malloc.h>
48 #include <sys/mbuf.h>
49 #include <sys/domain.h>
50 #include <sys/protosw.h>
51 #include <sys/socket.h>
52 #include <sys/socketvar.h>
53 #include <sys/mutex.h>
54 #include <sys/proc.h>
55 #include <sys/kernel.h>
56 #include <sys/sysctl.h>
57 
58 #include <sys/callout.h>
59 
60 #include <net/radix.h>
61 #include <net/route.h>
62 
63 #ifdef INET6
64 #include <sys/domain.h>
65 #endif
66 
67 #include <machine/limits.h>
68 
69 #include <net/if.h>
70 #include <net/if_types.h>
71 #include <net/route.h>
72 
73 #include <netinet/in.h>
74 #include <netinet/in_systm.h>
75 #include <netinet/ip.h>
76 #include <netinet/in_pcb.h>
77 #include <netinet/in_var.h>
78 #include <netinet/ip_var.h>
79 
80 #ifdef INET6
81 #include <netinet/ip6.h>
82 #include <netinet6/ip6_var.h>
83 #include <netinet6/scope6_var.h>
84 #include <netinet6/in6_pcb.h>
85 
86 #endif /* INET6 */
87 
88 #include <netinet/sctp_pcb.h>
89 
90 #ifdef IPSEC
91 #include <netinet6/ipsec.h>
92 #include <netkey/key.h>
93 #endif /* IPSEC */
94 
95 #include <netinet/sctputil.h>
96 #include <netinet/sctp_var.h>
97 #ifdef INET6
98 #include <netinet6/sctp6_var.h>
99 #endif
100 #include <netinet/sctp_header.h>
101 #include <netinet/sctp_output.h>
102 #include <netinet/sctp_hashdriver.h>
103 #include <netinet/sctp_uio.h>
104 #include <netinet/sctp_timer.h>
105 #include <netinet/sctp_crc32.h>
106 #include <netinet/sctp_indata.h>	/* for sctp_deliver_data() */
107 #define NUMBER_OF_MTU_SIZES 18
108 
109 #ifdef SCTP_DEBUG
110 extern u_int32_t sctp_debug_on;
111 #endif
112 
113 #ifdef SCTP_STAT_LOGGING
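/*
 * Circular in-kernel event log used for debugging: sctp_cwnd_log_at is
 * the next slot to write and sctp_cwnd_log_rolled records whether the
 * index has wrapped past SCTP_STAT_LOG_SIZE at least once.
 */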
114 int sctp_cwnd_log_at=0;
115 int sctp_cwnd_log_rolled=0;
116 struct sctp_cwnd_log sctp_clog[SCTP_STAT_LOG_SIZE];
117 
118 void sctp_clr_stat_log(void)
119 {
120 	sctp_cwnd_log_at=0;
121 	sctp_cwnd_log_rolled=0;
122 }
123 
124 void
125 sctp_log_strm_del_alt(u_int32_t tsn, u_int16_t sseq, int from)
126 {
127 
128 	sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
129 	sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_STRM;
130 	sctp_clog[sctp_cwnd_log_at].x.strlog.n_tsn = tsn;
131 	sctp_clog[sctp_cwnd_log_at].x.strlog.n_sseq = sseq;
132 	sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = 0;
133 	sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = 0;
134 	sctp_cwnd_log_at++;
135 	if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
136 		sctp_cwnd_log_at = 0;
137 		sctp_cwnd_log_rolled = 1;
138 	}
139 
140 }
141 
142 void
143 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
144 {
145 
146 	sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
147 	sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_MAP;
148 	sctp_clog[sctp_cwnd_log_at].x.map.base = map;
149 	sctp_clog[sctp_cwnd_log_at].x.map.cum = cum;
150 	sctp_clog[sctp_cwnd_log_at].x.map.high = high;
151 	sctp_cwnd_log_at++;
152 	if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
153 		sctp_cwnd_log_at = 0;
154 		sctp_cwnd_log_rolled = 1;
155 	}
156 }
157 
158 void
159 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
160     int from)
161 {
162 
163 	sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
164 	sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_FR;
165 	sctp_clog[sctp_cwnd_log_at].x.fr.largest_tsn = biggest_tsn;
166 	sctp_clog[sctp_cwnd_log_at].x.fr.largest_new_tsn = biggest_new_tsn;
167 	sctp_clog[sctp_cwnd_log_at].x.fr.tsn = tsn;
168 	sctp_cwnd_log_at++;
169 	if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
170 		sctp_cwnd_log_at = 0;
171 		sctp_cwnd_log_rolled = 1;
172 	}
173 }
174 
175 void
176 sctp_log_strm_del(struct sctp_tmit_chunk *chk, struct sctp_tmit_chunk *poschk,
177     int from)
178 {
179 
180 	if (chk == NULL) {
181 		printf("Gak log of NULL?\n");
182 		return;
183 	}
184 	sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
185 	sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_STRM;
186 	sctp_clog[sctp_cwnd_log_at].x.strlog.n_tsn = chk->rec.data.TSN_seq;
187 	sctp_clog[sctp_cwnd_log_at].x.strlog.n_sseq = chk->rec.data.stream_seq;
188 	if (poschk != NULL) {
189 		sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn =
190 		    poschk->rec.data.TSN_seq;
191 		sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq =
192 		    poschk->rec.data.stream_seq;
193 	} else {
194 		sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = 0;
195 		sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = 0;
196 	}
197 	sctp_cwnd_log_at++;
198 	if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
199 		sctp_cwnd_log_at = 0;
200 		sctp_cwnd_log_rolled = 1;
201 	}
202 }
203 
204 void
205 sctp_log_cwnd(struct sctp_nets *net, int augment, uint8_t from)
206 {
207 
208 	sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
209 	sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_CWND;
210 	sctp_clog[sctp_cwnd_log_at].x.cwnd.net = net;
211 	sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_new_value = net->cwnd;
212 	sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size;
213 	sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_augment = augment;
214 	sctp_cwnd_log_at++;
215 	if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
216 		sctp_cwnd_log_at = 0;
217 		sctp_cwnd_log_rolled = 1;
218 	}
219 }
220 
221 void
222 sctp_log_maxburst(struct sctp_nets *net, int error, int burst, uint8_t from)
223 {
224 	sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
225 	sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_MAXBURST;
226 	sctp_clog[sctp_cwnd_log_at].x.cwnd.net = net;
227 	sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_new_value = error;
228 	sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size;
229 	sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_augment = burst;
230 	sctp_cwnd_log_at++;
231 	if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
232 		sctp_cwnd_log_at = 0;
233 		sctp_cwnd_log_rolled = 1;
234 	}
235 }
236 
237 void
238 sctp_log_rwnd(uint8_t from, u_int32_t peers_rwnd , u_int32_t snd_size, u_int32_t overhead)
239 {
240 	sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
241 	sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_RWND;
242 	sctp_clog[sctp_cwnd_log_at].x.rwnd.rwnd = peers_rwnd;
243 	sctp_clog[sctp_cwnd_log_at].x.rwnd.send_size = snd_size;
244 	sctp_clog[sctp_cwnd_log_at].x.rwnd.overhead = overhead;
245 	sctp_clog[sctp_cwnd_log_at].x.rwnd.new_rwnd = 0;
246 	sctp_cwnd_log_at++;
247 	if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
248 		sctp_cwnd_log_at = 0;
249 		sctp_cwnd_log_rolled = 1;
250 	}
251 }
252 
253 void
254 sctp_log_rwnd_set(uint8_t from, u_int32_t peers_rwnd , u_int32_t flight_size, u_int32_t overhead, u_int32_t a_rwndval)
255 {
256 	sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
257 	sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_RWND;
258 	sctp_clog[sctp_cwnd_log_at].x.rwnd.rwnd = peers_rwnd;
259 	sctp_clog[sctp_cwnd_log_at].x.rwnd.send_size = flight_size;
260 	sctp_clog[sctp_cwnd_log_at].x.rwnd.overhead = overhead;
261 	sctp_clog[sctp_cwnd_log_at].x.rwnd.new_rwnd = a_rwndval;
262 	sctp_cwnd_log_at++;
263 	if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
264 		sctp_cwnd_log_at = 0;
265 		sctp_cwnd_log_rolled = 1;
266 	}
267 }
268 
269 void
270 sctp_log_mbcnt(uint8_t from, u_int32_t total_oq , u_int32_t book, u_int32_t total_mbcnt_q, u_int32_t mbcnt)
271 {
272 	sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
273 	sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_MBCNT;
274 	sctp_clog[sctp_cwnd_log_at].x.mbcnt.total_queue_size = total_oq;
275 	sctp_clog[sctp_cwnd_log_at].x.mbcnt.size_change  = book;
276 	sctp_clog[sctp_cwnd_log_at].x.mbcnt.total_queue_mb_size = total_mbcnt_q;
277 	sctp_clog[sctp_cwnd_log_at].x.mbcnt.mbcnt_change = mbcnt;
278 	sctp_cwnd_log_at++;
279 	if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
280 		sctp_cwnd_log_at = 0;
281 		sctp_cwnd_log_rolled = 1;
282 	}
283 }
284 
285 void
286 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc)
287 {
288 
289 	sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
290 	sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_BLOCK;
291 	sctp_clog[sctp_cwnd_log_at].x.blk.maxmb = (u_int16_t)(so->so_snd.sb_mbmax/1024);
292 	sctp_clog[sctp_cwnd_log_at].x.blk.onmb = asoc->total_output_mbuf_queue_size;
293 	sctp_clog[sctp_cwnd_log_at].x.blk.maxsb = (u_int16_t)(so->so_snd.sb_hiwat/1024);
294 	sctp_clog[sctp_cwnd_log_at].x.blk.onsb = asoc->total_output_queue_size;
295 	sctp_clog[sctp_cwnd_log_at].x.blk.send_sent_qcnt = (u_int16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
296 	sctp_clog[sctp_cwnd_log_at].x.blk.stream_qcnt = (u_int16_t)asoc->stream_queue_cnt;
297 	sctp_cwnd_log_at++;
298 	if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
299 		sctp_cwnd_log_at = 0;
300 		sctp_cwnd_log_rolled = 1;
301 	}
302 }
303 
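/*
 * Copy as much of the stat log as fits into the caller-supplied mbuf.
 * The sctp_cwnd_log_req header in the mbuf selects (and on return
 * describes) the range of entries returned.
 */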
304 int
305 sctp_fill_stat_log(struct mbuf *m)
306 {
307 	struct sctp_cwnd_log_req *req;
308 	int size_limit, num, i, at, cnt_out=0;
309 
310 	if (m == NULL)
311 		return (EINVAL);
312 
313 	size_limit = (m->m_len - sizeof(struct sctp_cwnd_log_req));
314 	if (size_limit < sizeof(struct sctp_cwnd_log)) {
315 		return (EINVAL);
316 	}
317 	req = mtod(m, struct sctp_cwnd_log_req *);
318 	num = size_limit/sizeof(struct sctp_cwnd_log);
319 	if (sctp_cwnd_log_rolled) {
320 		req->num_in_log = SCTP_STAT_LOG_SIZE;
321 	} else {
322 		req->num_in_log = sctp_cwnd_log_at;
323 		/* if the log has not rolled, we don't
324 		 * let you have old data.
325 		 */
326  		if (req->end_at > sctp_cwnd_log_at) {
327 			req->end_at = sctp_cwnd_log_at;
328 		}
329 	}
330 	if ((num < SCTP_STAT_LOG_SIZE) &&
331 	    ((sctp_cwnd_log_rolled) || (sctp_cwnd_log_at > num))) {
332 		/* we can't return all of it */
333 		if (((req->start_at == 0) && (req->end_at == 0)) ||
334 		    (req->start_at >= SCTP_STAT_LOG_SIZE) ||
335 		    (req->end_at >= SCTP_STAT_LOG_SIZE)) {
336 			/* No user request, or the request is out of range. */
337 			req->num_ret = num;
338 			req->end_at = sctp_cwnd_log_at - 1;
339 			if ((sctp_cwnd_log_at - num) < 0) {
340 				int cc;
341 				cc = num - sctp_cwnd_log_at;
342 				req->start_at = SCTP_STAT_LOG_SIZE - cc;
343 			} else {
344 				req->start_at = sctp_cwnd_log_at - num;
345 			}
346 		} else {
347 			/* a user request */
348 			int cc;
349 			if (req->start_at > req->end_at) {
350 				cc = (SCTP_STAT_LOG_SIZE - req->start_at) +
351 				    (req->end_at + 1);
352 			} else {
353 
354 				cc = req->end_at - req->start_at;
355 			}
356 			if (cc < num) {
357 				num = cc;
358 			}
359 			req->num_ret = num;
360 		}
361 	} else {
362 		/* We can return all  of it */
363 		req->start_at = 0;
364 		req->end_at = sctp_cwnd_log_at - 1;
365 		req->num_ret = sctp_cwnd_log_at;
366 	}
367 	for (i = 0, at = req->start_at; i < req->num_ret; i++) {
368 		req->log[i] = sctp_clog[at];
369 		cnt_out++;
370 		at++;
371 		if (at >= SCTP_STAT_LOG_SIZE)
372 			at = 0;
373 	}
374 	m->m_len = (cnt_out * sizeof(struct sctp_cwnd_log)) + sizeof(struct sctp_cwnd_log_req);
375 	return (0);
376 }
377 
378 #endif
379 
380 #ifdef SCTP_AUDITING_ENABLED
381 u_int8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
382 static int sctp_audit_indx = 0;
383 
384 static
385 void sctp_print_audit_report(void)
386 {
387 	int i;
388 	int cnt;
389 	cnt = 0;
390 	for (i=sctp_audit_indx;i<SCTP_AUDIT_SIZE;i++) {
391 		if ((sctp_audit_data[i][0] == 0xe0) &&
392 		    (sctp_audit_data[i][1] == 0x01)) {
393 			cnt = 0;
394 			printf("\n");
395 		} else if (sctp_audit_data[i][0] == 0xf0) {
396 			cnt = 0;
397 			printf("\n");
398 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
399 		    (sctp_audit_data[i][1] == 0x01)) {
400 			printf("\n");
401 			cnt = 0;
402 		}
403 		printf("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
404 		    (uint32_t)sctp_audit_data[i][1]);
405 		cnt++;
406 		if ((cnt % 14) == 0)
407 			printf("\n");
408 	}
409 	for (i=0;i<sctp_audit_indx;i++) {
410 		if ((sctp_audit_data[i][0] == 0xe0) &&
411 		    (sctp_audit_data[i][1] == 0x01)) {
412 			cnt = 0;
413 			printf("\n");
414 		} else if (sctp_audit_data[i][0] == 0xf0) {
415 			cnt = 0;
416 			printf("\n");
417 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
418 			 (sctp_audit_data[i][1] == 0x01)) {
419 			printf("\n");
420 			cnt = 0;
421 		}
422 		printf("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
423 		    (uint32_t)sctp_audit_data[i][1]);
424 		cnt++;
425 		if ((cnt % 14) == 0)
426 			printf("\n");
427 	}
428 	printf("\n");
429 }
430 
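/*
 * Consistency check for the auditing build: recompute the retransmit
 * count, flight size and flight count from the sent queue and the nets
 * list, log any mismatch and correct the association's counters.
 */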
431 void sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
432     struct sctp_nets *net)
433 {
434 	int resend_cnt, tot_out, rep, tot_book_cnt;
435 	struct sctp_nets *lnet;
436 	struct sctp_tmit_chunk *chk;
437 
438 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
439 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
440 	sctp_audit_indx++;
441 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
442 		sctp_audit_indx = 0;
443 	}
444 	if (inp == NULL) {
445 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
446 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
447 		sctp_audit_indx++;
448 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
449 			sctp_audit_indx = 0;
450 		}
451 		return;
452 	}
453 	if (stcb == NULL) {
454 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
455 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
456 		sctp_audit_indx++;
457 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
458 			sctp_audit_indx = 0;
459 		}
460 		return;
461 	}
462 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
463 	sctp_audit_data[sctp_audit_indx][1] =
464 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
465 	sctp_audit_indx++;
466 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
467 		sctp_audit_indx = 0;
468 	}
469 	rep = 0;
470 	tot_book_cnt = 0;
471 	resend_cnt = tot_out = 0;
472 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
473 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
474 			resend_cnt++;
475 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
476 			tot_out += chk->book_size;
477 			tot_book_cnt++;
478 		}
479 	}
480 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
481 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
482 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
483 		sctp_audit_indx++;
484 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
485 			sctp_audit_indx = 0;
486 		}
487 		printf("resend_cnt:%d asoc-tot:%d\n",
488 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
489 		rep = 1;
490 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
491 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
492 		sctp_audit_data[sctp_audit_indx][1] =
493 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
494 		sctp_audit_indx++;
495 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
496 			sctp_audit_indx = 0;
497 		}
498 	}
499 	if (tot_out != stcb->asoc.total_flight) {
500 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
501 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
502 		sctp_audit_indx++;
503 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
504 			sctp_audit_indx = 0;
505 		}
506 		rep = 1;
507 		printf("tot_flt:%d asoc_tot:%d\n", tot_out,
508 		    (int)stcb->asoc.total_flight);
509 		stcb->asoc.total_flight = tot_out;
510 	}
511 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
512 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
513 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
514 		sctp_audit_indx++;
515 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
516 			sctp_audit_indx = 0;
517 		}
518 		rep = 1;
519 		printf("tot_flt_book:%d\n", tot_book_cnt);
520 
521 		stcb->asoc.total_flight_count = tot_book_cnt;
522 	}
523 	tot_out = 0;
524 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
525 		tot_out += lnet->flight_size;
526 	}
527 	if (tot_out != stcb->asoc.total_flight) {
528 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
529 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
530 		sctp_audit_indx++;
531 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
532 			sctp_audit_indx = 0;
533 		}
534 		rep = 1;
535 		printf("real flight:%d net total was %d\n",
536 		    stcb->asoc.total_flight, tot_out);
537 		/* now corrective action */
538 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
539 			tot_out = 0;
540 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
541 				if ((chk->whoTo == lnet) &&
542 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
543 					tot_out += chk->book_size;
544 				}
545 			}
546 			if (lnet->flight_size != tot_out) {
547 				printf("net:%p flight was %d corrected to %d\n",
548 				    lnet, lnet->flight_size, tot_out);
549 				lnet->flight_size = tot_out;
550 			}
551 
552 		}
553 	}
554 
555 	if (rep) {
556 		sctp_print_audit_report();
557 	}
558 }
559 
560 void
561 sctp_audit_log(u_int8_t ev, u_int8_t fd)
562 {
563 	sctp_audit_data[sctp_audit_indx][0] = ev;
564 	sctp_audit_data[sctp_audit_indx][1] = fd;
565 	sctp_audit_indx++;
566 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
567 		sctp_audit_indx = 0;
568 	}
569 }
570 
571 #endif
572 
573 /*
574  * a list of sizes based on typical MTUs, used only if the next-hop
575  * MTU size is not returned.
576  */
577 static int sctp_mtu_sizes[] = {
578 	68,
579 	296,
580 	508,
581 	512,
582 	544,
583 	576,
584 	1006,
585 	1492,
586 	1500,
587 	1536,
588 	2002,
589 	2048,
590 	4352,
591 	4464,
592 	8166,
593 	17914,
594 	32000,
595 	65535
596 };
597 
598 int
599 find_next_best_mtu(int totsz)
600 {
601 	int i, perfer;
602 	/*
603 	 * if we are in here we must find the next best fit based on the
604 	 * size of the dg that failed to be sent.
605 	 */
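	/* e.g. a 1300-byte datagram that failed maps down to 1006. */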
606 	perfer = 0;
607 	for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
608 		if (totsz < sctp_mtu_sizes[i]) {
609 			perfer = i - 1;
610 			if (perfer < 0)
611 				perfer = 0;
612 			break;
613 		}
614 	}
615 	return (sctp_mtu_sizes[perfer]);
616 }
617 
618 void
619 sctp_fill_random_store(struct sctp_pcb *m)
620 {
621 	/*
622 	 * Here we use MD5/SHA-1 to hash our good random numbers
623 	 * and our counter. The result becomes our good random numbers and
624 	 * we then set up to give these out. Note that we do no locking
625 	 * to protect this. That is OK, since if competing callers get in
626 	 * here we will just get more gobbledygook in the random store, which
627 	 * is what we want. There is a danger that two callers will use
628 	 * the same random numbers, but that's OK too since that
629 	 * is random as well :->
630 	 */
631 	m->store_at = 0;
632 	sctp_hash_digest((char *)m->random_numbers, sizeof(m->random_numbers),
633 			 (char *)&m->random_counter, sizeof(m->random_counter),
634 			 (char *)m->random_store);
635 	m->random_counter++;
636 }
637 
638 uint32_t
639 sctp_select_initial_TSN(struct sctp_pcb *m)
640 {
641 	/*
642 	 * A true implementation should use a random selection process to
643 	 * get the initial stream sequence number, using RFC 1750 as a
644 	 * good guideline
645 	 */
646 	u_long x, *xp;
647 	uint8_t *p;
648 
649 	if (m->initial_sequence_debug != 0) {
650 		u_int32_t ret;
651 		ret = m->initial_sequence_debug;
652 		m->initial_sequence_debug++;
653 		return (ret);
654 	}
655 	if ((m->store_at+sizeof(u_long)) > SCTP_SIGNATURE_SIZE) {
656 		/* Refill the random store */
657 		sctp_fill_random_store(m);
658 	}
659 	p = &m->random_store[(int)m->store_at];
660 	xp = (u_long *)p;
661 	x = *xp;
662 	m->store_at += sizeof(u_long);
663 	return (x);
664 }
665 
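/*
 * Pick a verification tag: keep drawing values from the random store
 * until we get a non-zero tag that sctp_is_vtag_good() accepts.
 */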
666 u_int32_t sctp_select_a_tag(struct sctp_inpcb *m)
667 {
668 	u_long x, not_done;
669 	struct timeval now;
670 
671 	SCTP_GETTIME_TIMEVAL(&now);
672 	not_done = 1;
673 	while (not_done) {
674 		x = sctp_select_initial_TSN(&m->sctp_ep);
675 		if (x == 0) {
676 			/* we never use 0 */
677 			continue;
678 		}
679 		if (sctp_is_vtag_good(m, x, &now)) {
680 			not_done = 0;
681 		}
682 	}
683 	return (x);
684 }
685 
686 
687 int
688 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_association *asoc,
689 	       int for_a_init, uint32_t override_tag )
690 {
691 	/*
692 	 * Anything set to zero is taken care of by the allocation
693 	 * routine's bzero
694 	 */
695 
696 	/*
697 	 * Up front select what scoping to apply on addresses I tell my peer
698 	 * Not sure what to do with these right now, we will need to come up
699 	 * with a way to set them. We may need to pass them through from the
700 	 * caller in the sctp_aloc_assoc() function.
701 	 */
702 	int i;
703 	/* init all variables to a known value.*/
704 	asoc->state = SCTP_STATE_INUSE;
705 	asoc->max_burst = m->sctp_ep.max_burst;
706 	asoc->heart_beat_delay = m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT];
707 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
708 
709 	if (override_tag) {
710 		asoc->my_vtag = override_tag;
711 	} else {
712 		asoc->my_vtag = sctp_select_a_tag(m);
713 	}
714 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
715 		sctp_select_initial_TSN(&m->sctp_ep);
716 	asoc->t3timeout_highest_marked = asoc->asconf_seq_out;
717 	/* we are optimistic here */
718 	asoc->peer_supports_asconf = 1;
719 	asoc->peer_supports_asconf_setprim = 1;
720 	asoc->peer_supports_pktdrop = 1;
721 
722 	asoc->sent_queue_retran_cnt = 0;
723 	/* This will need to be adjusted */
724 	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
725 	asoc->last_acked_seq = asoc->init_seq_number - 1;
726 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
727 	asoc->asconf_seq_in = asoc->last_acked_seq;
728 
729 	/* here we are different, we hold the next one we expect */
730 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
731 
732 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
733 	asoc->initial_rto = m->sctp_ep.initial_rto;
734 
735 	asoc->max_init_times = m->sctp_ep.max_init_times;
736 	asoc->max_send_times = m->sctp_ep.max_send_times;
737 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
738 
739 	/* ECN Nonce initialization */
740 	asoc->ecn_nonce_allowed = 0;
741 	asoc->receiver_nonce_sum = 1;
742 	asoc->nonce_sum_expect_base = 1;
743 	asoc->nonce_sum_check = 1;
744 	asoc->nonce_resync_tsn = 0;
745 	asoc->nonce_wait_for_ecne = 0;
746 	asoc->nonce_wait_tsn = 0;
747 
748 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
749 		struct in6pcb *inp6;
750 
751 
752 		/* It's an IPv6 socket */
753 		inp6 = (struct in6pcb *)m;
754 		asoc->ipv6_addr_legal = 1;
755 		/* Now look at the binding flag to see if V4 will be legal */
756 	if (
757 #if defined(__OpenBSD__)
758 		(0) /* we always do dual bind */
759 #elif defined (__NetBSD__)
760 		(inp6->in6p_flags & IN6P_IPV6_V6ONLY)
761 #else
762 		(inp6->inp_flags & IN6P_IPV6_V6ONLY)
763 #endif
764 	     == 0) {
765 			asoc->ipv4_addr_legal = 1;
766 		} else {
767 			/* V4 addresses are NOT legal on the association */
768 			asoc->ipv4_addr_legal = 0;
769 		}
770 	} else {
771 		/* It's an IPv4 socket, no IPv6 */
772 		asoc->ipv4_addr_legal = 1;
773 		asoc->ipv6_addr_legal = 0;
774 	}
775 
776 
777 	asoc->my_rwnd = max(m->sctp_socket->so_rcv.sb_hiwat, SCTP_MINIMAL_RWND);
778 	asoc->peers_rwnd = m->sctp_socket->so_rcv.sb_hiwat;
779 
780 	asoc->smallest_mtu = m->sctp_frag_point;
781 	asoc->minrto = m->sctp_ep.sctp_minrto;
782 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
783 
784 	LIST_INIT(&asoc->sctp_local_addr_list);
785 	TAILQ_INIT(&asoc->nets);
786 	TAILQ_INIT(&asoc->pending_reply_queue);
787 	asoc->last_asconf_ack_sent = NULL;
788 	/* Setup to fill the hb random cache at first HB */
789 	asoc->hb_random_idx = 4;
790 
791 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
792 
793 	/*
794 	 * Now the stream parameters, here we allocate space for all
795 	 * streams that we request by default.
796 	 */
797 	asoc->streamoutcnt = asoc->pre_open_streams =
798 	    m->sctp_ep.pre_open_stream_count;
799 	asoc->strmout = malloc(asoc->streamoutcnt *
800 	    sizeof(struct sctp_stream_out), M_PCB, M_NOWAIT);
801 	if (asoc->strmout == NULL) {
802 		/* big trouble no memory */
803 		return (ENOMEM);
804 	}
805 	for (i = 0; i < asoc->streamoutcnt; i++) {
806 		/*
807 		 * inbound side must be set to 0xffff,
808 		 * also NOTE when we get the INIT-ACK back (for INIT sender)
809 		 * we MUST reduce the count (streamoutcnt) but first check
810 		 * if we sent to any of the upper streams that were dropped
811 		 * (if some were). Those that were dropped must be notified
812 		 * to the upper layer as failed to send.
813 		 */
814 		asoc->strmout[i].next_sequence_sent = 0x0;
815 		TAILQ_INIT(&asoc->strmout[i].outqueue);
816 		asoc->strmout[i].stream_no = i;
817 		asoc->strmout[i].next_spoke.tqe_next = 0;
818 		asoc->strmout[i].next_spoke.tqe_prev = 0;
819 	}
820 	/* Now the mapping array */
821 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
822 	asoc->mapping_array = malloc(asoc->mapping_array_size,
823 	       M_PCB, M_NOWAIT);
824 	if (asoc->mapping_array == NULL) {
825 		free(asoc->strmout, M_PCB);
826 		return (ENOMEM);
827 	}
828 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
829 	/* Now the init of the other outqueues */
830 	TAILQ_INIT(&asoc->out_wheel);
831 	TAILQ_INIT(&asoc->control_send_queue);
832 	TAILQ_INIT(&asoc->send_queue);
833 	TAILQ_INIT(&asoc->sent_queue);
834 	TAILQ_INIT(&asoc->reasmqueue);
835 	TAILQ_INIT(&asoc->delivery_queue);
836 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
837 
838 	TAILQ_INIT(&asoc->asconf_queue);
839 	return (0);
840 }
841 
842 int
843 sctp_expand_mapping_array(struct sctp_association *asoc)
844 {
845 	/* mapping array needs to grow */
846 	u_int8_t *new_array;
847 	uint16_t new_size;
848 
849 	new_size = asoc->mapping_array_size + SCTP_MAPPING_ARRAY_INCR;
850 	new_array = malloc(new_size, M_PCB, M_NOWAIT);
851 	if (new_array == NULL) {
852 		/* can't get more, forget it */
853 		printf("No memory for expansion of SCTP mapping array %d\n",
854 		       new_size);
855 		return (-1);
856 	}
857 	memset(new_array, 0, new_size);
858 	memcpy(new_array, asoc->mapping_array, asoc->mapping_array_size);
859 	free(asoc->mapping_array, M_PCB);
860 	asoc->mapping_array = new_array;
861 	asoc->mapping_array_size = new_size;
862 	return (0);
863 }
864 
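/*
 * Callout handler shared by all SCTP timers: decode the sctp_timer
 * argument, dispatch on its type and, for most types, push out any
 * chunks that became sendable as a result.
 */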
865 static void
866 sctp_timeout_handler(void *t)
867 {
868 	struct sctp_inpcb *inp;
869 	struct sctp_tcb *stcb;
870 	struct sctp_nets *net;
871 	struct sctp_timer *tmr;
872 	int did_output;
873 
874 	mutex_enter(softnet_lock);
875 	tmr = (struct sctp_timer *)t;
876 	inp = (struct sctp_inpcb *)tmr->ep;
877 	stcb = (struct sctp_tcb *)tmr->tcb;
878 	net = (struct sctp_nets *)tmr->net;
879 	did_output = 1;
880 
881 #ifdef SCTP_AUDITING_ENABLED
882 	sctp_audit_log(0xF0, (u_int8_t)tmr->type);
883 	sctp_auditing(3, inp, stcb, net);
884 #endif
885 	sctp_pegs[SCTP_TIMERS_EXP]++;
886 
887 	if (inp == NULL) {
		mutex_exit(softnet_lock);
888 		return;
889 	}
890 
891 	SCTP_INP_WLOCK(inp);
892 	if (inp->sctp_socket == 0) {
893 		mutex_exit(softnet_lock);
894 		SCTP_INP_WUNLOCK(inp);
895 		return;
896 	}
897 	if (stcb) {
898 		if (stcb->asoc.state == 0) {
899 			mutex_exit(softnet_lock);
900 			SCTP_INP_WUNLOCK(inp);
901 			return;
902 		}
903 	}
904 #ifdef SCTP_DEBUG
905 	if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
906 		printf("Timer type %d goes off\n", tmr->type);
907 	}
908 #endif /* SCTP_DEBUG */
909 #ifndef __NetBSD__
910 	if (!callout_active(&tmr->timer)) {
911 		SCTP_INP_WUNLOCK(inp);
912 		return;
913 	}
914 #endif
915 	if (stcb) {
916 		SCTP_TCB_LOCK(stcb);
917 	}
918 	SCTP_INP_INCR_REF(inp);
919 	SCTP_INP_WUNLOCK(inp);
920 
921 	switch (tmr->type) {
922 	case SCTP_TIMER_TYPE_ITERATOR:
923 	{
924 		struct sctp_iterator *it;
925 		it = (struct sctp_iterator *)inp;
926 		sctp_iterator_timer(it);
927 	}
928 	break;
929 	/* call the handler for the appropriate timer type */
930 	case SCTP_TIMER_TYPE_SEND:
931 		sctp_pegs[SCTP_TMIT_TIMER]++;
932 		stcb->asoc.num_send_timers_up--;
933 		if (stcb->asoc.num_send_timers_up < 0) {
934 			stcb->asoc.num_send_timers_up = 0;
935 		}
936 		if (sctp_t3rxt_timer(inp, stcb, net)) {
937 			/* no need to unlock the tcb, it's gone */
938 
939 			goto out_decr;
940 		}
941 #ifdef SCTP_AUDITING_ENABLED
942 		sctp_auditing(4, inp, stcb, net);
943 #endif
944 		sctp_chunk_output(inp, stcb, 1);
945 		if ((stcb->asoc.num_send_timers_up == 0) &&
946 		    (stcb->asoc.sent_queue_cnt > 0)
947 			) {
948 			struct sctp_tmit_chunk *chk;
949 			/*
950 			 * safeguard. If there is something on the sent queue
951 			 * but no timers are running, something is
952 			 * wrong... so we start a timer on the first chunk
953 			 * on the sent queue on whatever net it is sent to.
954 			 */
955 			sctp_pegs[SCTP_T3_SAFEGRD]++;
956 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
957 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
958 					 chk->whoTo);
959 		}
960 		break;
961 	case SCTP_TIMER_TYPE_INIT:
962 		if (sctp_t1init_timer(inp, stcb, net)) {
963 			/* no need to unlock the tcb, it's gone */
964 			goto out_decr;
965 		}
966 		/* We do output but not here */
967 		did_output = 0;
968 		break;
969 	case SCTP_TIMER_TYPE_RECV:
970 		sctp_pegs[SCTP_RECV_TIMER]++;
971 		sctp_send_sack(stcb);
972 #ifdef SCTP_AUDITING_ENABLED
973 		sctp_auditing(4, inp, stcb, net);
974 #endif
975 		sctp_chunk_output(inp, stcb, 4);
976 		break;
977 	case SCTP_TIMER_TYPE_SHUTDOWN:
978 		if (sctp_shutdown_timer(inp, stcb, net) ) {
979 			/* no need to unlock the tcb, it's gone */
980 			goto out_decr;
981 		}
982 #ifdef SCTP_AUDITING_ENABLED
983 		sctp_auditing(4, inp, stcb, net);
984 #endif
985 		sctp_chunk_output(inp, stcb, 5);
986 		break;
987 	case SCTP_TIMER_TYPE_HEARTBEAT:
988 		if (sctp_heartbeat_timer(inp, stcb, net)) {
989 			/* no need to unlock the tcb, it's gone */
990 			goto out_decr;
991 		}
992 #ifdef SCTP_AUDITING_ENABLED
993 		sctp_auditing(4, inp, stcb, net);
994 #endif
995 		sctp_chunk_output(inp, stcb, 6);
996 		break;
997 	case SCTP_TIMER_TYPE_COOKIE:
998 		if (sctp_cookie_timer(inp, stcb, net)) {
999 			/* no need to unlock the tcb, it's gone */
1000 			goto out_decr;
1001 		}
1002 #ifdef SCTP_AUDITING_ENABLED
1003 		sctp_auditing(4, inp, stcb, net);
1004 #endif
1005 		sctp_chunk_output(inp, stcb, 1);
1006 		break;
1007 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1008 	{
1009 		struct timeval tv;
1010 		int i, secret;
1011 		SCTP_GETTIME_TIMEVAL(&tv);
1012 		SCTP_INP_WLOCK(inp);
1013 		inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1014 		inp->sctp_ep.last_secret_number =
1015 			inp->sctp_ep.current_secret_number;
1016 		inp->sctp_ep.current_secret_number++;
1017 		if (inp->sctp_ep.current_secret_number >=
1018 		    SCTP_HOW_MANY_SECRETS) {
1019 			inp->sctp_ep.current_secret_number = 0;
1020 		}
1021 		secret = (int)inp->sctp_ep.current_secret_number;
1022 		for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1023 			inp->sctp_ep.secret_key[secret][i] =
1024 				sctp_select_initial_TSN(&inp->sctp_ep);
1025 		}
1026 		SCTP_INP_WUNLOCK(inp);
1027 		sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1028 	}
1029 	did_output = 0;
1030 	break;
1031 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1032 		sctp_pathmtu_timer(inp, stcb, net);
1033 		did_output = 0;
1034 		break;
1035 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1036 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1037 			/* no need to unlock the tcb, it's gone */
1038 			goto out_decr;
1039 		}
1040 #ifdef SCTP_AUDITING_ENABLED
1041 		sctp_auditing(4, inp, stcb, net);
1042 #endif
1043 		sctp_chunk_output(inp, stcb, 7);
1044 		break;
1045 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1046 		sctp_abort_an_association(inp, stcb,
1047 					  SCTP_SHUTDOWN_GUARD_EXPIRES, NULL);
1048 		/* no need to unlock the tcb, it's gone */
1049 		goto out_decr;
1050 		break;
1051 
1052 	case SCTP_TIMER_TYPE_STRRESET:
1053 		if (sctp_strreset_timer(inp, stcb, net)) {
1054 			/* no need to unlock the tcb, it's gone */
1055 			goto out_decr;
1056 		}
1057 		sctp_chunk_output(inp, stcb, 9);
1058 		break;
1059 
1060 	case SCTP_TIMER_TYPE_ASCONF:
1061 		if (sctp_asconf_timer(inp, stcb, net)) {
1062 			/* no need to unlock the tcb, it's gone */
1063 			goto out_decr;
1064 		}
1065 #ifdef SCTP_AUDITING_ENABLED
1066 		sctp_auditing(4, inp, stcb, net);
1067 #endif
1068 		sctp_chunk_output(inp, stcb, 8);
1069 		break;
1070 
1071 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1072 		sctp_autoclose_timer(inp, stcb, net);
1073 		sctp_chunk_output(inp, stcb, 10);
1074 		did_output = 0;
1075 		break;
1076 	case SCTP_TIMER_TYPE_INPKILL:
1077 		/* special case, take away our
1078 		 * increment since WE are the killer
1079 		 */
1080 		SCTP_INP_WLOCK(inp);
1081 		SCTP_INP_DECR_REF(inp);
1082 		SCTP_INP_WUNLOCK(inp);
1083 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL);
1084 		sctp_inpcb_free(inp, 1);
1085 		goto out_no_decr;
1086 		break;
1087 	default:
1088 #ifdef SCTP_DEBUG
1089 		if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1090 			printf("sctp_timeout_handler:unknown timer %d\n",
1091 			       tmr->type);
1092 		}
1093 #endif /* SCTP_DEBUG */
1094 		break;
1095 	};
1096 #ifdef SCTP_AUDITING_ENABLED
1097 	sctp_audit_log(0xF1, (u_int8_t)tmr->type);
1098 	sctp_auditing(5, inp, stcb, net);
1099 #endif
1100 	if (did_output) {
1101 		/*
1102 		 * Now we need to clean up the control chunk chain if an
1103 		 * ECNE is on it. It must be marked as UNSENT again so next
1104 		 * call will continue to send it until such time that we get
1105 		 * a CWR, to remove it. It is, however, less likely that we
1106 		 * will find an ECN echo on the chain though.
1107 		 */
1108 		sctp_fix_ecn_echo(&stcb->asoc);
1109 	}
1110 	if (stcb) {
1111 		SCTP_TCB_UNLOCK(stcb);
1112 	}
1113  out_decr:
1114 	SCTP_INP_WLOCK(inp);
1115 	SCTP_INP_DECR_REF(inp);
1116 	SCTP_INP_WUNLOCK(inp);
1117 
1118  out_no_decr:
1119 
1120 	mutex_exit(softnet_lock);
1121 }
1122 
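/*
 * Arm the timer of the given type. The timeout is derived from the
 * destination's RTO, the endpoint defaults or the association settings,
 * depending on the type; returns EALREADY if the callout is already
 * pending.
 */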
1123 int
1124 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1125     struct sctp_nets *net)
1126 {
1127 	int to_ticks;
1128 	struct sctp_timer *tmr;
1129 
1130 	if (inp == NULL)
1131 		return (EFAULT);
1132 
1133 	to_ticks = 0;
1134 
1135 	tmr = NULL;
1136 	switch (t_type) {
1137 	case SCTP_TIMER_TYPE_ITERATOR:
1138 	{
1139 		struct sctp_iterator *it;
1140 		it = (struct sctp_iterator *)inp;
1141 		tmr = &it->tmr;
1142 		to_ticks = SCTP_ITERATOR_TICKS;
1143 	}
1144 	break;
1145 	case SCTP_TIMER_TYPE_SEND:
1146 		/* Here we use the RTO timer */
1147 	{
1148 		int rto_val;
1149 		if ((stcb == NULL) || (net == NULL)) {
1150 			return (EFAULT);
1151 		}
1152 		tmr = &net->rxt_timer;
1153 		if (net->RTO == 0) {
1154 			rto_val = stcb->asoc.initial_rto;
1155 		} else {
1156 			rto_val = net->RTO;
1157 		}
1158 		to_ticks = MSEC_TO_TICKS(rto_val);
1159 	}
1160 	break;
1161 	case SCTP_TIMER_TYPE_INIT:
1162 		/*
1163 		 * Here we use the INIT timer default
1164 		 * usually about 1 minute.
1165 		 */
1166 		if ((stcb == NULL) || (net == NULL)) {
1167 			return (EFAULT);
1168 		}
1169 		tmr = &net->rxt_timer;
1170 		if (net->RTO == 0) {
1171 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1172 		} else {
1173 			to_ticks = MSEC_TO_TICKS(net->RTO);
1174 		}
1175 		break;
1176 	case SCTP_TIMER_TYPE_RECV:
1177 		/*
1178 		 * Here we use the Delayed-Ack timer value from the inp
1179 		 * ususually about 200ms.
1180 		 * usually about 200 ms.
1181 		if (stcb == NULL) {
1182 			return (EFAULT);
1183 		}
1184 		tmr = &stcb->asoc.dack_timer;
1185 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV];
1186 		break;
1187 	case SCTP_TIMER_TYPE_SHUTDOWN:
1188 		/* Here we use the RTO of the destination. */
1189 		if ((stcb == NULL) || (net == NULL)) {
1190 			return (EFAULT);
1191 		}
1192 
1193 		if (net->RTO == 0) {
1194 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1195 		} else {
1196 			to_ticks = MSEC_TO_TICKS(net->RTO);
1197 		}
1198 		tmr = &net->rxt_timer;
1199 		break;
1200 	case SCTP_TIMER_TYPE_HEARTBEAT:
1201 		/*
1202 		 * the net is used here so that we can add in the RTO,
1203 		 * even though we use a different timer. We also add the
1204 		 * HB delay PLUS a random jitter.
1205 		 */
1206 		if (stcb == NULL) {
1207 			return (EFAULT);
1208 		}
1209 		{
1210 			uint32_t rndval;
1211 			uint8_t this_random;
1212 			int cnt_of_unconf=0;
1213 			struct sctp_nets *lnet;
1214 
1215 			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
1216 				if (lnet->dest_state & SCTP_ADDR_UNCONFIRMED) {
1217 					cnt_of_unconf++;
1218 				}
1219 			}
1220 #ifdef SCTP_DEBUG
1221 			if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1222 				printf("HB timer to start unconfirmed:%d hb_delay:%d\n",
1223 				       cnt_of_unconf, stcb->asoc.heart_beat_delay);
1224 			}
1225 #endif
1226 			if (stcb->asoc.hb_random_idx > 3) {
1227 				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
1228 				memcpy(stcb->asoc.hb_random_values, &rndval,
1229 				       sizeof(stcb->asoc.hb_random_values));
1230 				this_random = stcb->asoc.hb_random_values[0];
1231 				stcb->asoc.hb_random_idx = 0;
1232 				stcb->asoc.hb_ect_randombit = 0;
1233 			} else {
1234 				this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
1235 				stcb->asoc.hb_random_idx++;
1236 				stcb->asoc.hb_ect_randombit = 0;
1237 			}
1238 			/*
1239 			 * this_random will be 0 - 255 ms;
1240 			 * RTO is in ms.
1241 			 */
1242 			if ((stcb->asoc.heart_beat_delay == 0) &&
1243 			    (cnt_of_unconf == 0)) {
1244 				/* no HB on this inp after confirmations */
1245 				return (0);
1246 			}
1247 			if (net) {
1248 				int delay;
1249 				delay = stcb->asoc.heart_beat_delay;
1250 				TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
1251 					if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1252 					    ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
1253 					    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
1254 					    delay = 0;
1255 					}
1256 				}
1257 				if (net->RTO == 0) {
1258 					/* Never been checked */
1259 					to_ticks = this_random + stcb->asoc.initial_rto + delay;
1260 				} else {
1261 					/* set rto_val to the ms */
1262 					to_ticks = delay + net->RTO + this_random;
1263 				}
1264 			} else {
1265 				if (cnt_of_unconf) {
1266 					to_ticks = this_random + stcb->asoc.initial_rto;
1267 				} else {
1268 					to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
1269 				}
1270 			}
1271 			/*
1272 			 * Now we must convert the to_ticks that are now in
1273 			 * ms to ticks.
1274 			 */
1275 			to_ticks *= hz;
1276 			to_ticks /= 1000;
1277 #ifdef SCTP_DEBUG
1278 			if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1279 				printf("Timer to expire in %d ticks\n", to_ticks);
1280 			}
1281 #endif
1282 			tmr = &stcb->asoc.hb_timer;
1283 		}
1284 		break;
1285 	case SCTP_TIMER_TYPE_COOKIE:
1286 		/*
1287 		 * Here we can use the RTO timer from the network since
1288 		 * one RTT was compelete. If a retran happened then we will
1289 		 * one RTT was complete. If a retransmission happened then we will
1290 		 */
1291 		if ((stcb == NULL) || (net == NULL)) {
1292 			return (EFAULT);
1293 		}
1294 		if (net->RTO == 0) {
1295 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1296 		} else {
1297 			to_ticks = MSEC_TO_TICKS(net->RTO);
1298 		}
1299 		tmr = &net->rxt_timer;
1300 		break;
1301 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1302 		/*
1303 		 * nothing needed but the endpoint here
1304 		 * usually about 60 minutes.
1305 		 */
1306 		tmr = &inp->sctp_ep.signature_change;
1307 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
1308 		break;
1309 	case SCTP_TIMER_TYPE_INPKILL:
1310 		/*
1311 		 * The inp is setup to die. We re-use the
1312 		 * signature_change timer since that has
1313 		 * stopped and we are in the GONE state.
1314 		 */
1315 		tmr = &inp->sctp_ep.signature_change;
1316 		to_ticks = (SCTP_INP_KILL_TIMEOUT * hz) / 1000;
1317 		break;
1318 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1319 		/*
1320 		 * Here we use the value found in the EP for PMTU
1321 		 * usually about 10 minutes.
1322 		 */
1323 		if (stcb == NULL) {
1324 			return (EFAULT);
1325 		}
1326 		if (net == NULL) {
1327 			return (EFAULT);
1328 		}
1329 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
1330 		tmr = &net->pmtu_timer;
1331 		break;
1332 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1333 		/* Here we use the RTO of the destination */
1334 		if ((stcb == NULL) || (net == NULL)) {
1335 			return (EFAULT);
1336 		}
1337 		if (net->RTO == 0) {
1338 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1339 		} else {
1340 			to_ticks = MSEC_TO_TICKS(net->RTO);
1341 		}
1342 		tmr = &net->rxt_timer;
1343 		break;
1344 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1345 		/*
1346 		 * Here we use the endpoint's shutdown guard timer
1347 		 * usually about 3 minutes.
1348 		 */
1349 		if (stcb == NULL) {
1350 			return (EFAULT);
1351 		}
1352 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
1353 		tmr = &stcb->asoc.shut_guard_timer;
1354 		break;
1355 	case SCTP_TIMER_TYPE_STRRESET:
1356 		/*
1357 		 * Here the timer comes from the inp
1358 		 * but its value is from the RTO.
1359 		 */
1360 		if ((stcb == NULL) || (net == NULL)) {
1361 			return (EFAULT);
1362 		}
1363 		if (net->RTO == 0) {
1364 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1365 		} else {
1366 			to_ticks = MSEC_TO_TICKS(net->RTO);
1367 		}
1368 		tmr = &stcb->asoc.strreset_timer;
1369 		break;
1370 
1371 	case SCTP_TIMER_TYPE_ASCONF:
1372 		/*
1373 		 * Here the timer comes from the inp
1374 		 * but its value is from the RTO.
1375 		 */
1376 		if ((stcb == NULL) || (net == NULL)) {
1377 			return (EFAULT);
1378 		}
1379 		if (net->RTO == 0) {
1380 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1381 		} else {
1382 			to_ticks = MSEC_TO_TICKS(net->RTO);
1383 		}
1384 		tmr = &stcb->asoc.asconf_timer;
1385 		break;
1386 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1387 		if (stcb == NULL) {
1388 			return (EFAULT);
1389 		}
1390 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
1391 			/* Really an error since stcb is NOT set to autoclose */
1392 			return (0);
1393 		}
1394 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
1395 		tmr = &stcb->asoc.autoclose_timer;
1396 		break;
1397 	default:
1398 #ifdef SCTP_DEBUG
1399 		if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1400 			printf("sctp_timer_start:Unknown timer type %d\n",
1401 			       t_type);
1402 		}
1403 #endif /* SCTP_DEBUG */
1404 		return (EFAULT);
1405 		break;
1406 	};
1407 	if ((to_ticks <= 0) || (tmr == NULL)) {
1408 #ifdef SCTP_DEBUG
1409 		if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1410 			printf("sctp_timer_start:%d:software error to_ticks:%d tmr:%p not set ??\n",
1411 			       t_type, to_ticks, tmr);
1412 		}
1413 #endif /* SCTP_DEBUG */
1414 		return (EFAULT);
1415 	}
1416 	if (callout_pending(&tmr->timer)) {
1417 		/*
1418 		 * we do NOT allow you to have it already running.
1419 		 * if it is we leave the current one up unchanged
1420 		 */
1421 		return (EALREADY);
1422 	}
1423 	/* At this point we can proceed */
1424 	if (t_type == SCTP_TIMER_TYPE_SEND) {
1425 		stcb->asoc.num_send_timers_up++;
1426 	}
1427 	tmr->type = t_type;
1428 	tmr->ep = (void *)inp;
1429 	tmr->tcb = (void *)stcb;
1430 	tmr->net = (void *)net;
1431 	callout_reset(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
1432 	return (0);
1433 }
1434 
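/*
 * Stop the timer of the given type, but only if the (possibly shared)
 * sctp_timer slot for that type is actually running it.
 */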
1435 int
1436 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1437 		struct sctp_nets *net)
1438 {
1439 	struct sctp_timer *tmr;
1440 
1441 	if (inp == NULL)
1442 		return (EFAULT);
1443 
1444 	tmr = NULL;
1445 	switch (t_type) {
1446 	case SCTP_TIMER_TYPE_ITERATOR:
1447 	{
1448 		struct sctp_iterator *it;
1449 		it = (struct sctp_iterator *)inp;
1450 		tmr = &it->tmr;
1451 	}
1452 	break;
1453 	case SCTP_TIMER_TYPE_SEND:
1454 		if ((stcb == NULL) || (net == NULL)) {
1455 			return (EFAULT);
1456 		}
1457 		tmr = &net->rxt_timer;
1458 		break;
1459 	case SCTP_TIMER_TYPE_INIT:
1460 		if ((stcb == NULL) || (net == NULL)) {
1461 			return (EFAULT);
1462 		}
1463 		tmr = &net->rxt_timer;
1464 		break;
1465 	case SCTP_TIMER_TYPE_RECV:
1466 		if (stcb == NULL) {
1467 			return (EFAULT);
1468 		}
1469 		tmr = &stcb->asoc.dack_timer;
1470 		break;
1471 	case SCTP_TIMER_TYPE_SHUTDOWN:
1472 		if ((stcb == NULL) || (net == NULL)) {
1473 			return (EFAULT);
1474 		}
1475 		tmr = &net->rxt_timer;
1476 		break;
1477 	case SCTP_TIMER_TYPE_HEARTBEAT:
1478 		if (stcb == NULL) {
1479 			return (EFAULT);
1480 		}
1481 		tmr = &stcb->asoc.hb_timer;
1482 		break;
1483 	case SCTP_TIMER_TYPE_COOKIE:
1484 		if ((stcb == NULL) || (net == NULL)) {
1485 			return (EFAULT);
1486 		}
1487 		tmr = &net->rxt_timer;
1488 		break;
1489 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1490 		/* nothing needed but the endpoint here */
1491 		tmr = &inp->sctp_ep.signature_change;
1492 		/* We re-use the newcookie timer for
1493 		 * the INP kill timer. We must assure
1494 		 * the INP kill timer. We must ensure
1495 		 */
1496 		break;
1497 	case SCTP_TIMER_TYPE_INPKILL:
1498 		/*
1499 		 * The inp is setup to die. We re-use the
1500 		 * signature_change timer since that has
1501 		 * stopped and we are in the GONE state.
1502 		 */
1503 		tmr = &inp->sctp_ep.signature_change;
1504 		break;
1505 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1506 		if (stcb == NULL) {
1507 			return (EFAULT);
1508 		}
1509 		if (net == NULL) {
1510 			return (EFAULT);
1511 		}
1512 		tmr = &net->pmtu_timer;
1513 		break;
1514 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1515 		if ((stcb == NULL) || (net == NULL)) {
1516 			return (EFAULT);
1517 		}
1518 		tmr = &net->rxt_timer;
1519 		break;
1520 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1521 		if (stcb == NULL) {
1522 			return (EFAULT);
1523 		}
1524 		tmr = &stcb->asoc.shut_guard_timer;
1525 		break;
1526 	case SCTP_TIMER_TYPE_STRRESET:
1527 		if (stcb == NULL) {
1528 			return (EFAULT);
1529 		}
1530 		tmr = &stcb->asoc.strreset_timer;
1531 		break;
1532 	case SCTP_TIMER_TYPE_ASCONF:
1533 		if (stcb == NULL) {
1534 			return (EFAULT);
1535 		}
1536 		tmr = &stcb->asoc.asconf_timer;
1537 		break;
1538 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1539 		if (stcb == NULL) {
1540 			return (EFAULT);
1541 		}
1542 		tmr = &stcb->asoc.autoclose_timer;
1543 		break;
1544 	default:
1545 #ifdef SCTP_DEBUG
1546 		if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1547 			printf("sctp_timer_stop:Unknown timer type %d\n",
1548 			       t_type);
1549 		}
1550 #endif /* SCTP_DEBUG */
1551 		break;
1552 	};
1553 	if (tmr == NULL)
1554 		return (EFAULT);
1555 
1556 	if ((tmr->type != t_type) && tmr->type) {
1557 		/*
1558 		 * OK, we have a timer that is under joint use, e.g. the cookie
1559 		 * timer sharing storage with the SEND timer. We therefore are NOT
1560 		 * running the timer that the caller wants stopped.  So just
1561 		 * return.
1562 		 */
1563 		return (0);
1564 	}
1565 	if (t_type == SCTP_TIMER_TYPE_SEND) {
1566 		stcb->asoc.num_send_timers_up--;
1567 		if (stcb->asoc.num_send_timers_up < 0) {
1568 			stcb->asoc.num_send_timers_up = 0;
1569 		}
1570 	}
1571 	callout_stop(&tmr->timer);
1572 	return (0);
1573 }
1574 
1575 #ifdef SCTP_USE_ADLER32
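/*
 * Incrementally extend an Adler-32 checksum over the 'len' bytes at
 * 'buf', using subtraction instead of a modulo on each step.
 */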
1576 static uint32_t
1577 update_adler32(uint32_t adler, uint8_t *buf, int32_t len)
1578 {
1579 	u_int32_t s1 = adler & 0xffff;
1580 	u_int32_t s2 = (adler >> 16) & 0xffff;
1581 	int n;
1582 
1583 	for (n = 0; n < len; n++, buf++) {
1584 		/* s1 = (s1 + buf[n]) % BASE */
1585 		/* first we add */
1586 		s1 = (s1 + *buf);
1587 		/*
1588 		 * now if we need to, we do a mod by subtracting. It seems
1589 		 * a bit faster since I really will only ever do one subtract
1590 		 * at the MOST, since buf[n] is a max of 255.
1591 		 */
1592 		if (s1 >= SCTP_ADLER32_BASE) {
1593 			s1 -= SCTP_ADLER32_BASE;
1594 		}
1595 		/* s2 = (s2 + s1) % BASE */
1596 		/* first we add */
1597 		s2 = (s2 + s1);
1598 		/*
1599 		 * again, it is more efficent (it seems) to subtract since
1600 		 * the most s2 will ever be is (BASE-1 + BASE-1) in the worse
1601 		 * case. This would then be (2 * BASE) - 2, which will still
1602 		 * only do one subtract. On Intel this is much better to do
1603 		 * this way and avoid the divide. Have not -pg'd on sparc.
1604 		 */
1605 		if (s2 >= SCTP_ADLER32_BASE) {
1606 			s2 -= SCTP_ADLER32_BASE;
1607 		}
1608 	}
1609 	/* Return the adler32 of the bytes buf[0..len-1] */
1610 	return ((s2 << 16) + s1);
1611 }
1612 
1613 #endif
1614 
1615 
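/* Walk an mbuf chain and return the total number of data bytes in it. */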
1616 u_int32_t
1617 sctp_calculate_len(struct mbuf *m)
1618 {
1619 	u_int32_t tlen=0;
1620 	struct mbuf *at;
1621 	at = m;
1622 	while (at) {
1623 		tlen += at->m_len;
1624 		at = at->m_next;
1625 	}
1626 	return (tlen);
1627 }
1628 
1629 #if defined(SCTP_WITH_NO_CSUM)
1630 
1631 uint32_t
1632 sctp_calculate_sum(struct mbuf *m, int32_t *pktlen, uint32_t offset)
1633 {
1634 	/*
1635 	 * Given an mbuf chain with a packet header offset by 'offset'
1636 	 * pointing at an sctphdr (with csum set to 0), go through
1637 	 * the chain of m_next's and calculate the SCTP checksum.
1638 	 * This is currently Adler32 but will change to CRC32c
1639 	 * soon. As a side bonus it also calculates the total length
1640 	 * of the mbuf chain.
1641 	 * Note: if offset is greater than the total mbuf length,
1642 	 * checksum=1, pktlen=0 is returned (i.e. no real error code).
1643 	 */
1644 	if (pktlen == NULL)
1645 		return (0);
1646 	*pktlen = sctp_calculate_len(m);
1647 	return (0);
1648 }
1649 
1650 #elif defined(SCTP_USE_INCHKSUM)
1651 
1652 #include <machine/in_cksum.h>
1653 
1654 uint32_t
1655 sctp_calculate_sum(struct mbuf *m, int32_t *pktlen, uint32_t offset)
1656 {
1657 	/*
1658 	 * Given an mbuf chain with a packet header offset by 'offset'
1659 	 * pointing at an sctphdr (with csum set to 0), go through
1660 	 * the chain of m_next's and calculate the SCTP checksum.
1661 	 * This is currently Adler32 but will change to CRC32c
1662 	 * soon. As a side bonus it also calculates the total length
1663 	 * of the mbuf chain.
1664 	 * Note: if offset is greater than the total mbuf length,
1665 	 * checksum=1, pktlen=0 is returned (i.e. no real error code).
1666 	 */
1667 	int32_t tlen=0;
1668 	struct mbuf *at;
1669 	uint32_t the_sum, retsum;
1670 
1671 	at = m;
1672 	while (at) {
1673 		tlen += at->m_len;
1674 		at = at->m_next;
1675 	}
1676 	the_sum = (uint32_t)(in_cksum_skip(m, tlen, offset));
1677 	if (pktlen != NULL)
1678 		*pktlen = (tlen-offset);
1679 	retsum = htons(the_sum);
1680 	return (the_sum);
1681 }
1682 
1683 #else
1684 
1685 uint32_t
1686 sctp_calculate_sum(struct mbuf *m, int32_t *pktlen, uint32_t offset)
1687 {
1688 	/*
1689 	 * Given an mbuf chain with a packet header offset by 'offset'
1690 	 * pointing at an sctphdr (with csum set to 0), go through
1691 	 * the chain of m_next's and calculate the SCTP checksum.
1692 	 * This is currently Adler32 but will change to CRC32c
1693 	 * soon. As a side bonus it also calculates the total length
1694 	 * of the mbuf chain.
1695 	 * Note: if offset is greater than the total mbuf length,
1696 	 * checksum=1, pktlen=0 is returned (i.e. no real error code).
1697 	 */
1698 	int32_t tlen=0;
1699 #ifdef SCTP_USE_ADLER32
1700 	uint32_t base = 1L;
1701 #else
1702 	uint32_t base = 0xffffffff;
1703 #endif /* SCTP_USE_ADLER32 */
1704 	struct mbuf *at;
1705 	at = m;
1706 	/* find the correct mbuf and offset into mbuf */
1707 	while ((at != NULL) && (offset > (uint32_t)at->m_len)) {
1708 		offset -= at->m_len;	/* update remaining offset left */
1709 		at = at->m_next;
1710 	}
1711 
1712 	while (at != NULL) {
1713 #ifdef SCTP_USE_ADLER32
1714 		base = update_adler32(base, at->m_data + offset,
1715 		    at->m_len - offset);
1716 #else
1717 		base = update_crc32(base, at->m_data + offset,
1718 		    at->m_len - offset);
1719 #endif /* SCTP_USE_ADLER32 */
1720 		tlen += at->m_len - offset;
1721 		/* we only offset once into the first mbuf */
1722 		if (offset) {
1723 			offset = 0;
1724 		}
1725 		at = at->m_next;
1726 	}
1727 	if (pktlen != NULL) {
1728 		*pktlen = tlen;
1729 	}
1730 #ifdef SCTP_USE_ADLER32
1731 	/* Adler32 */
1732 	base = htonl(base);
1733 #else
1734 	/* CRC-32c */
1735 	base = sctp_csum_finalize(base);
1736 #endif
1737 	return (base);
1738 }
1739 
1740 
1741 #endif
1742 
1743 void
1744 sctp_mtu_size_reset(struct sctp_inpcb *inp,
1745 		    struct sctp_association *asoc, u_long mtu)
1746 {
1747 	/*
1748 	 * Reset the P-MTU size on this association; this involves changing
1749 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu
1750 	 * to allow the DF flag to be cleared.
1751 	 */
1752 	struct sctp_tmit_chunk *chk;
1753 	struct sctp_stream_out *strm;
1754 	unsigned int eff_mtu, ovh;
1755 	asoc->smallest_mtu = mtu;
1756 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1757 		ovh = SCTP_MIN_OVERHEAD;
1758 	} else {
1759 		ovh = SCTP_MIN_V4_OVERHEAD;
1760 	}
1761 	eff_mtu = mtu - ovh;
1762 	/* Now mark any chunks that need to let IP fragment */
1763 	TAILQ_FOREACH(strm, &asoc->out_wheel, next_spoke) {
1764 		TAILQ_FOREACH(chk, &strm->outqueue, sctp_next) {
1765 			if (chk->send_size > eff_mtu) {
1766 				chk->flags &= SCTP_DONT_FRAGMENT;
1767 				chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
1768 			}
1769 		}
1770 	}
1771 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
1772 		if (chk->send_size > eff_mtu) {
1773 			chk->flags &= SCTP_DONT_FRAGMENT;
1774 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
1775 		}
1776 	}
1777 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
1778 		if (chk->send_size > eff_mtu) {
1779 			chk->flags &= SCTP_DONT_FRAGMENT;
1780 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
1781 		}
1782 	}
1783 }
1784 
1785 
1786 /*
1787  * given an association and starting time of the current RTT period
1788  * return the new RTO in milliseconds
1789  * net should point to the current network
1790  */
1791 u_int32_t
1792 sctp_calculate_rto(struct sctp_tcb *stcb,
1793 		   struct sctp_association *asoc,
1794 		   struct sctp_nets *net,
1795 		   struct timeval *old)
1796 {
1797 	/*
1798 	 * given an association and the starting time of the current RTT
1799 	 * period (in 'old') return the new RTO in milliseconds.
1800 	 */
1801 	int calc_time = 0;
1802 	unsigned int new_rto = 0;
1803 	int first_measure = 0;
1804 	struct timeval now;
1805 
1806 	/************************/
1807 	/* 1. calculate new RTT */
1808 	/************************/
1809 	/* get the current time */
1810 	SCTP_GETTIME_TIMEVAL(&now);
1811 	/* compute the RTT value */
1812 	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
1813 		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
1814 		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
1815 			calc_time += (((u_long)now.tv_usec -
1816 				       (u_long)old->tv_usec)/1000);
1817 		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
1818 			/* Borrow 1,000ms from current calculation */
1819 			calc_time -= 1000;
1820 			/* Add in the slop over */
1821 			calc_time += ((int)now.tv_usec/1000);
1822 			/* Add in the pre-second ms's */
1823 			calc_time += (((int)1000000 - (int)old->tv_usec)/1000);
1824 		}
1825 	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
1826 		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
1827 			calc_time = ((u_long)now.tv_usec -
1828 				     (u_long)old->tv_usec)/1000;
1829 		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
1830 			/* clock went backwards .. garbage in, nothing out */
1831 			return (((net->lastsa >> 2) + net->lastsv) >> 1);
1832 		} else {
1833 			/* identical timestamps, no measurable RTT */
1834 			return (((net->lastsa >> 2) + net->lastsv) >> 1);
1835 		}
1836 	} else {
1837 		/* Clock wrapped? */
1838 		return (((net->lastsa >> 2) + net->lastsv) >> 1);
1839 	}
1840 	/***************************/
1841 	/* 2. update RTTVAR & SRTT */
1842 	/***************************/
1843 #if 0
1844 	/*	if (net->lastsv || net->lastsa) {*/
1845 	/* per Section 5.3.1 C3 in SCTP */
1846 	/*		net->lastsv = (int) 	*//* RTTVAR */
1847 	/*			(((double)(1.0 - 0.25) * (double)net->lastsv) +
1848 				(double)(0.25 * (double)abs(net->lastsa - calc_time)));
1849 				net->lastsa = (int) */	/* SRTT */
1850 	/*(((double)(1.0 - 0.125) * (double)net->lastsa) +
1851 	  (double)(0.125 * (double)calc_time));
1852 	  } else {
1853 	*//* the first RTT calculation, per C2 Section 5.3.1 */
1854 	/*		net->lastsa = calc_time;	*//* SRTT */
1855 	/*		net->lastsv = calc_time / 2;	*//* RTTVAR */
1856 	/*	}*/
1857 	/* if RTTVAR goes to 0, set it to the clock granularity */
1858 	/*	if (net->lastsv == 0) {
1859 		net->lastsv = SCTP_CLOCK_GRANULARITY;
1860 		}
1861 		new_rto = net->lastsa + 4 * net->lastsv;
1862 	*/
1863 #endif
1864 	/* this is Van Jacobson's integer version */
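	/*
	 * In outline, with calc_time holding the measured RTT in ms and
	 * lastsa/lastsv carrying the smoothed RTT and variance state:
	 *
	 *	err     = RTT - (lastsa >> 3)
	 *	lastsa += err
	 *	lastsv += |err| - (lastsv >> 2)
	 *	RTO     = ((lastsa >> 2) + lastsv) >> 1
	 *
	 * For example, a first measurement of 100 ms takes the else branch
	 * below and leaves lastsa = 100, lastsv = 50, giving a raw RTO of
	 * ((100 >> 2) + 50) >> 1 = 37, which is then clamped into the
	 * [minrto, maxrto] range further down.
	 */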
1865 	if (net->RTO) {
1866 		calc_time -= (net->lastsa >> 3);
1867 		net->lastsa += calc_time;
1868 		if (calc_time < 0) {
1869 			calc_time = -calc_time;
1870 		}
1871 		calc_time -= (net->lastsv >> 2);
1872 		net->lastsv += calc_time;
1873 		if (net->lastsv == 0) {
1874 			net->lastsv = SCTP_CLOCK_GRANULARITY;
1875 		}
1876 	} else {
1877 		/* First RTO measurement */
1878 		net->lastsa = calc_time;
1879 		net->lastsv = calc_time >> 1;
1880 		first_measure = 1;
1881 	}
1882 	new_rto = ((net->lastsa >> 2) + net->lastsv) >> 1;
1883 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
1884 	    (stcb->asoc.sat_network_lockout == 0)) {
1885 		stcb->asoc.sat_network = 1;
1886 	} else if ((!first_measure) && stcb->asoc.sat_network) {
1887 		stcb->asoc.sat_network = 0;
1888 		stcb->asoc.sat_network_lockout = 1;
1889 	}
1890 	/* bound it, per C6/C7 in Section 5.3.1 */
1891 	if (new_rto < stcb->asoc.minrto) {
1892 		new_rto = stcb->asoc.minrto;
1893 	}
1894 	if (new_rto > stcb->asoc.maxrto) {
1895 		new_rto = stcb->asoc.maxrto;
1896 	}
1897 	/* we now return the computed RTO */
1898 	return ((u_int32_t)new_rto);
1899 }
1900 
1901 
1902 /*
1903  * return a pointer to a contiguous piece of data from the given
1904  * mbuf chain starting at 'off' for 'len' bytes.  If the desired
1905  * piece spans more than one mbuf, a copy is made into 'in_ptr'.
1906  * The caller must ensure that the buffer size is >= 'len'.
1907  * Returns NULL if there aren't 'len' bytes in the chain.
1908  */
1909 void *
1910 sctp_m_getptr(struct mbuf *m, int off, int len, u_int8_t *in_ptr)
1911 {
1912 	uint32_t count;
1913 	uint8_t *ptr;
1914 	ptr = in_ptr;
1915 	if ((off < 0) || (len <= 0))
1916 		return (NULL);
1917 
1918 	/* find the desired start location */
1919 	while ((m != NULL) && (off > 0)) {
1920 		if (off < m->m_len)
1921 			break;
1922 		off -= m->m_len;
1923 		m = m->m_next;
1924 	}
1925 	if (m == NULL)
1926 		return (NULL);
1927 
1928 	/* is the current mbuf large enough (i.e. the data is contiguous)? */
1929 	if ((m->m_len - off) >= len) {
1930 		return ((void *)(mtod(m, vaddr_t) + off));
1931 	} else {
1932 		/* else, it spans more than one mbuf, so save a temp copy... */
1933 		while ((m != NULL) && (len > 0)) {
1934 			count = min(m->m_len - off, len);
1935 			memcpy(ptr, (void *)(mtod(m, vaddr_t) + off), count);
1936 			len -= count;
1937 			ptr += count;
1938 			off = 0;
1939 			m = m->m_next;
1940 		}
1941 		if ((m == NULL) && (len > 0))
1942 			return (NULL);
1943 		else
1944 			return ((void *)in_ptr);
1945 	}
1946 }
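
/*
 * A typical use of sctp_m_getptr(), as seen later in this file, is to pull
 * a fixed-size header out of a possibly non-contiguous chain into a
 * caller-supplied buffer:
 *
 *	struct sctp_chunkhdr *ch, chunk_buf;
 *
 *	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
 *	    sizeof(*ch), (u_int8_t *)&chunk_buf);
 *
 * The returned pointer may reference either the mbuf data itself or
 * chunk_buf, so callers should treat it as read-only.
 */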
1947 
1948 
1949 struct sctp_paramhdr *
1950 sctp_get_next_param(struct mbuf *m,
1951 		    int offset,
1952 		    struct sctp_paramhdr *pull,
1953 		    int pull_limit)
1954 {
1955 	/* This just provides a typed signature to Peter's Pull routine */
1956 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
1957     	    (u_int8_t *)pull));
1958 }
1959 
1960 
1961 int
1962 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
1963 {
1964 	/*
1965 	 * add padlen bytes of zero-filled padding to the end of the mbuf.
1966 	 * If padlen is > 3 this routine will fail.
1967 	 */
1968 	u_int8_t *dp;
1969 	int i;
1970 	if (padlen > 3) {
1971 		return (ENOBUFS);
1972 	}
1973 	if (M_TRAILINGSPACE(m) >= padlen) {
1974 		/*
1975 		 * The easy way.
1976 		 * We hope the majority of the time we hit here :)
1977 		 */
1978 		dp = (u_int8_t *)(mtod(m, vaddr_t) + m->m_len);
1979 		m->m_len += padlen;
1980 	} else {
1981 		/* Hard way we must grow the mbuf */
1982 		struct mbuf *tmp;
1983 		MGET(tmp, M_DONTWAIT, MT_DATA);
1984 		if (tmp == NULL) {
1985 			/* Out of space GAK! we are in big trouble. */
1986 			return (ENOSPC);
1987 		}
1988 		/* setup and insert in middle */
1989 		tmp->m_next = m->m_next;
1990 		tmp->m_len = padlen;
1991 		m->m_next = tmp;
1992 		dp = mtod(tmp, u_int8_t *);
1993 	}
1994 	/* zero out the pad */
1995 	for (i = 0; i < padlen; i++) {
1996 		*dp = 0;
1997 		dp++;
1998 	}
1999 	return (0);
2000 }
2001 
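
/*
 * SCTP chunks and parameters are padded out to a multiple of 4 bytes on
 * the wire, so a well-formed caller never needs more than 3 bytes of
 * padding; that is why padlen > 3 is rejected above.
 */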
2002 int
2003 sctp_pad_lastmbuf(struct mbuf *m, int padval)
2004 {
2005 	/* find the last mbuf in chain and pad it */
2006 	struct mbuf *m_at;
2007 	m_at = m;
2008 	while (m_at) {
2009 		if (m_at->m_next == NULL) {
2010 			return (sctp_add_pad_tombuf(m_at, padval));
2011 		}
2012 		m_at = m_at->m_next;
2013 	}
2014 	return (EFAULT);
2015 }
2016 
2017 static void
2018 sctp_notify_assoc_change(u_int32_t event, struct sctp_tcb *stcb,
2019     u_int32_t error)
2020 {
2021 	struct mbuf *m_notify;
2022 	struct sctp_assoc_change *sac;
2023 	const struct sockaddr *to;
2024 	struct sockaddr_in6 sin6, lsa6;
2025 
2026 #ifdef SCTP_DEBUG
2027 	printf("notify: %d\n", event);
2028 #endif
2029 	/*
2030 	 * First, if we are going down, dump everything we
2031 	 * can to the socket rcv queue.
2032 	 */
2033 	if ((event == SCTP_SHUTDOWN_COMP) || (event == SCTP_COMM_LOST)) {
2034 		sctp_deliver_data(stcb, &stcb->asoc, NULL, 0);
2035 	}
2036 
2037 	/*
2038 	 * For TCP model AND UDP connected sockets we will send
2039 	 * an error up when an ABORT comes in.
2040 	 */
2041 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2042 	     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2043 	    (event == SCTP_COMM_LOST)) {
2044 		stcb->sctp_socket->so_error = ECONNRESET;
2045 		/* Wake ANY sleepers */
2046 		sowwakeup(stcb->sctp_socket);
2047 		sorwakeup(stcb->sctp_socket);
2048 	}
2049 #if 0
2050 	if ((event == SCTP_COMM_UP) &&
2051 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
2052  	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
2053 		 soisconnected(stcb->sctp_socket);
2054 	}
2055 #endif
2056 	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2057 		/* event not enabled */
2058 		return;
2059 	}
2060 	MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
2061 	if (m_notify == NULL)
2062 		/* no space left */
2063 		return;
2064 	m_notify->m_len = 0;
2065 
2066 	sac = mtod(m_notify, struct sctp_assoc_change *);
2067 	sac->sac_type = SCTP_ASSOC_CHANGE;
2068 	sac->sac_flags = 0;
2069 	sac->sac_length = sizeof(struct sctp_assoc_change);
2070 	sac->sac_state = event;
2071 	sac->sac_error = error;
2072 	/* XXX verify these stream counts */
2073 	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2074 	sac->sac_inbound_streams = stcb->asoc.streamincnt;
2075 	sac->sac_assoc_id = sctp_get_associd(stcb);
2076 
2077 	m_notify->m_flags |= M_EOR | M_NOTIFICATION;
2078 	m_notify->m_pkthdr.len = sizeof(struct sctp_assoc_change);
2079 	m_notify->m_pkthdr.rcvif = 0;
2080 	m_notify->m_len = sizeof(struct sctp_assoc_change);
2081 	m_notify->m_next = NULL;
2082 
2083 	/* append to socket */
2084 	to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
2085 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2086 	    to->sa_family == AF_INET) {
2087 		const struct sockaddr_in *sin;
2088 
2089 		sin = (const struct sockaddr_in *)to;
2090 		memset(&sin6, 0, sizeof(sin6));
2091 		sin6.sin6_family = AF_INET6;
2092 		sin6.sin6_len = sizeof(struct sockaddr_in6);
2093 		sin6.sin6_addr.s6_addr16[2] = 0xffff;
2094 		memcpy(&sin6.sin6_addr.s6_addr16[3], &sin->sin_addr,
2095 		    sizeof(sin6.sin6_addr.s6_addr16[3]));
2096 		sin6.sin6_port = sin->sin_port;
2097 		to = (struct sockaddr *)&sin6;
2098 	}
2099 	/* check and strip embedded scope junk */
2100 	to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
2101 						   &lsa6);
2102 	/*
2103 	 * We need to always notify comm changes.
2104 	 * if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2105 	 * 	sctp_m_freem(m_notify);
2106 	 *	return;
2107 	 * }
2108 	*/
2109 	SCTP_TCB_UNLOCK(stcb);
2110 	SCTP_INP_WLOCK(stcb->sctp_ep);
2111 	SCTP_TCB_LOCK(stcb);
2112 	if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv,
2113 	    to, m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2114 		/* not enough room */
2115 		sctp_m_freem(m_notify);
2116 		SCTP_INP_WUNLOCK(stcb->sctp_ep);
2117 		return;
2118 	}
2119 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2120 	   ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2121 		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2122 			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2123 		}
2124 	} else {
2125 		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2126 	}
2127 	SCTP_INP_WUNLOCK(stcb->sctp_ep);
2128 	/* Wake up any sleeper */
2129 	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2130 	sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
2131 }
2132 
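/*
 * The sctp_notify_*() routines below all follow the same pattern as
 * sctp_notify_assoc_change() above: check that the event is enabled on
 * the endpoint, build an M_NOTIFICATION mbuf holding the per-event
 * structure, rewrite an IPv4 primary destination into the mapped-v6 form
 * when SCTP_PCB_FLAGS_NEEDS_MAPPED_V4 is set, strip any embedded scope
 * with sctp_recover_scope(), and append the result to the socket receive
 * buffer with sbappendaddr_nocheck() before waking any readers.
 */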
2133 static void
2134 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2135     const struct sockaddr *sa, uint32_t error)
2136 {
2137 	struct mbuf *m_notify;
2138 	struct sctp_paddr_change *spc;
2139 	const struct sockaddr *to;
2140 	struct sockaddr_in6 sin6, lsa6;
2141 
2142 	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVPADDREVNT))
2143 		/* event not enabled */
2144 		return;
2145 
2146 	MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
2147 	if (m_notify == NULL)
2148 		return;
2149 	m_notify->m_len = 0;
2150 
2151 	MCLGET(m_notify, M_DONTWAIT);
2152 	if ((m_notify->m_flags & M_EXT) != M_EXT) {
2153 		sctp_m_freem(m_notify);
2154 		return;
2155 	}
2156 
2157 	spc = mtod(m_notify, struct sctp_paddr_change *);
2158 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2159 	spc->spc_flags = 0;
2160 	spc->spc_length = sizeof(struct sctp_paddr_change);
2161 	if (sa->sa_family == AF_INET) {
2162 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2163 	} else {
2164 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2165 	}
2166 	spc->spc_state = state;
2167 	spc->spc_error = error;
2168 	spc->spc_assoc_id = sctp_get_associd(stcb);
2169 
2170 	m_notify->m_flags |= M_EOR | M_NOTIFICATION;
2171 	m_notify->m_pkthdr.len = sizeof(struct sctp_paddr_change);
2172 	m_notify->m_pkthdr.rcvif = 0;
2173 	m_notify->m_len = sizeof(struct sctp_paddr_change);
2174 	m_notify->m_next = NULL;
2175 
2176 	to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
2177 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2178 	    to->sa_family == AF_INET) {
2179 		const struct sockaddr_in *sin;
2180 
2181 		sin = (const struct sockaddr_in *)to;
2182 		memset(&sin6, 0, sizeof(sin6));
2183 		sin6.sin6_family = AF_INET6;
2184 		sin6.sin6_len = sizeof(struct sockaddr_in6);
2185 		sin6.sin6_addr.s6_addr16[2] = 0xffff;
2186 		bcopy(&sin->sin_addr, &sin6.sin6_addr.s6_addr16[3],
2187 		    sizeof(sin6.sin6_addr.s6_addr16[3]));
2188 		sin6.sin6_port = sin->sin_port;
2189 		to = (struct sockaddr *)&sin6;
2190 	}
2191 	/* check and strip embedded scope junk */
2192 	to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
2193 	    &lsa6);
2194 
2195 	if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2196 		sctp_m_freem(m_notify);
2197 		return;
2198 	}
2199 	/* append to socket */
2200 	SCTP_TCB_UNLOCK(stcb);
2201 	SCTP_INP_WLOCK(stcb->sctp_ep);
2202 	SCTP_TCB_LOCK(stcb);
2203 	if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
2204 	    m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2205 		/* not enough room */
2206 		sctp_m_freem(m_notify);
2207 		SCTP_INP_WUNLOCK(stcb->sctp_ep);
2208 		return;
2209 	}
2210 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2211 	   ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2212 		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2213 			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2214 		}
2215 	} else {
2216 		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2217 	}
2218 	SCTP_INP_WUNLOCK(stcb->sctp_ep);
2219 	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2220 }
2221 
2222 
2223 static void
2224 sctp_notify_send_failed(struct sctp_tcb *stcb, u_int32_t error,
2225 			struct sctp_tmit_chunk *chk)
2226 {
2227 	struct mbuf *m_notify;
2228 	struct sctp_send_failed *ssf;
2229 	struct sockaddr_in6 sin6, lsa6;
2230 	const struct sockaddr *to;
2231 	int length;
2232 
2233 	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVSENDFAILEVNT))
2234 		/* event not enabled */
2235 		return;
2236 
2237 	length = sizeof(struct sctp_send_failed) + chk->send_size;
2238 	MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
2239 	if (m_notify == NULL)
2240 		/* no space left */
2241 		return;
2242 	m_notify->m_len = 0;
2243 	ssf = mtod(m_notify, struct sctp_send_failed *);
2244 	ssf->ssf_type = SCTP_SEND_FAILED;
2245 	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
2246 		ssf->ssf_flags = SCTP_DATA_UNSENT;
2247 	else
2248 		ssf->ssf_flags = SCTP_DATA_SENT;
2249 	ssf->ssf_length = length;
2250 	ssf->ssf_error = error;
2251 	/* not exactly what the user sent in, but should be close :) */
2252 	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
2253 	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
2254 	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
2255 	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
2256 	ssf->ssf_info.sinfo_context = chk->rec.data.context;
2257 	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
2258 	ssf->ssf_assoc_id = sctp_get_associd(stcb);
2259 	m_notify->m_next = chk->data;
2260 	if (m_notify->m_next == NULL)
2261 		m_notify->m_flags |= M_EOR | M_NOTIFICATION;
2262 	else {
2263 		struct mbuf *m;
2264 		m_notify->m_flags |= M_NOTIFICATION;
2265 		m = m_notify;
2266 		while (m->m_next != NULL)
2267 			m = m->m_next;
2268 		m->m_flags |= M_EOR;
2269 	}
2270 	m_notify->m_pkthdr.len = length;
2271 	m_notify->m_pkthdr.rcvif = 0;
2272 	m_notify->m_len = sizeof(struct sctp_send_failed);
2273 
2274 	/* Steal off the mbuf */
2275 	chk->data = NULL;
2276 	to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
2277 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2278 	    to->sa_family == AF_INET) {
2279 		const struct sockaddr_in *sin;
2280 
2281 		sin = satocsin(to);
2282 		memset(&sin6, 0, sizeof(sin6));
2283 		sin6.sin6_family = AF_INET6;
2284 		sin6.sin6_len = sizeof(struct sockaddr_in6);
2285 		sin6.sin6_addr.s6_addr16[2] = 0xffff;
2286 		bcopy(&sin->sin_addr, &sin6.sin6_addr.s6_addr16[3],
2287 		    sizeof(sin6.sin6_addr.s6_addr16[3]));
2288 		sin6.sin6_port = sin->sin_port;
2289 		to = (struct sockaddr *)&sin6;
2290 	}
2291 	/* check and strip embedded scope junk */
2292 	to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
2293 						   &lsa6);
2294 
2295 	if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2296 		sctp_m_freem(m_notify);
2297 		return;
2298 	}
2299 
2300 	/* append to socket */
2301 	SCTP_TCB_UNLOCK(stcb);
2302 	SCTP_INP_WLOCK(stcb->sctp_ep);
2303 	SCTP_TCB_LOCK(stcb);
2304 	if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
2305 	    m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2306 		/* not enough room */
2307 		sctp_m_freem(m_notify);
2308 		SCTP_INP_WUNLOCK(stcb->sctp_ep);
2309 		return;
2310 	}
2311 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2312 	   ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2313 		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2314 			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2315 		}
2316 	} else {
2317 		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2318 	}
2319 	SCTP_INP_WUNLOCK(stcb->sctp_ep);
2320 	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2321 }
2322 
2323 static void
2324 sctp_notify_adaption_layer(struct sctp_tcb *stcb,
2325 			   u_int32_t error)
2326 {
2327 	struct mbuf *m_notify;
2328 	struct sctp_adaption_event *sai;
2329 	struct sockaddr_in6 sin6, lsa6;
2330 	const struct sockaddr *to;
2331 
2332 	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_ADAPTIONEVNT))
2333 		/* event not enabled */
2334 		return;
2335 
2336 	MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
2337 	if (m_notify == NULL)
2338 		/* no space left */
2339 		return;
2340 	m_notify->m_len = 0;
2341 	sai = mtod(m_notify, struct sctp_adaption_event *);
2342 	sai->sai_type = SCTP_ADAPTION_INDICATION;
2343 	sai->sai_flags = 0;
2344 	sai->sai_length = sizeof(struct sctp_adaption_event);
2345 	sai->sai_adaption_ind = error;
2346 	sai->sai_assoc_id = sctp_get_associd(stcb);
2347 
2348 	m_notify->m_flags |= M_EOR | M_NOTIFICATION;
2349 	m_notify->m_pkthdr.len = sizeof(struct sctp_adaption_event);
2350 	m_notify->m_pkthdr.rcvif = 0;
2351 	m_notify->m_len = sizeof(struct sctp_adaption_event);
2352 	m_notify->m_next = NULL;
2353 
2354 	to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
2355 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2356 	    (to->sa_family == AF_INET)) {
2357 		const struct sockaddr_in *sin;
2358 
2359 		sin = satocsin(to);
2360 		memset(&sin6, 0, sizeof(sin6));
2361 		sin6.sin6_family = AF_INET6;
2362 		sin6.sin6_len = sizeof(struct sockaddr_in6);
2363 		sin6.sin6_addr.s6_addr16[2] = 0xffff;
2364 		bcopy(&sin->sin_addr, &sin6.sin6_addr.s6_addr16[3],
2365 		    sizeof(sin6.sin6_addr.s6_addr16[3]));
2366 		sin6.sin6_port = sin->sin_port;
2367 		to = (struct sockaddr *)&sin6;
2368 	}
2369 	/* check and strip embedded scope junk */
2370 	to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
2371 						   &lsa6);
2372 	if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2373 		sctp_m_freem(m_notify);
2374 		return;
2375 	}
2376 	/* append to socket */
2377 	SCTP_TCB_UNLOCK(stcb);
2378 	SCTP_INP_WLOCK(stcb->sctp_ep);
2379 	SCTP_TCB_LOCK(stcb);
2380 	if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
2381 	    m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2382 		/* not enough room */
2383 		sctp_m_freem(m_notify);
2384 		SCTP_INP_WUNLOCK(stcb->sctp_ep);
2385 		return;
2386 	}
2387 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2388 	   ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2389 		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2390 			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2391 		}
2392 	} else {
2393 		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2394 	}
2395 	SCTP_INP_WUNLOCK(stcb->sctp_ep);
2396 	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2397 }
2398 
2399 static void
2400 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb,
2401 					u_int32_t error)
2402 {
2403 	struct mbuf *m_notify;
2404 	struct sctp_pdapi_event *pdapi;
2405 	struct sockaddr_in6 sin6, lsa6;
2406 	const struct sockaddr *to;
2407 
2408 	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_PDAPIEVNT))
2409 		/* event not enabled */
2410 		return;
2411 
2412 	MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
2413 	if (m_notify == NULL)
2414 		/* no space left */
2415 		return;
2416 	m_notify->m_len = 0;
2417 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
2418 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
2419 	pdapi->pdapi_flags = 0;
2420 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
2421 	pdapi->pdapi_indication = error;
2422 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
2423 
2424 	m_notify->m_flags |= M_EOR | M_NOTIFICATION;
2425 	m_notify->m_pkthdr.len = sizeof(struct sctp_pdapi_event);
2426 	m_notify->m_pkthdr.rcvif = 0;
2427 	m_notify->m_len = sizeof(struct sctp_pdapi_event);
2428 	m_notify->m_next = NULL;
2429 
2430 	to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
2431 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2432 	    (to->sa_family == AF_INET)) {
2433 		const struct sockaddr_in *sin;
2434 
2435 		sin = satocsin(to);
2436 		memset(&sin6, 0, sizeof(sin6));
2437 		sin6.sin6_family = AF_INET6;
2438 		sin6.sin6_len = sizeof(struct sockaddr_in6);
2439 		sin6.sin6_addr.s6_addr16[2] = 0xffff;
2440 		bcopy(&sin->sin_addr, &sin6.sin6_addr.s6_addr16[3],
2441 		    sizeof(sin6.sin6_addr.s6_addr16[3]));
2442 		sin6.sin6_port = sin->sin_port;
2443 		to = (struct sockaddr *)&sin6;
2444 	}
2445 	/* check and strip embedded scope junk */
2446 	to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
2447 						   &lsa6);
2448 	if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2449 		sctp_m_freem(m_notify);
2450 		return;
2451 	}
2452 	/* append to socket */
2453 	SCTP_TCB_UNLOCK(stcb);
2454 	SCTP_INP_WLOCK(stcb->sctp_ep);
2455 	SCTP_TCB_LOCK(stcb);
2456 	if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
2457 	    m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2458 		/* not enough room */
2459 		sctp_m_freem(m_notify);
2460 		SCTP_INP_WUNLOCK(stcb->sctp_ep);
2461 		return;
2462 	}
2463 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2464 	   ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2465 		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2466 			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2467 		}
2468 	} else {
2469 		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2470 	}
2471 	SCTP_INP_WUNLOCK(stcb->sctp_ep);
2472 	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2473 }
2474 
2475 static void
2476 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
2477 {
2478 	struct mbuf *m_notify;
2479 	struct sctp_shutdown_event *sse;
2480 	struct sockaddr_in6 sin6, lsa6;
2481 	const struct sockaddr *to;
2482 
2483 	/*
2484 	 * For TCP model AND UDP connected sockets we mark the socket
2485 	 * closed for reading and writing when a SHUTDOWN completes.
2486 	 */
2487 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2488 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
2489 		/* mark socket closed for read/write and wakeup! */
2490 		socantrcvmore(stcb->sctp_socket);
2491 		socantsendmore(stcb->sctp_socket);
2492 	}
2493 
2494 	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
2495 		/* event not enabled */
2496 		return;
2497 
2498 	MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
2499 	if (m_notify == NULL)
2500 		/* no space left */
2501 		return;
2502 	m_notify->m_len = 0;
2503 	sse = mtod(m_notify, struct sctp_shutdown_event *);
2504 	sse->sse_type = SCTP_SHUTDOWN_EVENT;
2505 	sse->sse_flags = 0;
2506 	sse->sse_length = sizeof(struct sctp_shutdown_event);
2507 	sse->sse_assoc_id = sctp_get_associd(stcb);
2508 
2509 	m_notify->m_flags |= M_EOR | M_NOTIFICATION;
2510 	m_notify->m_pkthdr.len = sizeof(struct sctp_shutdown_event);
2511 	m_notify->m_pkthdr.rcvif = 0;
2512 	m_notify->m_len = sizeof(struct sctp_shutdown_event);
2513 	m_notify->m_next = NULL;
2514 
2515 	to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
2516 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2517 	    to->sa_family == AF_INET) {
2518 		const struct sockaddr_in *sin;
2519 
2520 		sin = satocsin(to);
2521 		memset(&sin6, 0, sizeof(sin6));
2522 		sin6.sin6_family = AF_INET6;
2523 		sin6.sin6_len = sizeof(struct sockaddr_in6);
2524 		sin6.sin6_addr.s6_addr16[2] = 0xffff;
2525 		bcopy(&sin->sin_addr, &sin6.sin6_addr.s6_addr16[3],
2526 		    sizeof(sin6.sin6_addr.s6_addr16[3]));
2527 		sin6.sin6_port = sin->sin_port;
2528 		to = (struct sockaddr *)&sin6;
2529 	}
2530 	/* check and strip embedded scope junk */
2531 	to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
2532 	    &lsa6);
2533 	if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2534 		sctp_m_freem(m_notify);
2535 		return;
2536 	}
2537 	/* append to socket */
2538 	SCTP_TCB_UNLOCK(stcb);
2539 	SCTP_INP_WLOCK(stcb->sctp_ep);
2540 	SCTP_TCB_LOCK(stcb);
2541 	if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
2542 	    m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2543 		/* not enough room */
2544 		sctp_m_freem(m_notify);
2545 		SCTP_INP_WUNLOCK(stcb->sctp_ep);
2546 		return;
2547 	}
2548 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2549 	   ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2550 		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2551 			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2552 		}
2553 	} else {
2554 		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2555 	}
2556 	SCTP_INP_WUNLOCK(stcb->sctp_ep);
2557 	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2558 }
2559 
2560 static void
2561 sctp_notify_stream_reset(struct sctp_tcb *stcb,
2562     int number_entries, uint16_t *list, int flag)
2563 {
2564 	struct mbuf *m_notify;
2565 	struct sctp_stream_reset_event *strreset;
2566 	struct sockaddr_in6 sin6, lsa6;
2567 	const struct sockaddr *to;
2568 	int len;
2569 
2570 	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_STREAM_RESETEVNT))
2571 		/* event not enabled */
2572 		return;
2573 
2574 	MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
2575 	if (m_notify == NULL)
2576 		/* no space left */
2577 		return;
2578 	m_notify->m_len = 0;
2579 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
2580 	if (len > M_TRAILINGSPACE(m_notify)) {
2581 		MCLGET(m_notify, M_WAIT);
2582 	}
2583 	if (m_notify == NULL)
2584 		/* no clusters */
2585 		return;
2586 
2587 	if (len > M_TRAILINGSPACE(m_notify)) {
2588 		/* never enough room */
2589 		m_freem(m_notify);
2590 		return;
2591 	}
2592 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
2593 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
2594 	if (number_entries == 0) {
2595 		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
2596 	} else {
2597 		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
2598 	}
2599 	strreset->strreset_length = len;
2600 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
2601 	if (number_entries) {
2602 		int i;
2603 		for (i = 0; i < number_entries; i++) {
2604 			strreset->strreset_list[i] = list[i];
2605 		}
2606 	}
2607 	m_notify->m_flags |= M_EOR | M_NOTIFICATION;
2608 	m_notify->m_pkthdr.len = len;
2609 	m_notify->m_pkthdr.rcvif = 0;
2610 	m_notify->m_len = len;
2611 	m_notify->m_next = NULL;
2612 	if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2613 		/* no space */
2614 		sctp_m_freem(m_notify);
2615 		return;
2616 	}
2617 	to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
2618 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2619 	    to->sa_family == AF_INET) {
2620 		const struct sockaddr_in *sin;
2621 
2622 		sin = satocsin(to);
2623 		memset(&sin6, 0, sizeof(sin6));
2624 		sin6.sin6_family = AF_INET6;
2625 		sin6.sin6_len = sizeof(struct sockaddr_in6);
2626 		sin6.sin6_addr.s6_addr16[2] = 0xffff;
2627 		bcopy(&sin->sin_addr, &sin6.sin6_addr.s6_addr16[3],
2628 		    sizeof(sin6.sin6_addr.s6_addr16[3]));
2629 		sin6.sin6_port = sin->sin_port;
2630 		to = (struct sockaddr *)&sin6;
2631 	}
2632 	/* check and strip embedded scope junk */
2633 	to = (const struct sockaddr *) sctp_recover_scope((const struct sockaddr_in6 *)to,
2634 	    &lsa6);
2635 	/* append to socket */
2636 	SCTP_TCB_UNLOCK(stcb);
2637 	SCTP_INP_WLOCK(stcb->sctp_ep);
2638 	SCTP_TCB_LOCK(stcb);
2639 	if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
2640 	    m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2641 		/* not enough room */
2642 		sctp_m_freem(m_notify);
2643 		SCTP_INP_WUNLOCK(stcb->sctp_ep);
2644 		return;
2645 	}
2646 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2647 	   ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2648 		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2649 			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2650 		}
2651 	} else {
2652 		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2653 	}
2654 	SCTP_INP_WUNLOCK(stcb->sctp_ep);
2655 	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2656 }
2657 
2658 
2659 void
2660 sctp_ulp_notify(u_int32_t notification, struct sctp_tcb *stcb,
2661 		u_int32_t error, void *data)
2662 {
2663 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2664 		/* No notifications up when we are in a no socket state */
2665 		return;
2666 	}
2667 	if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2668 		/* Can't send up to a closed socket any notifications */
2669 		return;
2670 	}
2671 	switch (notification) {
2672 	case SCTP_NOTIFY_ASSOC_UP:
2673 		sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error);
2674 		break;
2675 	case SCTP_NOTIFY_ASSOC_DOWN:
2676 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error);
2677 		break;
2678 	case SCTP_NOTIFY_INTERFACE_DOWN:
2679 	{
2680 		struct sctp_nets *net;
2681 		net = (struct sctp_nets *)data;
2682 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
2683 		    rtcache_getdst(&net->ro), error);
2684 		break;
2685 	}
2686 	case SCTP_NOTIFY_INTERFACE_UP:
2687 	{
2688 		struct sctp_nets *net;
2689 		net = (struct sctp_nets *)data;
2690 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
2691 		    rtcache_getdst(&net->ro), error);
2692 		break;
2693 	}
2694 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
2695 	{
2696 		struct sctp_nets *net;
2697 		net = (struct sctp_nets *)data;
2698 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
2699 		    rtcache_getdst(&net->ro), error);
2700 		break;
2701 	}
2702 	case SCTP_NOTIFY_DG_FAIL:
2703 		sctp_notify_send_failed(stcb, error,
2704 		    (struct sctp_tmit_chunk *)data);
2705 		break;
2706 	case SCTP_NOTIFY_ADAPTION_INDICATION:
2707 		/* Here the error is the adaption indication */
2708 		sctp_notify_adaption_layer(stcb, error);
2709 		break;
2710 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
2711 		sctp_notify_partial_delivery_indication(stcb, error);
2712 		break;
2713 	case SCTP_NOTIFY_STRDATA_ERR:
2714 		break;
2715 	case SCTP_NOTIFY_ASSOC_ABORTED:
2716 		sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error);
2717 		break;
2718 	case SCTP_NOTIFY_PEER_OPENED_STREAM:
2719 		break;
2720 	case SCTP_NOTIFY_STREAM_OPENED_OK:
2721 		break;
2722 	case SCTP_NOTIFY_ASSOC_RESTART:
2723 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error);
2724 		break;
2725 	case SCTP_NOTIFY_HB_RESP:
2726 		break;
2727 	case SCTP_NOTIFY_STR_RESET_SEND:
2728 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STRRESET_OUTBOUND_STR);
2729 		break;
2730 	case SCTP_NOTIFY_STR_RESET_RECV:
2731 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STRRESET_INBOUND_STR);
2732 		break;
2733 	case SCTP_NOTIFY_ASCONF_ADD_IP:
2734 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
2735 		    error);
2736 		break;
2737 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
2738 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
2739 		    error);
2740 		break;
2741 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
2742 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
2743 		    error);
2744 		break;
2745 	case SCTP_NOTIFY_ASCONF_SUCCESS:
2746 		break;
2747 	case SCTP_NOTIFY_ASCONF_FAILED:
2748 		break;
2749 	case SCTP_NOTIFY_PEER_SHUTDOWN:
2750 		sctp_notify_shutdown_event(stcb);
2751 		break;
2752 	default:
2753 #ifdef SCTP_DEBUG
2754 		if (sctp_debug_on & SCTP_DEBUG_UTIL1) {
2755 			printf("NOTIFY: unknown notification %xh (%u)\n",
2756 			    notification, notification);
2757 		}
2758 #endif /* SCTP_DEBUG */
2759 		break;
2760 	} /* end switch */
2761 }
2762 
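/*
 * Flush every chunk still sitting on the stream queues and on the send
 * and sent queues, notifying the ULP of each failed datagram (unsent for
 * the first two, sent for the last) and releasing the chunk resources.
 */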
2763 void
2764 sctp_report_all_outbound(struct sctp_tcb *stcb)
2765 {
2766 	struct sctp_association *asoc;
2767 	struct sctp_stream_out *outs;
2768 	struct sctp_tmit_chunk *chk;
2769 
2770 	asoc = &stcb->asoc;
2771 
2772 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2773 		return;
2774 	}
2775 	/* now go through all the gunk, freeing chunks */
2776 	TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
2777 		/* now clean up any chunks here */
2778 		chk = TAILQ_FIRST(&outs->outqueue);
2779 		while (chk) {
2780 			stcb->asoc.stream_queue_cnt--;
2781 			TAILQ_REMOVE(&outs->outqueue, chk, sctp_next);
2782 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
2783 			    SCTP_NOTIFY_DATAGRAM_UNSENT, chk);
2784 			if (chk->data) {
2785 				sctp_m_freem(chk->data);
2786 				chk->data = NULL;
2787 			}
2788 			if (chk->whoTo)
2789 				sctp_free_remote_addr(chk->whoTo);
2790 			chk->whoTo = NULL;
2791 			chk->asoc = NULL;
2792 			/* Free the chunk */
2793 			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
2794 			sctppcbinfo.ipi_count_chunk--;
2795 			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
2796 				panic("Chunk count is negative");
2797 			}
2798 			sctppcbinfo.ipi_gencnt_chunk++;
2799 			chk = TAILQ_FIRST(&outs->outqueue);
2800 		}
2801 	}
2802 	/* pending send queue SHOULD be empty */
2803 	if (!TAILQ_EMPTY(&asoc->send_queue)) {
2804 		chk = TAILQ_FIRST(&asoc->send_queue);
2805 		while (chk) {
2806 			TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
2807 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, chk);
2808 			if (chk->data) {
2809 				sctp_m_freem(chk->data);
2810 				chk->data = NULL;
2811 			}
2812 			if (chk->whoTo)
2813 				sctp_free_remote_addr(chk->whoTo);
2814 			chk->whoTo = NULL;
2815 			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
2816 			sctppcbinfo.ipi_count_chunk--;
2817 			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
2818 				panic("Chunk count is negative");
2819 			}
2820 			sctppcbinfo.ipi_gencnt_chunk++;
2821 			chk = TAILQ_FIRST(&asoc->send_queue);
2822 		}
2823 	}
2824 	/* sent queue SHOULD be empty */
2825 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
2826 		chk = TAILQ_FIRST(&asoc->sent_queue);
2827 		while (chk) {
2828 			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
2829 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
2830 			    SCTP_NOTIFY_DATAGRAM_SENT, chk);
2831 			if (chk->data) {
2832 				sctp_m_freem(chk->data);
2833 				chk->data = NULL;
2834 			}
2835 			if (chk->whoTo)
2836 				sctp_free_remote_addr(chk->whoTo);
2837 			chk->whoTo = NULL;
2838 			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
2839 			sctppcbinfo.ipi_count_chunk--;
2840 			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
2841 				panic("Chunk count is negative");
2842 			}
2843 			sctppcbinfo.ipi_gencnt_chunk++;
2844 			chk = TAILQ_FIRST(&asoc->sent_queue);
2845 		}
2846 	}
2847 }
2848 
2849 void
2850 sctp_abort_notification(struct sctp_tcb *stcb, int error)
2851 {
2852 
2853 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2854 		return;
2855 	}
2856 	/* Tell them we lost the asoc */
2857 	sctp_report_all_outbound(stcb);
2858 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL);
2859 }
2860 
2861 void
2862 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2863     struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err)
2864 {
2865 	u_int32_t vtag;
2866 
2867 	vtag = 0;
2868 	if (stcb != NULL) {
2869 		/* We have a TCB to abort, send notification too */
2870 		vtag = stcb->asoc.peer_vtag;
2871 		sctp_abort_notification(stcb, 0);
2872 	}
2873 	sctp_send_abort(m, iphlen, sh, vtag, op_err);
2874 	if (stcb != NULL) {
2875 		/* Ok, now let's free it */
2876 		sctp_free_assoc(inp, stcb);
2877 	} else {
2878 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2879 			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
2880 				sctp_inpcb_free(inp, 1);
2881 			}
2882 		}
2883 	}
2884 }
2885 
2886 void
2887 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2888     int error, struct mbuf *op_err)
2889 {
2890 
2891 	if (stcb == NULL) {
2892 		/* Got to have a TCB */
2893 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2894 			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
2895 				sctp_inpcb_free(inp, 1);
2896 			}
2897 		}
2898 		return;
2899 	}
2900 	/* notify the ulp */
2901 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
2902 		sctp_abort_notification(stcb, error);
2903 	/* notify the peer */
2904 	sctp_send_abort_tcb(stcb, op_err);
2905 	/* now free the asoc */
2906 	sctp_free_assoc(inp, stcb);
2907 }
2908 
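/*
 * Handle a packet that arrived "out of the blue", i.e. one matching no
 * existing association (RFC 2960 section 8.4): walk its chunks and,
 * unless it contains something we must not answer (an ABORT, a SHUTDOWN
 * COMPLETE or a PACKET DROPPED report), respond with an ABORT; a
 * SHUTDOWN ACK is instead answered with a SHUTDOWN COMPLETE.
 */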
2909 void
2910 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
2911     struct sctp_inpcb *inp, struct mbuf *op_err)
2912 {
2913 	struct sctp_chunkhdr *ch, chunk_buf;
2914 	unsigned int chk_length;
2915 
2916 	/* If the endpoint's socket is gone and it has no associations, free it now */
2917 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
2918 		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
2919 			sctp_inpcb_free(inp, 1);
2920 		}
2921 	}
2922 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
2923 	    sizeof(*ch), (u_int8_t *)&chunk_buf);
2924 	while (ch != NULL) {
2925 		chk_length = ntohs(ch->chunk_length);
2926 		if (chk_length < sizeof(*ch)) {
2927 			/* break to abort land */
2928 			break;
2929 		}
2930 		switch (ch->chunk_type) {
2931 		case SCTP_PACKET_DROPPED:
2932 			/* we don't respond to pkt-dropped */
2933 			return;
2934 		case SCTP_ABORT_ASSOCIATION:
2935 			/* we don't respond with an ABORT to an ABORT */
2936 			return;
2937 		case SCTP_SHUTDOWN_COMPLETE:
2938 			/*
2939 			 * we ignore it since we are not waiting for it
2940 			 * and peer is gone
2941 			 */
2942 			return;
2943 		case SCTP_SHUTDOWN_ACK:
2944 			sctp_send_shutdown_complete2(m, iphlen, sh);
2945 			return;
2946 		default:
2947 			break;
2948 		}
2949 		offset += SCTP_SIZE32(chk_length);
2950 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
2951 		    sizeof(*ch), (u_int8_t *)&chunk_buf);
2952 	}
2953 	sctp_send_abort(m, iphlen, sh, 0, op_err);
2954 }
2955 
2956 /*
2957  * check the inbound datagram to make sure there is not an abort
2958  * inside it; if there is, return 1, else return 0.
2959  */
2960 int
2961 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, int *vtagfill)
2962 {
2963 	struct sctp_chunkhdr *ch;
2964 	struct sctp_init_chunk *init_chk, chunk_buf;
2965 	int offset;
2966 	unsigned int chk_length;
2967 
2968 	offset = iphlen + sizeof(struct sctphdr);
2969 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
2970 	    (u_int8_t *)&chunk_buf);
2971 	while (ch != NULL) {
2972 		chk_length = ntohs(ch->chunk_length);
2973 		if (chk_length < sizeof(*ch)) {
2974 			/* packet is probably corrupt */
2975 			break;
2976 		}
2977 		/* we seem to be ok, is it an abort? */
2978 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
2979 			/* yep, tell them */
2980 			return (1);
2981 		}
2982 		if (ch->chunk_type == SCTP_INITIATION) {
2983 			/* need to update the Vtag */
2984 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
2985 			    offset, sizeof(*init_chk), (u_int8_t *)&chunk_buf);
2986 			if (init_chk != NULL) {
2987 				*vtagfill = ntohl(init_chk->init.initiate_tag);
2988 			}
2989 		}
2990 		/* Nope, move to the next chunk */
2991 		offset += SCTP_SIZE32(chk_length);
2992 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
2993 		    sizeof(*ch), (u_int8_t *)&chunk_buf);
2994 	}
2995 	return (0);
2996 }
2997 
2998 /*
2999  * currently (2/02), ifa_addr embeds the scope_id and doesn't
3000  * have sin6_scope_id set (i.e. it's 0),
3001  * so this function exists to compare link-local scopes
3002  */
3003 uint32_t
3004 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
3005 {
3006 	struct sockaddr_in6 a, b;
3007 
3008 	/* save copies */
3009 	a = *addr1;
3010 	b = *addr2;
3011 
3012 	if (a.sin6_scope_id == 0)
3013 		if (sa6_recoverscope(&a)) {
3014 			/* can't get scope, so can't match */
3015 			return (0);
3016 		}
3017 	if (b.sin6_scope_id == 0)
3018 		if (sa6_recoverscope(&b)) {
3019 			/* can't get scope, so can't match */
3020 			return (0);
3021 		}
3022 	if (a.sin6_scope_id != b.sin6_scope_id)
3023 		return (0);
3024 
3025 	return (1);
3026 }
3027 
3028 /*
3029  * returns a sockaddr_in6 with embedded scope recovered and removed
3030  */
3031 const struct sockaddr_in6 *
3032 sctp_recover_scope(const struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
3033 {
3034 	const struct sockaddr_in6 *newaddr;
3035 
3036 	newaddr = addr;
3037 	/* check and strip embedded scope junk */
3038 	if (addr->sin6_family == AF_INET6) {
3039 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
3040 			if (addr->sin6_scope_id == 0) {
3041 				*store = *addr;
3042 				if (sa6_recoverscope(store) == 0) {
3043 					/* use the recovered scope */
3044 					newaddr = store;
3045 				}
3046 				/* else, return the original "to" addr */
3047 			}
3048 		}
3049 	}
3050 	return (newaddr);
3051 }
3052 
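
/*
 * (In KAME-derived stacks a link-local address may carry its scope, i.e.
 * the interface index, embedded in the second 16-bit word of the address
 * itself; sa6_recoverscope() recovers that value into sin6_scope_id,
 * which is what "recovered and removed" refers to above.)
 */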
3053 /*
3054  * are the two addresses the same?  currently a "scopeless" check
3055  * returns: 1 if same, 0 if not
3056  */
3057 int
3058 sctp_cmpaddr(const struct sockaddr *sa1, const struct sockaddr *sa2)
3059 {
3060 
3061 	/* must be valid */
3062 	if (sa1 == NULL || sa2 == NULL)
3063 		return (0);
3064 
3065 	/* must be the same family */
3066 	if (sa1->sa_family != sa2->sa_family)
3067 		return (0);
3068 
3069 	if (sa1->sa_family == AF_INET6) {
3070 		/* IPv6 addresses */
3071 		const struct sockaddr_in6 *sin6_1, *sin6_2;
3072 
3073 		sin6_1 = (const struct sockaddr_in6 *)sa1;
3074 		sin6_2 = (const struct sockaddr_in6 *)sa2;
3075 		return (SCTP6_ARE_ADDR_EQUAL(&sin6_1->sin6_addr,
3076 		    &sin6_2->sin6_addr));
3077 	} else if (sa1->sa_family == AF_INET) {
3078 		/* IPv4 addresses */
3079 		const struct sockaddr_in *sin_1, *sin_2;
3080 
3081 		sin_1 = (const struct sockaddr_in *)sa1;
3082 		sin_2 = (const struct sockaddr_in *)sa2;
3083 		return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
3084 	} else {
3085 		/* we don't do these... */
3086 		return (0);
3087 	}
3088 }
3089 
3090 void
3091 sctp_print_address(const struct sockaddr *sa)
3092 {
3093 
3094 	if (sa->sa_family == AF_INET6) {
3095 		const struct sockaddr_in6 *sin6;
3096 		sin6 = (const struct sockaddr_in6 *)sa;
3097 		printf("IPv6 address: %s:%d scope:%u\n",
3098 		    ip6_sprintf(&sin6->sin6_addr), ntohs(sin6->sin6_port),
3099 		    sin6->sin6_scope_id);
3100 	} else if (sa->sa_family == AF_INET) {
3101 		const struct sockaddr_in *sin;
3102 		sin = (const struct sockaddr_in *)sa;
3103 		printf("IPv4 address: %s:%d\n", inet_ntoa(sin->sin_addr),
3104 		    ntohs(sin->sin_port));
3105 	} else {
3106 		printf("?\n");
3107 	}
3108 }
3109 
3110 void
3111 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
3112 {
3113 	if (iph->ip_v == IPVERSION) {
3114 		struct sockaddr_in lsa, fsa;
3115 
3116 		memset(&lsa, 0, sizeof(lsa));
3117 		lsa.sin_len = sizeof(lsa);
3118 		lsa.sin_family = AF_INET;
3119 		lsa.sin_addr = iph->ip_src;
3120 		lsa.sin_port = sh->src_port;
3121 		memset(&fsa, 0, sizeof(fsa));
3122 		fsa.sin_len = sizeof(fsa);
3123 		fsa.sin_family = AF_INET;
3124 		fsa.sin_addr = iph->ip_dst;
3125 		fsa.sin_port = sh->dest_port;
3126 		printf("src: ");
3127 		sctp_print_address((struct sockaddr *)&lsa);
3128 		printf("dest: ");
3129 		sctp_print_address((struct sockaddr *)&fsa);
3130 	} else if (iph->ip_v == (IPV6_VERSION >> 4)) {
3131 		struct ip6_hdr *ip6;
3132 		struct sockaddr_in6 lsa6, fsa6;
3133 
3134 		ip6 = (struct ip6_hdr *)iph;
3135 		memset(&lsa6, 0, sizeof(lsa6));
3136 		lsa6.sin6_len = sizeof(lsa6);
3137 		lsa6.sin6_family = AF_INET6;
3138 		lsa6.sin6_addr = ip6->ip6_src;
3139 		lsa6.sin6_port = sh->src_port;
3140 		memset(&fsa6, 0, sizeof(fsa6));
3141 		fsa6.sin6_len = sizeof(fsa6);
3142 		fsa6.sin6_family = AF_INET6;
3143 		fsa6.sin6_addr = ip6->ip6_dst;
3144 		fsa6.sin6_port = sh->dest_port;
3145 		printf("src: ");
3146 		sctp_print_address((struct sockaddr *)&lsa6);
3147 		printf("dest: ");
3148 		sctp_print_address((struct sockaddr *)&fsa6);
3149 	}
3150 }
3151 
3152 #if defined(__FreeBSD__) || defined(__APPLE__)
3153 
3154 /* cloned from uipc_socket.c */
3155 
3156 #define SCTP_SBLINKRECORD(sb, m0) do {					\
3157 	if ((sb)->sb_lastrecord != NULL)				\
3158 		(sb)->sb_lastrecord->m_nextpkt = (m0);			\
3159 	else								\
3160 		(sb)->sb_mb = (m0);					\
3161 	(sb)->sb_lastrecord = (m0);					\
3162 } while (/*CONSTCOND*/0)
3163 #endif
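
/*
 * SCTP_SBLINKRECORD() links a new record (packet) onto the tail of a
 * socket buffer's record chain (sb_mb/sb_lastrecord); it is referenced
 * from the FreeBSD/Apple variant of sbappendaddr_nocheck() below.
 */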
3164 
3165 
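/*
 * sbappendaddr_nocheck() is a variant of sbappendaddr() that skips the
 * usual sockbuf space check (callers do their own sctp_sbspace() check
 * when they want one), stamps the record with the association's vtag in
 * m_pkthdr.csum_data (m_pkthdr.csum on OpenBSD) so the record can be
 * matched to its association later, and omits the leading sockaddr mbuf
 * only for endpoints that are both TCP-type and in the TCP pool.
 */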
3166 int
3167 sbappendaddr_nocheck(struct sockbuf *sb, const struct sockaddr *asa,
3168 	struct mbuf *m0, struct mbuf *control,
3169 	u_int32_t tag, struct sctp_inpcb *inp)
3170 {
3171 #ifdef __NetBSD__
3172 	struct mbuf *m, *n;
3173 
3174 	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
3175 		panic("sbappendaddr_nocheck");
3176 
3177 	m0->m_pkthdr.csum_data = (int)tag;
3178 
3179 	for (n = control; n; n = n->m_next) {
3180 		if (n->m_next == 0)	/* keep pointer to last control buf */
3181 			break;
3182 	}
3183 	if (((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) == 0) ||
3184 	    ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)== 0)) {
3185 		MGETHDR(m, M_DONTWAIT, MT_SONAME);
3186 		if (m == 0)
3187 			return (0);
3188 
3189 		m->m_len = asa->sa_len;
3190 		memcpy(mtod(m, void *), (const void *)asa, asa->sa_len);
3191 	} else {
3192 		m = NULL;
3193 	}
3194 	if (n) {
3195 		n->m_next = m0;		/* concatenate data to control */
3196 	} else {
3197 		control = m0;
3198 	}
3199 	if (m)
3200 		m->m_next = control;
3201 	else
3202 		m = control;
3203 	m->m_pkthdr.csum_data = tag;
3204 
3205 	for (n = m; n; n = n->m_next)
3206 		sballoc(sb, n);
3207 	if ((n = sb->sb_mb) != NULL) {
3208 		if ((n->m_nextpkt != inp->sb_last_mpkt) && (n->m_nextpkt == NULL)) {
3209 			inp->sb_last_mpkt = NULL;
3210 		}
3211 		if (inp->sb_last_mpkt)
3212 			inp->sb_last_mpkt->m_nextpkt = m;
3213  		else {
3214 			while (n->m_nextpkt) {
3215 				n = n->m_nextpkt;
3216 			}
3217 			n->m_nextpkt = m;
3218 		}
3219 		inp->sb_last_mpkt = m;
3220 	} else {
3221 		inp->sb_last_mpkt = sb->sb_mb = m;
3222 		inp->sctp_vtag_first = tag;
3223 	}
3224 	return (1);
3225 #endif
3226 #if defined(__FreeBSD__) || defined(__APPLE__)
3227 	struct mbuf *m, *n, *nlast;
3228 	int cnt=0;
3229 
3230 	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
3231 		panic("sbappendaddr_nocheck");
3232 
3233 	for (n = control; n; n = n->m_next) {
3234 		if (n->m_next == 0)	/* get pointer to last control buf */
3235 			break;
3236 	}
3237 	if (((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) == 0) ||
3238 	    ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)== 0)) {
3239 		if (asa->sa_len > MHLEN)
3240 			return (0);
3241  try_again:
3242 		MGETHDR(m, M_DONTWAIT, MT_SONAME);
3243 		if (m == 0)
3244 			return (0);
3245 		m->m_len = 0;
3246 		/* safety */
3247 		if (m == m0) {
3248 			printf("Duplicate mbuf: %p passed in and MGET returned %p?\n",
3249 			       m0, m);
3250 			if (cnt) {
3251 				panic("more than once");
3252 			}
3253 			cnt++;
3254 			goto try_again;
3255 		}
3256 		m->m_len = asa->sa_len;
3257 		bcopy((void *)asa, mtod(m, void *), asa->sa_len);
3258 	}
3259 	else {
3260 		m = NULL;
3261 	}
3262 	if (n)
3263 		n->m_next = m0;		/* concatenate data to control */
3264 	else
3265 		control = m0;
3266 	if (m)
3267 		m->m_next = control;
3268 	else
3269 		m = control;
3270 	m->m_pkthdr.csum_data = (int)tag;
3271 
3272 	for (n = m; n; n = n->m_next)
3273 		sballoc(sb, n);
3274 	nlast = n;
3275 	if (sb->sb_mb == NULL) {
3276 		inp->sctp_vtag_first = tag;
3277 	}
3278 
3279 #ifdef __FREEBSD__
3280 	if (sb->sb_mb == NULL)
3281 		inp->sctp_vtag_first = tag;
3282 	SCTP_SBLINKRECORD(sb, m);
3283 	sb->sb_mbtail = nlast;
3284 #else
3285 	if ((n = sb->sb_mb) != NULL) {
3286 		if ((n->m_nextpkt != inp->sb_last_mpkt) && (n->m_nextpkt == NULL)) {
3287 			inp->sb_last_mpkt = NULL;
3288 		}
3289 		if (inp->sb_last_mpkt)
3290 			inp->sb_last_mpkt->m_nextpkt = m;
3291  		else {
3292 			while (n->m_nextpkt) {
3293 				n = n->m_nextpkt;
3294 			}
3295 			n->m_nextpkt = m;
3296 		}
3297 		inp->sb_last_mpkt = m;
3298 	} else {
3299 		inp->sb_last_mpkt = sb->sb_mb = m;
3300 		inp->sctp_vtag_first = tag;
3301 	}
3302 #endif
3303 	return (1);
3304 #endif
3305 #ifdef __OpenBSD__
3306 	struct mbuf *m, *n;
3307 
3308 	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
3309 		panic("sbappendaddr_nocheck");
3310 	m0->m_pkthdr.csum = (int)tag;
3311 	for (n = control; n; n = n->m_next) {
3312 		if (n->m_next == 0)	/* keep pointer to last control buf */
3313 			break;
3314 	}
3315 	if (((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) == 0) ||
3316 	    ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)== 0)) {
3317 		if (asa->sa_len > MHLEN)
3318 			return (0);
3319 		MGETHDR(m, M_DONTWAIT, MT_SONAME);
3320 		if (m == 0)
3321 			return (0);
3322 		m->m_len = asa->sa_len;
3323 		bcopy((void *)asa, mtod(m, void *), asa->sa_len);
3324 	} else {
3325 		m = NULL;
3326 	}
3327 	if (n)
3328 		n->m_next = m0;		/* concatenate data to control */
3329 	else
3330 		control = m0;
3331 
3332 	m->m_pkthdr.csum = (int)tag;
3333 	m->m_next = control;
3334 	for (n = m; n; n = n->m_next)
3335 		sballoc(sb, n);
3336 	if ((n = sb->sb_mb) != NULL) {
3337 		if ((n->m_nextpkt != inp->sb_last_mpkt) && (n->m_nextpkt == NULL)) {
3338 			inp->sb_last_mpkt = NULL;
3339 		}
3340 		if (inp->sb_last_mpkt)
3341 			inp->sb_last_mpkt->m_nextpkt = m;
3342  		else {
3343 			while (n->m_nextpkt) {
3344 				n = n->m_nextpkt;
3345 			}
3346 			n->m_nextpkt = m;
3347 		}
3348 		inp->sb_last_mpkt = m;
3349 	} else {
3350 		inp->sb_last_mpkt = sb->sb_mb = m;
3351 		inp->sctp_vtag_first = tag;
3352 	}
3353 	return (1);
3354 #endif
3355 }
3356 
3357 /*************HOLD THIS COMMENT FOR PATCH FILE OF
3358  *************ALTERNATE ROUTING CODE
3359  */
3360 
3361 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
3362  *************ALTERNATE ROUTING CODE
3363  */
3364 
3365 struct mbuf *
3366 sctp_generate_invmanparam(int err)
3367 {
3368 	/* Return an mbuf with an invalid mandatory parameter */
3369 	struct mbuf *m;
3370 
3371 	MGET(m, M_DONTWAIT, MT_DATA);
3372 	if (m) {
3373 		struct sctp_paramhdr *ph;
3374 		m->m_len = sizeof(struct sctp_paramhdr);
3375 		ph = mtod(m, struct sctp_paramhdr *);
3376 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
3377 		ph->param_type = htons(err);
3378 	}
3379 	return (m);
3380 }
3381 
3382 static int
3383 sctp_should_be_moved(struct mbuf *this, struct sctp_association *asoc)
3384 {
3385 	struct mbuf *m;
3386 	/*
3387 	 * given an mbuf chain, look through it for the M_PKTHDR
3388 	 * and return 1 if it belongs to the given association.
3389 	 * We can tell because of a kludge: the my_vtag of the asoc
3390 	 * is stuffed into the m->m_pkthdr.csum_data/csum field
3391 	 * when the record is appended to the socket buffer.
3392 	 */
3393 	m = this;
3394 	while (m) {
3395 		if (m->m_flags & M_PKTHDR) {
3396 			/* check it */
3397 #if defined(__OpenBSD__)
3398 			if ((u_int32_t)m->m_pkthdr.csum == asoc->my_vtag)
3399 #else
3400 			if ((u_int32_t)m->m_pkthdr.csum_data == asoc->my_vtag)
3401 #endif
3402 			{
3403 				/* Yep */
3404 				return (1);
3405 			}
3406 		}
3407 		m = m->m_next;
3408 	}
3409 	return (0);
3410 }
3411 
3412 u_int32_t
3413 sctp_get_first_vtag_from_sb(struct socket *so)
3414 {
3415 	struct mbuf *this, *at;
3416 	u_int32_t retval;
3417 
3418 	retval = 0;
3419 	if (so->so_rcv.sb_mb) {
3420 		/* grubbing time */
3421 		this = so->so_rcv.sb_mb;
3422 		while (this) {
3423 			at = this;
3424 			/* get to the m_pkthdr */
3425 			while (at) {
3426 				if (at->m_flags & M_PKTHDR)
3427 					break;
3428 				else {
3429 					at = at->m_next;
3430 				}
3431 			}
3432 			/* now, do we have an m_pkthdr? */
3433 			if (at && (at->m_flags & M_PKTHDR)) {
3434 				/* check it */
3435 #if defined(__OpenBSD__)
3436 				if ((u_int32_t)at->m_pkthdr.csum != 0)
3437 #else
3438 				if ((u_int32_t)at->m_pkthdr.csum_data != 0)
3439 #endif
3440 				{
3441 					/* it's the one */
3442 #if defined(__OpenBSD__)
3443 					retval = (u_int32_t)at->m_pkthdr.csum;
3444 #else
3445 					retval =
3446 					    (u_int32_t)at->m_pkthdr.csum_data;
3447 #endif
3448 					break;
3449 				}
3450 			}
3451 			this = this->m_nextpkt;
3452 		}
3453 
3454 	}
3455 	return (retval);
3456 
3457 }
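
/*
 * Move every receive-buffer record belonging to the given association
 * from the old socket to the new one (e.g. when an association is handed
 * off to its own socket), using the vtag stashed in each record's pkthdr
 * by sbappendaddr_nocheck() to decide what moves, and fixing the sockbuf
 * accounting on both sides as records are transferred.
 */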
3458 void
3459 sctp_grub_through_socket_buffer(struct sctp_inpcb *inp, struct socket *old,
3460     struct socket *new, struct sctp_tcb *stcb)
3461 {
3462 	struct mbuf **put, **take, *next, *this;
3463 	struct sockbuf *old_sb, *new_sb;
3464 	struct sctp_association *asoc;
3465 	int moved_top = 0;
3466 
3467 	asoc = &stcb->asoc;
3468 	old_sb = &old->so_rcv;
3469 	new_sb = &new->so_rcv;
3470 	if (old_sb->sb_mb == NULL) {
3471 		/* Nothing to move */
3472 		return;
3473 	}
3474 
3475 	if (inp->sctp_vtag_first == asoc->my_vtag) {
3476 		/* First one must be moved */
3477 		struct mbuf *mm;
3478 		for (mm = old_sb->sb_mb; mm; mm = mm->m_next) {
3479 			/*
3480 			 * Go down the chain and fix
3481 			 * the space allocation of the
3482 			 * two sockets.
3483 			 */
3484 			sbfree(old_sb, mm);
3485 			sballoc(new_sb, mm);
3486 		}
3487 		new_sb->sb_mb = old_sb->sb_mb;
3488 		old_sb->sb_mb = new_sb->sb_mb->m_nextpkt;
3489 		new_sb->sb_mb->m_nextpkt = NULL;
3490 		put = &new_sb->sb_mb->m_nextpkt;
3491 		moved_top = 1;
3492 	} else {
3493 		put = &new_sb->sb_mb;
3494 	}
3495 
3496 	take = &old_sb->sb_mb;
3497 	next = old_sb->sb_mb;
3498 	while (next) {
3499 		this = next;
3500 		/* position for the next one */
3501 		next = this->m_nextpkt;
3502 		/* check the tag of this packet */
3503 		if (sctp_should_be_moved(this, asoc)) {
3504 			/* yes this needs to be moved */
3505 			struct mbuf *mm;
3506 			*take = this->m_nextpkt;
3507 			this->m_nextpkt = NULL;
3508 			*put = this;
3509 			for (mm = this; mm; mm = mm->m_next) {
3510 				/*
3511 				 * Go down the chain and fix
3512 				 * the space allocation of the
3513 				 * two sockets.
3514 				 */
3515 				sbfree(old_sb, mm);
3516 				sballoc(new_sb, mm);
3517 			}
3518 			put = &this->m_nextpkt;
3519 
3520 		} else {
3521 			/* no, advance our take point. */
3522 			take = &this->m_nextpkt;
3523 		}
3524 	}
3525 	if (moved_top) {
3526 		/*
3527 		 * Ok so now we must re-postion vtag_first to
3528 		 * Ok, so now we must re-position vtag_first to
3529 		 * mbuf at the top.
3530 		 */
3531 		inp->sctp_vtag_first = sctp_get_first_vtag_from_sb(old);
3532 	}
3533 }
3534 
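/*
 * Give back the socket-buffer and association output-queue accounting
 * held by a chunk whose data is about to be freed, clamping each counter
 * at zero rather than letting it wrap.
 */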
3535 void
3536 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
3537     struct sctp_tmit_chunk *tp1)
3538 {
3539 	if (tp1->data == NULL) {
3540 		return;
3541 	}
3542 #ifdef SCTP_MBCNT_LOGGING
3543 	sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
3544 		       asoc->total_output_queue_size,
3545 		       tp1->book_size,
3546 		       asoc->total_output_mbuf_queue_size,
3547 		       tp1->mbcnt);
3548 #endif
3549 	if (asoc->total_output_queue_size >= tp1->book_size) {
3550 		asoc->total_output_queue_size -= tp1->book_size;
3551 	} else {
3552 		asoc->total_output_queue_size = 0;
3553 	}
3554 
3555 	/* Now free the mbuf */
3556 	if (asoc->total_output_mbuf_queue_size >= tp1->mbcnt) {
3557 		asoc->total_output_mbuf_queue_size -= tp1->mbcnt;
3558 	} else {
3559 		asoc->total_output_mbuf_queue_size = 0;
3560 	}
3561 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3562 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3563 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
3564 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
3565 		} else {
3566 			stcb->sctp_socket->so_snd.sb_cc = 0;
3567 
3568 		}
3569 		if (stcb->sctp_socket->so_snd.sb_mbcnt >= tp1->mbcnt) {
3570 			stcb->sctp_socket->so_snd.sb_mbcnt -= tp1->mbcnt;
3571 		} else {
3572 			stcb->sctp_socket->so_snd.sb_mbcnt = 0;
3573 		}
3574 	}
3575 }
3576 
3577 int
3578 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
3579     int reason, struct sctpchunk_listhead *queue)
3580 {
3581 	int ret_sz = 0;
3582 	int notdone;
3583 	uint8_t foundeom = 0;
3584 
3585 	do {
3586 		ret_sz += tp1->book_size;
3587 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
3588 		if (tp1->data) {
3589 			sctp_free_bufspace(stcb, &stcb->asoc, tp1);
3590 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1);
3591 			sctp_m_freem(tp1->data);
3592 			tp1->data = NULL;
3593 			sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
3594 		}
3595 		if (tp1->flags & SCTP_PR_SCTP_BUFFER) {
3596 			stcb->asoc.sent_queue_cnt_removeable--;
3597 		}
3598 		if (queue == &stcb->asoc.send_queue) {
3599 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
3600 			/* on to the sent queue */
3601 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
3602 			    sctp_next);
3603 			stcb->asoc.sent_queue_cnt++;
3604 		}
3605 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
3606 		    SCTP_DATA_NOT_FRAG) {
3607 			/* not fragmented, we are done */
3608 			notdone = 0;
3609 			foundeom = 1;
3610 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
3611 			/* end of frag, we are done */
3612 			notdone = 0;
3613 			foundeom = 1;
3614 		} else {
3615 			/* It's a begin or middle piece; we must mark all of it */
3616 			notdone = 1;
3617 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3618 		}
3619 	} while (tp1 && notdone);
3620 	if ((foundeom == 0) && (queue == &stcb->asoc.sent_queue)) {
3621 		/*
3622 		 * The multi-part message was scattered
3623 		 * across the send and sent queues.
3624 		 */
3625 		tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3626 		/*
3627 		 * recurse through the send_queue too, starting at the
3628 		 * beginning.
3629 		 */
3630 		if (tp1) {
3631 			ret_sz += sctp_release_pr_sctp_chunk(stcb, tp1, reason,
3632 			    &stcb->asoc.send_queue);
3633 		} else {
3634 			printf("hmm, nothing on the send queue and no EOM?\n");
3635 		}
3636 	}
3637 	return (ret_sz);
3638 }
3639 
3640 /*
3641  * checks to see if the given address, sa, is one that is currently
3642  * known by the kernel
3643  * note: can't distinguish the same address on multiple interfaces and
3644  *       doesn't handle multiple addresses with different zone/scope IDs
3645  * note: ifa_ifwithaddr() compares the entire sockaddr struct
3646  */
3647 struct ifaddr *
3648 sctp_find_ifa_by_addr(struct sockaddr *sa)
3649 {
3650 	struct ifnet *ifn;
3651 	struct ifaddr *ifa;
3652 
3653 	/* go through all our known interfaces */
3654 	TAILQ_FOREACH(ifn, &ifnet_list, if_list) {
3655 		/* go through each interface's addresses */
3656 		TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
3657 			/* correct family? */
3658 			if (ifa->ifa_addr->sa_family != sa->sa_family)
3659 				continue;
3660 
3661 #ifdef INET6
3662 			if (ifa->ifa_addr->sa_family == AF_INET6) {
3663 				/* IPv6 address */
3664 				struct sockaddr_in6 *sin1, *sin2, sin6_tmp;
3665 				sin1 = (struct sockaddr_in6 *)ifa->ifa_addr;
3666 				if (IN6_IS_SCOPE_LINKLOCAL(&sin1->sin6_addr)) {
3667 					/* create a copy and clear scope */
3668 					memcpy(&sin6_tmp, sin1,
3669 					    sizeof(struct sockaddr_in6));
3670 					sin1 = &sin6_tmp;
3671 					in6_clearscope(&sin1->sin6_addr);
3672 				}
3673 				sin2 = (struct sockaddr_in6 *)sa;
3674 				if (memcmp(&sin1->sin6_addr, &sin2->sin6_addr,
3675 					   sizeof(struct in6_addr)) == 0) {
3676 					/* found it */
3677 					return (ifa);
3678 				}
3679 			} else
3680 #endif
3681 			if (ifa->ifa_addr->sa_family == AF_INET) {
3682 				/* IPv4 address */
3683 				struct sockaddr_in *sin1, *sin2;
3684 				sin1 = (struct sockaddr_in *)ifa->ifa_addr;
3685 				sin2 = (struct sockaddr_in *)sa;
3686 				if (sin1->sin_addr.s_addr ==
3687 				    sin2->sin_addr.s_addr) {
3688 					/* found it */
3689 					return (ifa);
3690 				}
3691 			}
3692 			/* else, not AF_INET or AF_INET6, so skip */
3693 		} /* end foreach ifa */
3694 	} /* end foreach ifn */
3695 	/* not found! */
3696 	return (NULL);
3697 }
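/*
 * Illustrative sketch (not part of the upstream code): a caller that only
 * needs a boolean "is this one of our local addresses?" answer could wrap
 * the lookup as below.  The helper name is hypothetical.
 */
#if 0
static int
sctp_addr_is_local(struct sockaddr *sa)
{

	/* a NULL result means no configured interface carries the address */
	return (sctp_find_ifa_by_addr(sa) != NULL);
}
#endif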
3698 
3699 
3700 #ifdef __APPLE__
3701 /*
3702  * here we hack in a fix for Apple's m_copym for the case where the first mbuf
3703  * in the chain is an M_PKTHDR and the length is zero
3704  */
3705 static void
3706 sctp_pkthdr_fix(struct mbuf *m)
3707 {
3708 	struct mbuf *m_nxt;
3709 
3710 	if ((m->m_flags & M_PKTHDR) == 0) {
3711 		/* not a PKTHDR */
3712 		return;
3713 	}
3714 
3715 	if (m->m_len != 0) {
3716 		/* not a zero length PKTHDR mbuf */
3717 		return;
3718 	}
3719 
3720 	/* let's move a word into the first mbuf... yes, ugly! */
3721 	m_nxt = m->m_next;
3722 	if (m_nxt == NULL) {
3723 		/* umm... not a very useful mbuf chain... */
3724 		return;
3725 	}
3726 	if ((size_t)m_nxt->m_len > sizeof(long)) {
3727 		/* move over a long */
3728 		bcopy(mtod(m_nxt, void *), mtod(m, void *), sizeof(long));
3729 		/* update mbuf data pointers and lengths */
3730 		m->m_len += sizeof(long);
3731 		m_nxt->m_data += sizeof(long);
3732 		m_nxt->m_len -= sizeof(long);
3733 	}
3734 }
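/*
 * For illustration (assuming sizeof(long) == 8): the fix turns a chain like
 *
 *	[PKTHDR, m_len 0] -> [m_len 100]
 * into
 *	[PKTHDR, m_len 8] -> [m_len 92]
 *
 * so m_copym() no longer starts from a zero-length packet-header mbuf.
 */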
3735 
3736 inline struct mbuf *
3737 sctp_m_copym(struct mbuf *m, int off, int len, int wait)
3738 {
3739 	sctp_pkthdr_fix(m);
3740 	return (m_copym(m, off, len, wait));
3741 }
3742 #endif /* __APPLE__ */
3743