1 /*	$KAME: sctputil.c,v 1.39 2005/06/16 20:54:06 jinmei Exp $	*/
2 /*	$NetBSD: sctputil.c,v 1.20 2024/07/05 04:31:54 rin Exp $	*/
3 
4 /*
5  * Copyright (c) 2001, 2002, 2003, 2004 Cisco Systems, Inc.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *      This product includes software developed by Cisco Systems, Inc.
19  * 4. Neither the name of the project nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY CISCO SYSTEMS AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL CISCO SYSTEMS OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: sctputil.c,v 1.20 2024/07/05 04:31:54 rin Exp $");
38 
39 #ifdef _KERNEL_OPT
40 #include "opt_inet.h"
41 #include "opt_ipsec.h"
42 #include "opt_sctp.h"
43 #endif /* _KERNEL_OPT */
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/malloc.h>
48 #include <sys/mbuf.h>
49 #include <sys/domain.h>
50 #include <sys/protosw.h>
51 #include <sys/socket.h>
52 #include <sys/socketvar.h>
53 #include <sys/mutex.h>
54 #include <sys/proc.h>
55 #include <sys/kernel.h>
56 #include <sys/sysctl.h>
57 #include <sys/cprng.h>
58 
59 #include <sys/callout.h>
60 
61 #include <net/route.h>
62 
63 #ifdef INET6
64 #include <sys/domain.h>
65 #endif
66 
67 #include <machine/limits.h>
68 
69 #include <net/if.h>
70 #include <net/if_types.h>
71 #include <net/route.h>
72 
73 #include <netinet/in.h>
74 #include <netinet/in_systm.h>
75 #include <netinet/ip.h>
76 #include <netinet/in_pcb.h>
77 #include <netinet/in_var.h>
78 #include <netinet/ip_var.h>
79 
80 #ifdef INET6
81 #include <netinet/ip6.h>
82 #include <netinet6/ip6_var.h>
83 #include <netinet6/scope6_var.h>
84 #include <netinet6/in6_pcb.h>
85 
86 #endif /* INET6 */
87 
88 #include <netinet/sctp_pcb.h>
89 
90 #ifdef IPSEC
91 #include <netipsec/ipsec.h>
92 #include <netipsec/key.h>
93 #endif /* IPSEC */
94 
95 #include <netinet/sctputil.h>
96 #include <netinet/sctp_var.h>
97 #ifdef INET6
98 #include <netinet6/sctp6_var.h>
99 #endif
100 #include <netinet/sctp_header.h>
101 #include <netinet/sctp_output.h>
102 #include <netinet/sctp_hashdriver.h>
103 #include <netinet/sctp_uio.h>
104 #include <netinet/sctp_timer.h>
105 #include <netinet/sctp_crc32.h>
106 #include <netinet/sctp_indata.h>	/* for sctp_deliver_data() */
107 #define NUMBER_OF_MTU_SIZES 18
108 
109 #ifdef SCTP_DEBUG
110 extern u_int32_t sctp_debug_on;
111 #endif
112 
113 #ifdef SCTP_STAT_LOGGING
114 int sctp_cwnd_log_at=0;
115 int sctp_cwnd_log_rolled=0;
116 struct sctp_cwnd_log sctp_clog[SCTP_STAT_LOG_SIZE];
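/*
 * sctp_clog[] is a circular event log: sctp_cwnd_log_at is the next slot
 * to fill and sctp_cwnd_log_rolled is set once the index has wrapped at
 * SCTP_STAT_LOG_SIZE.  Each sctp_log_*() helper below records one entry
 * and advances the index.
 */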
117 
118 void sctp_clr_stat_log(void)
119 {
120 	sctp_cwnd_log_at=0;
121 	sctp_cwnd_log_rolled=0;
122 }
123 
124 void
125 sctp_log_strm_del_alt(u_int32_t tsn, u_int16_t sseq, int from)
126 {
127 
128 	sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
129 	sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_STRM;
130 	sctp_clog[sctp_cwnd_log_at].x.strlog.n_tsn = tsn;
131 	sctp_clog[sctp_cwnd_log_at].x.strlog.n_sseq = sseq;
132 	sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = 0;
133 	sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = 0;
134 	sctp_cwnd_log_at++;
135 	if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
136 		sctp_cwnd_log_at = 0;
137 		sctp_cwnd_log_rolled = 1;
138 	}
139 
140 }
141 
142 void
143 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
144 {
145 
146 	sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
147 	sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_MAP;
148 	sctp_clog[sctp_cwnd_log_at].x.map.base = map;
149 	sctp_clog[sctp_cwnd_log_at].x.map.cum = cum;
150 	sctp_clog[sctp_cwnd_log_at].x.map.high = high;
151 	sctp_cwnd_log_at++;
152 	if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
153 		sctp_cwnd_log_at = 0;
154 		sctp_cwnd_log_rolled = 1;
155 	}
156 }
157 
158 void
159 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
160     int from)
161 {
162 
163 	sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
164 	sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_FR;
165 	sctp_clog[sctp_cwnd_log_at].x.fr.largest_tsn = biggest_tsn;
166 	sctp_clog[sctp_cwnd_log_at].x.fr.largest_new_tsn = biggest_new_tsn;
167 	sctp_clog[sctp_cwnd_log_at].x.fr.tsn = tsn;
168 	sctp_cwnd_log_at++;
169 	if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
170 		sctp_cwnd_log_at = 0;
171 		sctp_cwnd_log_rolled = 1;
172 	}
173 }
174 
175 void
176 sctp_log_strm_del(struct sctp_tmit_chunk *chk, struct sctp_tmit_chunk *poschk,
177     int from)
178 {
179 
180 	if (chk == NULL) {
181 		printf("Gak log of NULL?\n");
182 		return;
183 	}
184 	sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
185 	sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_STRM;
186 	sctp_clog[sctp_cwnd_log_at].x.strlog.n_tsn = chk->rec.data.TSN_seq;
187 	sctp_clog[sctp_cwnd_log_at].x.strlog.n_sseq = chk->rec.data.stream_seq;
188 	if (poschk != NULL) {
189 		sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn =
190 		    poschk->rec.data.TSN_seq;
191 		sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq =
192 		    poschk->rec.data.stream_seq;
193 	} else {
194 		sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = 0;
195 		sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = 0;
196 	}
197 	sctp_cwnd_log_at++;
198 	if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
199 		sctp_cwnd_log_at = 0;
200 		sctp_cwnd_log_rolled = 1;
201 	}
202 }
203 
204 void
205 sctp_log_cwnd(struct sctp_nets *net, int augment, uint8_t from)
206 {
207 
208 	sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
209 	sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_CWND;
210 	sctp_clog[sctp_cwnd_log_at].x.cwnd.net = net;
211 	sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_new_value = net->cwnd;
212 	sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size;
213 	sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_augment = augment;
214 	sctp_cwnd_log_at++;
215 	if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
216 		sctp_cwnd_log_at = 0;
217 		sctp_cwnd_log_rolled = 1;
218 	}
219 }
220 
221 void
222 sctp_log_maxburst(struct sctp_nets *net, int error, int burst, uint8_t from)
223 {
224 	sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
225 	sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_MAXBURST;
226 	sctp_clog[sctp_cwnd_log_at].x.cwnd.net = net;
227 	sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_new_value = error;
228 	sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size;
229 	sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_augment = burst;
230 	sctp_cwnd_log_at++;
231 	if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
232 		sctp_cwnd_log_at = 0;
233 		sctp_cwnd_log_rolled = 1;
234 	}
235 }
236 
237 void
238 sctp_log_rwnd(uint8_t from, u_int32_t peers_rwnd, u_int32_t snd_size, u_int32_t overhead)
239 {
240 	sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
241 	sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_RWND;
242 	sctp_clog[sctp_cwnd_log_at].x.rwnd.rwnd = peers_rwnd;
243 	sctp_clog[sctp_cwnd_log_at].x.rwnd.send_size = snd_size;
244 	sctp_clog[sctp_cwnd_log_at].x.rwnd.overhead = overhead;
245 	sctp_clog[sctp_cwnd_log_at].x.rwnd.new_rwnd = 0;
246 	sctp_cwnd_log_at++;
247 	if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
248 		sctp_cwnd_log_at = 0;
249 		sctp_cwnd_log_rolled = 1;
250 	}
251 }
252 
253 void
254 sctp_log_rwnd_set(uint8_t from, u_int32_t peers_rwnd, u_int32_t flight_size, u_int32_t overhead, u_int32_t a_rwndval)
255 {
256 	sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
257 	sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_RWND;
258 	sctp_clog[sctp_cwnd_log_at].x.rwnd.rwnd = peers_rwnd;
259 	sctp_clog[sctp_cwnd_log_at].x.rwnd.send_size = flight_size;
260 	sctp_clog[sctp_cwnd_log_at].x.rwnd.overhead = overhead;
261 	sctp_clog[sctp_cwnd_log_at].x.rwnd.new_rwnd = a_rwndval;
262 	sctp_cwnd_log_at++;
263 	if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
264 		sctp_cwnd_log_at = 0;
265 		sctp_cwnd_log_rolled = 1;
266 	}
267 }
268 
269 void
270 sctp_log_mbcnt(uint8_t from, u_int32_t total_oq, u_int32_t book, u_int32_t total_mbcnt_q, u_int32_t mbcnt)
271 {
272 	sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
273 	sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_MBCNT;
274 	sctp_clog[sctp_cwnd_log_at].x.mbcnt.total_queue_size = total_oq;
275 	sctp_clog[sctp_cwnd_log_at].x.mbcnt.size_change  = book;
276 	sctp_clog[sctp_cwnd_log_at].x.mbcnt.total_queue_mb_size = total_mbcnt_q;
277 	sctp_clog[sctp_cwnd_log_at].x.mbcnt.mbcnt_change = mbcnt;
278 	sctp_cwnd_log_at++;
279 	if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
280 		sctp_cwnd_log_at = 0;
281 		sctp_cwnd_log_rolled = 1;
282 	}
283 }
284 
285 void
286 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc)
287 {
288 
289 	sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
290 	sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_BLOCK;
291 	sctp_clog[sctp_cwnd_log_at].x.blk.maxmb = (u_int16_t)(so->so_snd.sb_mbmax/1024);
292 	sctp_clog[sctp_cwnd_log_at].x.blk.onmb = asoc->total_output_mbuf_queue_size;
293 	sctp_clog[sctp_cwnd_log_at].x.blk.maxsb = (u_int16_t)(so->so_snd.sb_hiwat/1024);
294 	sctp_clog[sctp_cwnd_log_at].x.blk.onsb = asoc->total_output_queue_size;
295 	sctp_clog[sctp_cwnd_log_at].x.blk.send_sent_qcnt = (u_int16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
296 	sctp_clog[sctp_cwnd_log_at].x.blk.stream_qcnt = (u_int16_t)asoc->stream_queue_cnt;
297 	sctp_cwnd_log_at++;
298 	if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
299 		sctp_cwnd_log_at = 0;
300 		sctp_cwnd_log_rolled = 1;
301 	}
302 }
303 
304 int
305 sctp_fill_stat_log(struct mbuf *m)
306 {
307 	struct sctp_cwnd_log_req *req;
308 	int size_limit, num, i, at, cnt_out=0;
309 
310 	if (m == NULL)
311 		return (EINVAL);
312 
313 	size_limit = (m->m_len - sizeof(struct sctp_cwnd_log_req));
314 	if (size_limit < sizeof(struct sctp_cwnd_log)) {
315 		return (EINVAL);
316 	}
317 	req = mtod(m, struct sctp_cwnd_log_req *);
318 	num = size_limit/sizeof(struct sctp_cwnd_log);
319 	if (sctp_cwnd_log_rolled) {
320 		req->num_in_log = SCTP_STAT_LOG_SIZE;
321 	} else {
322 		req->num_in_log = sctp_cwnd_log_at;
323 		/* if the log has not rolled, we don't
324 		 * let you have old data.
325 		 */
326  		if (req->end_at > sctp_cwnd_log_at) {
327 			req->end_at = sctp_cwnd_log_at;
328 		}
329 	}
330 	if ((num < SCTP_STAT_LOG_SIZE) &&
331 	    ((sctp_cwnd_log_rolled) || (sctp_cwnd_log_at > num))) {
332 		/* we can't return all of it */
333 		if (((req->start_at == 0) && (req->end_at == 0)) ||
334 		    (req->start_at >= SCTP_STAT_LOG_SIZE) ||
335 		    (req->end_at >= SCTP_STAT_LOG_SIZE)) {
336 			/* No user request or user is wacked. */
337 			req->num_ret = num;
338 			req->end_at = sctp_cwnd_log_at - 1;
339 			if ((sctp_cwnd_log_at - num) < 0) {
340 				int cc;
341 				cc = num - sctp_cwnd_log_at;
342 				req->start_at = SCTP_STAT_LOG_SIZE - cc;
343 			} else {
344 				req->start_at = sctp_cwnd_log_at - num;
345 			}
346 		} else {
347 			/* a user request */
348 			int cc;
349 			if (req->start_at > req->end_at) {
350 				cc = (SCTP_STAT_LOG_SIZE - req->start_at) +
351 				    (req->end_at + 1);
352 			} else {
353 
354 				cc = req->end_at - req->start_at;
355 			}
356 			if (cc < num) {
357 				num = cc;
358 			}
359 			req->num_ret = num;
360 		}
361 	} else {
362 		/* We can return all of it */
363 		req->start_at = 0;
364 		req->end_at = sctp_cwnd_log_at - 1;
365 		req->num_ret = sctp_cwnd_log_at;
366 	}
367 	for (i = 0, at = req->start_at; i < req->num_ret; i++) {
368 		req->log[i] = sctp_clog[at];
369 		cnt_out++;
370 		at++;
371 		if (at >= SCTP_STAT_LOG_SIZE)
372 			at = 0;
373 	}
374 	m->m_len = (cnt_out * sizeof(struct sctp_cwnd_log)) + sizeof(struct sctp_cwnd_log_req);
375 	return (0);
376 }
377 
378 #endif
379 
380 #ifdef SCTP_AUDITING_ENABLED
381 u_int8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
382 static int sctp_audit_indx = 0;
383 
384 static
385 void sctp_print_audit_report(void)
386 {
387 	int i;
388 	int cnt;
389 	cnt = 0;
390 	for (i=sctp_audit_indx;i<SCTP_AUDIT_SIZE;i++) {
391 		if ((sctp_audit_data[i][0] == 0xe0) &&
392 		    (sctp_audit_data[i][1] == 0x01)) {
393 			cnt = 0;
394 			printf("\n");
395 		} else if (sctp_audit_data[i][0] == 0xf0) {
396 			cnt = 0;
397 			printf("\n");
398 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
399 		    (sctp_audit_data[i][1] == 0x01)) {
400 			printf("\n");
401 			cnt = 0;
402 		}
403 		printf("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
404 		    (uint32_t)sctp_audit_data[i][1]);
405 		cnt++;
406 		if ((cnt % 14) == 0)
407 			printf("\n");
408 	}
409 	for (i=0;i<sctp_audit_indx;i++) {
410 		if ((sctp_audit_data[i][0] == 0xe0) &&
411 		    (sctp_audit_data[i][1] == 0x01)) {
412 			cnt = 0;
413 			printf("\n");
414 		} else if (sctp_audit_data[i][0] == 0xf0) {
415 			cnt = 0;
416 			printf("\n");
417 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
418 			 (sctp_audit_data[i][1] == 0x01)) {
419 			printf("\n");
420 			cnt = 0;
421 		}
422 		printf("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
423 		    (uint32_t)sctp_audit_data[i][1]);
424 		cnt++;
425 		if ((cnt % 14) == 0)
426 			printf("\n");
427 	}
428 	printf("\n");
429 }
430 
431 void sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
432     struct sctp_nets *net)
433 {
434 	int resend_cnt, tot_out, rep, tot_book_cnt;
435 	struct sctp_nets *lnet;
436 	struct sctp_tmit_chunk *chk;
437 
438 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
439 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
440 	sctp_audit_indx++;
441 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
442 		sctp_audit_indx = 0;
443 	}
444 	if (inp == NULL) {
445 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
446 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
447 		sctp_audit_indx++;
448 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
449 			sctp_audit_indx = 0;
450 		}
451 		return;
452 	}
453 	if (stcb == NULL) {
454 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
455 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
456 		sctp_audit_indx++;
457 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
458 			sctp_audit_indx = 0;
459 		}
460 		return;
461 	}
462 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
463 	sctp_audit_data[sctp_audit_indx][1] =
464 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
465 	sctp_audit_indx++;
466 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
467 		sctp_audit_indx = 0;
468 	}
469 	rep = 0;
470 	tot_book_cnt = 0;
471 	resend_cnt = tot_out = 0;
472 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
473 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
474 			resend_cnt++;
475 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
476 			tot_out += chk->book_size;
477 			tot_book_cnt++;
478 		}
479 	}
480 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
481 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
482 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
483 		sctp_audit_indx++;
484 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
485 			sctp_audit_indx = 0;
486 		}
487 		printf("resend_cnt:%d asoc-tot:%d\n",
488 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
489 		rep = 1;
490 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
491 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
492 		sctp_audit_data[sctp_audit_indx][1] =
493 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
494 		sctp_audit_indx++;
495 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
496 			sctp_audit_indx = 0;
497 		}
498 	}
499 	if (tot_out != stcb->asoc.total_flight) {
500 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
501 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
502 		sctp_audit_indx++;
503 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
504 			sctp_audit_indx = 0;
505 		}
506 		rep = 1;
507 		printf("tot_flt:%d asoc_tot:%d\n", tot_out,
508 		    (int)stcb->asoc.total_flight);
509 		stcb->asoc.total_flight = tot_out;
510 	}
511 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
512 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
513 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
514 		sctp_audit_indx++;
515 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
516 			sctp_audit_indx = 0;
517 		}
518 		rep = 1;
519 		printf("tot_flt_book:%d\n", tot_book_cnt);
520 
521 		stcb->asoc.total_flight_count = tot_book_cnt;
522 	}
523 	tot_out = 0;
524 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
525 		tot_out += lnet->flight_size;
526 	}
527 	if (tot_out != stcb->asoc.total_flight) {
528 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
529 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
530 		sctp_audit_indx++;
531 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
532 			sctp_audit_indx = 0;
533 		}
534 		rep = 1;
535 		printf("real flight:%d net total was %d\n",
536 		    stcb->asoc.total_flight, tot_out);
537 		/* now corrective action */
538 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
539 			tot_out = 0;
540 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
541 				if ((chk->whoTo == lnet) &&
542 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
543 					tot_out += chk->book_size;
544 				}
545 			}
546 			if (lnet->flight_size != tot_out) {
547 				printf("net:%x flight was %d corrected to %d\n",
548 				    (uint32_t)lnet, lnet->flight_size, tot_out);
549 				lnet->flight_size = tot_out;
550 			}
551 
552 		}
553 	}
554 
555 	if (rep) {
556 		sctp_print_audit_report();
557 	}
558 }
559 
560 void
561 sctp_audit_log(u_int8_t ev, u_int8_t fd)
562 {
563 	sctp_audit_data[sctp_audit_indx][0] = ev;
564 	sctp_audit_data[sctp_audit_indx][1] = fd;
565 	sctp_audit_indx++;
566 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
567 		sctp_audit_indx = 0;
568 	}
569 }
570 
571 #endif
572 
573 /*
574  * a list of sizes based on typical MTUs; used only if the next hop's
575  * MTU size is not returned.
576  */
577 static int sctp_mtu_sizes[] = {
578 	68,
579 	296,
580 	508,
581 	512,
582 	544,
583 	576,
584 	1006,
585 	1492,
586 	1500,
587 	1536,
588 	2002,
589 	2048,
590 	4352,
591 	4464,
592 	8166,
593 	17914,
594 	32000,
595 	65535
596 };
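/* NUMBER_OF_MTU_SIZES, defined above, must match the entry count of this table. */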
597 
598 int
599 find_next_best_mtu(int totsz)
600 {
601 	int i, perfer;
602 	/*
603 	 * if we are in here we must find the next best fit based on the
604 	 * size of the datagram that failed to be sent.
605 	 */
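	/*
	 * Note: this returns the largest table entry that does not exceed
	 * totsz.  If totsz is below the smallest entry, or at least as large
	 * as the largest entry, the smallest size (68) is returned.
	 */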
606 	perfer = 0;
607 	for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
608 		if (totsz < sctp_mtu_sizes[i]) {
609 			perfer = i - 1;
610 			if (perfer < 0)
611 				perfer = 0;
612 			break;
613 		}
614 	}
615 	return (sctp_mtu_sizes[perfer]);
616 }
617 
618 uint32_t
619 sctp_select_initial_TSN(struct sctp_pcb *m)
620 {
621 	return cprng_strong32();
622 }
623 
624 u_int32_t sctp_select_a_tag(struct sctp_inpcb *m)
625 {
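	/*
	 * Draw strong random 32-bit values until we get one that is non-zero
	 * and that sctp_is_vtag_good() reports as usable right now.
	 */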
626 	u_long x, not_done;
627 	struct timeval now;
628 
629 	SCTP_GETTIME_TIMEVAL(&now);
630 	not_done = 1;
631 	while (not_done) {
632 		x = sctp_select_initial_TSN(&m->sctp_ep);
633 		if (x == 0) {
634 			/* we never use 0 */
635 			continue;
636 		}
637 		if (sctp_is_vtag_good(m, x, &now)) {
638 			not_done = 0;
639 		}
640 	}
641 	return (x);
642 }
643 
644 
645 int
646 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_association *asoc,
647 	       int for_a_init, uint32_t override_tag)
648 {
649 	/*
650 	 * Anything set to zero is taken care of by the allocation
651 	 * routine's bzero
652 	 */
653 
654 	/*
655 	 * Up front select what scoping to apply on addresses I tell my peer
656 	 * Not sure what to do with these right now, we will need to come up
657 	 * with a way to set them. We may need to pass them through from the
658 	 * caller in the sctp_aloc_assoc() function.
659 	 */
660 	int i;
661 	/* init all variables to a known value.*/
662 	asoc->state = SCTP_STATE_INUSE;
663 	asoc->max_burst = m->sctp_ep.max_burst;
664 	asoc->heart_beat_delay = m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT];
665 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
666 
667 	if (override_tag) {
668 		asoc->my_vtag = override_tag;
669 	} else {
670 		asoc->my_vtag = sctp_select_a_tag(m);
671 	}
672 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
673 		sctp_select_initial_TSN(&m->sctp_ep);
674 	asoc->t3timeout_highest_marked = asoc->asconf_seq_out;
675 	/* we are optimistic here */
676 	asoc->peer_supports_asconf = 1;
677 	asoc->peer_supports_asconf_setprim = 1;
678 	asoc->peer_supports_pktdrop = 1;
679 
680 	asoc->sent_queue_retran_cnt = 0;
681 	/* This will need to be adjusted */
682 	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
683 	asoc->last_acked_seq = asoc->init_seq_number - 1;
684 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
685 	asoc->asconf_seq_in = asoc->last_acked_seq;
686 
687 	/* here we are different, we hold the next one we expect */
688 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
689 
690 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
691 	asoc->initial_rto = m->sctp_ep.initial_rto;
692 
693 	asoc->max_init_times = m->sctp_ep.max_init_times;
694 	asoc->max_send_times = m->sctp_ep.max_send_times;
695 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
696 
697 	/* ECN Nonce initialization */
698 	asoc->ecn_nonce_allowed = 0;
699 	asoc->receiver_nonce_sum = 1;
700 	asoc->nonce_sum_expect_base = 1;
701 	asoc->nonce_sum_check = 1;
702 	asoc->nonce_resync_tsn = 0;
703 	asoc->nonce_wait_for_ecne = 0;
704 	asoc->nonce_wait_tsn = 0;
705 
706 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
707 		struct in6pcb *inp6;
708 
709 
710 		/* Its a V6 socket */
711 		inp6 = (struct in6pcb *)m;
712 		asoc->ipv6_addr_legal = 1;
713 		/* Now look at the binding flag to see if V4 will be legal */
714 	if (
715 #if defined(__OpenBSD__)
716 		(0) /* we always do dual bind */
717 #elif defined (__NetBSD__)
718 		(inp6->in6p_flags & IN6P_IPV6_V6ONLY)
719 #else
720 		(inp6->inp_flags & IN6P_IPV6_V6ONLY)
721 #endif
722 	     == 0) {
723 			asoc->ipv4_addr_legal = 1;
724 		} else {
725 			/* V4 addresses are NOT legal on the association */
726 			asoc->ipv4_addr_legal = 0;
727 		}
728 	} else {
729 		/* Its a V4 socket, no - V6 */
730 		asoc->ipv4_addr_legal = 1;
731 		asoc->ipv6_addr_legal = 0;
732 	}
733 
734 
735 	asoc->my_rwnd = uimax(m->sctp_socket->so_rcv.sb_hiwat, SCTP_MINIMAL_RWND);
736 	asoc->peers_rwnd = m->sctp_socket->so_rcv.sb_hiwat;
737 
738 	asoc->smallest_mtu = m->sctp_frag_point;
739 	asoc->minrto = m->sctp_ep.sctp_minrto;
740 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
741 
742 	LIST_INIT(&asoc->sctp_local_addr_list);
743 	TAILQ_INIT(&asoc->nets);
744 	TAILQ_INIT(&asoc->pending_reply_queue);
745 	asoc->last_asconf_ack_sent = NULL;
746 	/* Setup to fill the hb random cache at first HB */
747 	asoc->hb_random_idx = 4;
748 
749 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
750 
751 	/*
752 	 * Now the stream parameters, here we allocate space for all
753 	 * streams that we request by default.
754 	 */
755 	asoc->streamoutcnt = asoc->pre_open_streams =
756 	    m->sctp_ep.pre_open_stream_count;
757 	asoc->strmout = malloc(asoc->streamoutcnt *
758 	    sizeof(struct sctp_stream_out), M_PCB, M_NOWAIT);
759 	if (asoc->strmout == NULL) {
760 		/* big trouble no memory */
761 		return (ENOMEM);
762 	}
763 	for (i = 0; i < asoc->streamoutcnt; i++) {
764 		/*
765 		 * inbound side must be set to 0xffff,
766 		 * also NOTE when we get the INIT-ACK back (for INIT sender)
767 		 * we MUST reduce the count (streamoutcnt) but first check
768 		 * if we sent to any of the upper streams that were dropped
769 		 * (if some were). Those that were dropped must be notified
770 		 * to the upper layer as failed to send.
771 		 */
772 		asoc->strmout[i].next_sequence_sent = 0x0;
773 		TAILQ_INIT(&asoc->strmout[i].outqueue);
774 		asoc->strmout[i].stream_no = i;
775 		asoc->strmout[i].next_spoke.tqe_next = 0;
776 		asoc->strmout[i].next_spoke.tqe_prev = 0;
777 	}
778 	/* Now the mapping array */
779 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
780 	asoc->mapping_array = malloc(asoc->mapping_array_size,
781 	       M_PCB, M_NOWAIT);
782 	if (asoc->mapping_array == NULL) {
783 		free(asoc->strmout, M_PCB);
784 		return (ENOMEM);
785 	}
786 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
787 	/* Now the init of the other outqueues */
788 	TAILQ_INIT(&asoc->out_wheel);
789 	TAILQ_INIT(&asoc->control_send_queue);
790 	TAILQ_INIT(&asoc->send_queue);
791 	TAILQ_INIT(&asoc->sent_queue);
792 	TAILQ_INIT(&asoc->reasmqueue);
793 	TAILQ_INIT(&asoc->delivery_queue);
794 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
795 
796 	TAILQ_INIT(&asoc->asconf_queue);
797 	return (0);
798 }
799 
800 int
801 sctp_expand_mapping_array(struct sctp_association *asoc)
802 {
803 	/* mapping array needs to grow */
804 	u_int8_t *new_array;
805 	uint16_t new_size, old_size;
806 
807 	old_size = asoc->mapping_array_size;
808 	new_size = old_size + SCTP_MAPPING_ARRAY_INCR;
809 	new_array = malloc(new_size, M_PCB, M_NOWAIT);
810 	if (new_array == NULL) {
811 		/* can't get more, forget it */
812 		printf("No memory for expansion of SCTP mapping array %d\n",
813 		       new_size);
814 		return (-1);
815 	}
816 	memcpy(new_array, asoc->mapping_array, old_size);
817 	memset(new_array + old_size, 0, SCTP_MAPPING_ARRAY_INCR);
818 	free(asoc->mapping_array, M_PCB);
819 	asoc->mapping_array = new_array;
820 	asoc->mapping_array_size = new_size;
821 	return (0);
822 }
823 
824 static void
825 sctp_timeout_handler(void *t)
826 {
827 	struct sctp_inpcb *inp;
828 	struct sctp_tcb *stcb;
829 	struct sctp_nets *net;
830 	struct sctp_timer *tmr;
831 	int did_output;
832 
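	/*
	 * Common callout handler for all SCTP timer types: take softnet_lock,
	 * hold an extra reference on the inp while the handler runs, dispatch
	 * on tmr->type, then drop the reference on the way out.
	 */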
833 	mutex_enter(softnet_lock);
834 	tmr = (struct sctp_timer *)t;
835 	inp = (struct sctp_inpcb *)tmr->ep;
836 	stcb = (struct sctp_tcb *)tmr->tcb;
837 	net = (struct sctp_nets *)tmr->net;
838 	did_output = 1;
839 
840 #ifdef SCTP_AUDITING_ENABLED
841 	sctp_audit_log(0xF0, (u_int8_t)tmr->type);
842 	sctp_auditing(3, inp, stcb, net);
843 #endif
844 	sctp_pegs[SCTP_TIMERS_EXP]++;
845 
846 	if (inp == NULL) {
		/* drop softnet_lock taken above before bailing out */
		mutex_exit(softnet_lock);
847 		return;
848 	}
849 
850 	SCTP_INP_WLOCK(inp);
851 	if (inp->sctp_socket == 0) {
852 		mutex_exit(softnet_lock);
853 		SCTP_INP_WUNLOCK(inp);
854 		return;
855 	}
856 	if (stcb) {
857 		if (stcb->asoc.state == 0) {
858 			mutex_exit(softnet_lock);
859 			SCTP_INP_WUNLOCK(inp);
860 			return;
861 		}
862 	}
863 #ifdef SCTP_DEBUG
864 	if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
865 		printf("Timer type %d goes off\n", tmr->type);
866 	}
867 #endif /* SCTP_DEBUG */
868 #ifndef __NetBSD__
869 	if (!callout_active(&tmr->timer)) {
870 		SCTP_INP_WUNLOCK(inp);
871 		return;
872 	}
873 #endif
874 	if (stcb) {
875 		SCTP_TCB_LOCK(stcb);
876 	}
877 	SCTP_INP_INCR_REF(inp);
878 	SCTP_INP_WUNLOCK(inp);
879 
880 	switch (tmr->type) {
881 	case SCTP_TIMER_TYPE_ITERATOR:
882 	{
883 		struct sctp_iterator *it;
884 		it = (struct sctp_iterator *)inp;
885 		sctp_iterator_timer(it);
886 	}
887 	break;
888 	/* call the handler for the appropriate timer type */
889 	case SCTP_TIMER_TYPE_SEND:
890 		sctp_pegs[SCTP_TMIT_TIMER]++;
891 		stcb->asoc.num_send_timers_up--;
892 		if (stcb->asoc.num_send_timers_up < 0) {
893 			stcb->asoc.num_send_timers_up = 0;
894 		}
895 		if (sctp_t3rxt_timer(inp, stcb, net)) {
896 			/* no need to unlock on tcb its gone */
897 
898 			goto out_decr;
899 		}
900 #ifdef SCTP_AUDITING_ENABLED
901 		sctp_auditing(4, inp, stcb, net);
902 #endif
903 		sctp_chunk_output(inp, stcb, 1);
904 		if ((stcb->asoc.num_send_timers_up == 0) &&
905 		    (stcb->asoc.sent_queue_cnt > 0)
906 			) {
907 			struct sctp_tmit_chunk *chk;
908 			/*
909 			 * safeguard. If there are chunks on the sent queue
910 			 * somewhere but no timers running, something is
911 			 * wrong... so we start a timer on the first chunk
912 			 * on the send queue on whatever net it is sent to.
913 			 */
914 			sctp_pegs[SCTP_T3_SAFEGRD]++;
915 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
916 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
917 					 chk->whoTo);
918 		}
919 		break;
920 	case SCTP_TIMER_TYPE_INIT:
921 		if (sctp_t1init_timer(inp, stcb, net)) {
922 			/* no need to unlock on tcb its gone */
923 			goto out_decr;
924 		}
925 		/* We do output but not here */
926 		did_output = 0;
927 		break;
928 	case SCTP_TIMER_TYPE_RECV:
929 		sctp_pegs[SCTP_RECV_TIMER]++;
930 		sctp_send_sack(stcb);
931 #ifdef SCTP_AUDITING_ENABLED
932 		sctp_auditing(4, inp, stcb, net);
933 #endif
934 		sctp_chunk_output(inp, stcb, 4);
935 		break;
936 	case SCTP_TIMER_TYPE_SHUTDOWN:
937 		if (sctp_shutdown_timer(inp, stcb, net) ) {
938 			/* no need to unlock on tcb its gone */
939 			goto out_decr;
940 		}
941 #ifdef SCTP_AUDITING_ENABLED
942 		sctp_auditing(4, inp, stcb, net);
943 #endif
944 		sctp_chunk_output(inp, stcb, 5);
945 		break;
946 	case SCTP_TIMER_TYPE_HEARTBEAT:
947 		if (sctp_heartbeat_timer(inp, stcb, net)) {
948 			/* no need to unlock on tcb its gone */
949 			goto out_decr;
950 		}
951 #ifdef SCTP_AUDITING_ENABLED
952 		sctp_auditing(4, inp, stcb, net);
953 #endif
954 		sctp_chunk_output(inp, stcb, 6);
955 		break;
956 	case SCTP_TIMER_TYPE_COOKIE:
957 		if (sctp_cookie_timer(inp, stcb, net)) {
958 			/* no need to unlock on tcb its gone */
959 			goto out_decr;
960 		}
961 #ifdef SCTP_AUDITING_ENABLED
962 		sctp_auditing(4, inp, stcb, net);
963 #endif
964 		sctp_chunk_output(inp, stcb, 1);
965 		break;
966 	case SCTP_TIMER_TYPE_NEWCOOKIE:
967 	{
968 		struct timeval tv;
969 		int i, secret;
970 		SCTP_GETTIME_TIMEVAL(&tv);
971 		SCTP_INP_WLOCK(inp);
972 		inp->sctp_ep.time_of_secret_change = tv.tv_sec;
973 		inp->sctp_ep.last_secret_number =
974 			inp->sctp_ep.current_secret_number;
975 		inp->sctp_ep.current_secret_number++;
976 		if (inp->sctp_ep.current_secret_number >=
977 		    SCTP_HOW_MANY_SECRETS) {
978 			inp->sctp_ep.current_secret_number = 0;
979 		}
980 		secret = (int)inp->sctp_ep.current_secret_number;
981 		for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
982 			inp->sctp_ep.secret_key[secret][i] =
983 				sctp_select_initial_TSN(&inp->sctp_ep);
984 		}
985 		SCTP_INP_WUNLOCK(inp);
986 		sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
987 	}
988 	did_output = 0;
989 	break;
990 	case SCTP_TIMER_TYPE_PATHMTURAISE:
991 		sctp_pathmtu_timer(inp, stcb, net);
992 		did_output = 0;
993 		break;
994 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
995 		if (sctp_shutdownack_timer(inp, stcb, net)) {
996 			/* no need to unlock on tcb its gone */
997 			goto out_decr;
998 		}
999 #ifdef SCTP_AUDITING_ENABLED
1000 		sctp_auditing(4, inp, stcb, net);
1001 #endif
1002 		sctp_chunk_output(inp, stcb, 7);
1003 		break;
1004 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1005 		sctp_abort_an_association(inp, stcb,
1006 					  SCTP_SHUTDOWN_GUARD_EXPIRES, NULL);
1007 		/* no need to unlock on tcb its gone */
1008 		goto out_decr;
1009 		break;
1010 
1011 	case SCTP_TIMER_TYPE_STRRESET:
1012 		if (sctp_strreset_timer(inp, stcb, net)) {
1013 			/* no need to unlock on tcb its gone */
1014 			goto out_decr;
1015 		}
1016 		sctp_chunk_output(inp, stcb, 9);
1017 		break;
1018 
1019 	case SCTP_TIMER_TYPE_ASCONF:
1020 		if (sctp_asconf_timer(inp, stcb, net)) {
1021 			/* no need to unlock on tcb its gone */
1022 			goto out_decr;
1023 		}
1024 #ifdef SCTP_AUDITING_ENABLED
1025 		sctp_auditing(4, inp, stcb, net);
1026 #endif
1027 		sctp_chunk_output(inp, stcb, 8);
1028 		break;
1029 
1030 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1031 		sctp_autoclose_timer(inp, stcb, net);
1032 		sctp_chunk_output(inp, stcb, 10);
1033 		did_output = 0;
1034 		break;
1035 	case SCTP_TIMER_TYPE_INPKILL:
1036 		/* special case, take away our
1037 		 * increment since WE are the killer
1038 		 */
1039 		SCTP_INP_WLOCK(inp);
1040 		SCTP_INP_DECR_REF(inp);
1041 		SCTP_INP_WUNLOCK(inp);
1042 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL);
1043 		sctp_inpcb_free(inp, 1);
1044 		goto out_no_decr;
1045 		break;
1046 	default:
1047 #ifdef SCTP_DEBUG
1048 		if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1049 			printf("sctp_timeout_handler:unknown timer %d\n",
1050 			       tmr->type);
1051 		}
1052 #endif /* SCTP_DEBUG */
1053 		break;
1054 	};
1055 #ifdef SCTP_AUDITING_ENABLED
1056 	sctp_audit_log(0xF1, (u_int8_t)tmr->type);
1057 	sctp_auditing(5, inp, stcb, net);
1058 #endif
1059 	if (did_output) {
1060 		/*
1061 		 * Now we need to clean up the control chunk chain if an
1062 		 * ECNE is on it. It must be marked as UNSENT again so next
1063 		 * call will continue to send it until such time that we get
1064 		 * a CWR, to remove it. It is, however, unlikely that we
1065 		 * will find an ECN echo on the chain.
1066 		 */
1067 		sctp_fix_ecn_echo(&stcb->asoc);
1068 	}
1069 	if (stcb) {
1070 		SCTP_TCB_UNLOCK(stcb);
1071 	}
1072  out_decr:
1073 	SCTP_INP_WLOCK(inp);
1074 	SCTP_INP_DECR_REF(inp);
1075 	SCTP_INP_WUNLOCK(inp);
1076 
1077  out_no_decr:
1078 
1079 	mutex_exit(softnet_lock);
1080 }
1081 
1082 int
1083 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1084     struct sctp_nets *net)
1085 {
1086 	int to_ticks;
1087 	struct sctp_timer *tmr;
1088 
1089 	if (inp == NULL)
1090 		return (EFAULT);
1091 
1092 	to_ticks = 0;
1093 
1094 	tmr = NULL;
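	/*
	 * Each case below selects both the callout to arm (tmr) and its
	 * duration in ticks (to_ticks); a missing stcb/net for a timer type
	 * that needs one yields EFAULT.
	 */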
1095 	switch (t_type) {
1096 	case SCTP_TIMER_TYPE_ITERATOR:
1097 	{
1098 		struct sctp_iterator *it;
1099 		it = (struct sctp_iterator *)inp;
1100 		tmr = &it->tmr;
1101 		to_ticks = SCTP_ITERATOR_TICKS;
1102 	}
1103 	break;
1104 	case SCTP_TIMER_TYPE_SEND:
1105 		/* Here we use the RTO timer */
1106 	{
1107 		int rto_val;
1108 		if ((stcb == NULL) || (net == NULL)) {
1109 			return (EFAULT);
1110 		}
1111 		tmr = &net->rxt_timer;
1112 		if (net->RTO == 0) {
1113 			rto_val = stcb->asoc.initial_rto;
1114 		} else {
1115 			rto_val = net->RTO;
1116 		}
1117 		to_ticks = MSEC_TO_TICKS(rto_val);
1118 	}
1119 	break;
1120 	case SCTP_TIMER_TYPE_INIT:
1121 		/*
1122 		 * Here we use the INIT timer default
1123 		 * usually about 1 minute.
1124 		 */
1125 		if ((stcb == NULL) || (net == NULL)) {
1126 			return (EFAULT);
1127 		}
1128 		tmr = &net->rxt_timer;
1129 		if (net->RTO == 0) {
1130 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1131 		} else {
1132 			to_ticks = MSEC_TO_TICKS(net->RTO);
1133 		}
1134 		break;
1135 	case SCTP_TIMER_TYPE_RECV:
1136 		/*
1137 		 * Here we use the Delayed-Ack timer value from the inp
1138 		 * usually about 200 ms.
1139 		 */
1140 		if (stcb == NULL) {
1141 			return (EFAULT);
1142 		}
1143 		tmr = &stcb->asoc.dack_timer;
1144 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV];
1145 		break;
1146 	case SCTP_TIMER_TYPE_SHUTDOWN:
1147 		/* Here we use the RTO of the destination. */
1148 		if ((stcb == NULL) || (net == NULL)) {
1149 			return (EFAULT);
1150 		}
1151 
1152 		if (net->RTO == 0) {
1153 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1154 		} else {
1155 			to_ticks = MSEC_TO_TICKS(net->RTO);
1156 		}
1157 		tmr = &net->rxt_timer;
1158 		break;
1159 	case SCTP_TIMER_TYPE_HEARTBEAT:
1160 		/*
1161 		 * the net is used here so that we can add in the RTO, even
1162 		 * though we use a different timer. We also add the HB
1163 		 * delay PLUS a random jitter.
1164 		 */
1165 		if (stcb == NULL) {
1166 			return (EFAULT);
1167 		}
1168 		{
1169 			uint32_t rndval;
1170 			uint8_t this_random;
1171 			int cnt_of_unconf=0;
1172 			struct sctp_nets *lnet;
1173 
1174 			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
1175 				if (lnet->dest_state & SCTP_ADDR_UNCONFIRMED) {
1176 					cnt_of_unconf++;
1177 				}
1178 			}
1179 #ifdef SCTP_DEBUG
1180 			if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1181 				printf("HB timer to start unconfirmed:%d hb_delay:%d\n",
1182 				       cnt_of_unconf, stcb->asoc.heart_beat_delay);
1183 			}
1184 #endif
1185 			if (stcb->asoc.hb_random_idx > 3) {
1186 				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
1187 				memcpy(stcb->asoc.hb_random_values, &rndval,
1188 				       sizeof(stcb->asoc.hb_random_values));
1189 				this_random = stcb->asoc.hb_random_values[0];
1190 				stcb->asoc.hb_random_idx = 0;
1191 				stcb->asoc.hb_ect_randombit = 0;
1192 			} else {
1193 				this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
1194 				stcb->asoc.hb_random_idx++;
1195 				stcb->asoc.hb_ect_randombit = 0;
1196 			}
1197 			/*
1198 			 * this_random will be 0 - 255 ms
1199 			 * RTO is in ms.
1200 			 */
1201 			if ((stcb->asoc.heart_beat_delay == 0) &&
1202 			    (cnt_of_unconf == 0)) {
1203 				/* no HB on this inp after confirmations */
1204 				return (0);
1205 			}
1206 			if (net) {
1207 				int delay;
1208 				delay = stcb->asoc.heart_beat_delay;
1209 				TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
1210 					if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1211 					    ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
1212 					    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
1213 					    delay = 0;
1214 					}
1215 				}
1216 				if (net->RTO == 0) {
1217 					/* Never been checked */
1218 					to_ticks = this_random + stcb->asoc.initial_rto + delay;
1219 				} else {
1220 					/* set rto_val to the ms */
1221 					to_ticks = delay + net->RTO + this_random;
1222 				}
1223 			} else {
1224 				if (cnt_of_unconf) {
1225 					to_ticks = this_random + stcb->asoc.initial_rto;
1226 				} else {
1227 					to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
1228 				}
1229 			}
1230 			/*
1231 			 * Now we must convert the to_ticks that are now in
1232 			 * ms to ticks.
1233 			 */
1234 			to_ticks *= hz;
1235 			to_ticks /= 1000;
1236 #ifdef SCTP_DEBUG
1237 			if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1238 				printf("Timer to expire in %d ticks\n", to_ticks);
1239 			}
1240 #endif
1241 			tmr = &stcb->asoc.hb_timer;
1242 		}
1243 		break;
1244 	case SCTP_TIMER_TYPE_COOKIE:
1245 		/*
1246 		 * Here we can use the RTO timer from the network since
1247 		 * one RTT was completed. If a retransmission happened then we will
1248 		 * be using the RTO initial value.
1249 		 */
1250 		if ((stcb == NULL) || (net == NULL)) {
1251 			return (EFAULT);
1252 		}
1253 		if (net->RTO == 0) {
1254 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1255 		} else {
1256 			to_ticks = MSEC_TO_TICKS(net->RTO);
1257 		}
1258 		tmr = &net->rxt_timer;
1259 		break;
1260 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1261 		/*
1262 		 * nothing needed but the endpoint here
1263 		 * usually about 60 minutes.
1264 		 */
1265 		tmr = &inp->sctp_ep.signature_change;
1266 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
1267 		break;
1268 	case SCTP_TIMER_TYPE_INPKILL:
1269 		/*
1270 		 * The inp is setup to die. We re-use the
1271 		 * signature_change timer since that has
1272 		 * stopped and we are in the GONE state.
1273 		 */
1274 		tmr = &inp->sctp_ep.signature_change;
1275 		to_ticks = (SCTP_INP_KILL_TIMEOUT * hz) / 1000;
1276 		break;
1277 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1278 		/*
1279 		 * Here we use the value found in the EP for PMTU
1280 		 * usually about 10 minutes.
1281 		 */
1282 		if (stcb == NULL) {
1283 			return (EFAULT);
1284 		}
1285 		if (net == NULL) {
1286 			return (EFAULT);
1287 		}
1288 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
1289 		tmr = &net->pmtu_timer;
1290 		break;
1291 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1292 		/* Here we use the RTO of the destination */
1293 		if ((stcb == NULL) || (net == NULL)) {
1294 			return (EFAULT);
1295 		}
1296 		if (net->RTO == 0) {
1297 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1298 		} else {
1299 			to_ticks = MSEC_TO_TICKS(net->RTO);
1300 		}
1301 		tmr = &net->rxt_timer;
1302 		break;
1303 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1304 		/*
1305 		 * Here we use the endpoint's shutdown guard timer
1306 		 * usually about 3 minutes.
1307 		 */
1308 		if (stcb == NULL) {
1309 			return (EFAULT);
1310 		}
1311 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
1312 		tmr = &stcb->asoc.shut_guard_timer;
1313 		break;
1314 	case SCTP_TIMER_TYPE_STRRESET:
1315 		/*
1316 		 * Here the timer comes from the inp
1317 		 * but its value is from the RTO.
1318 		 */
1319 		if ((stcb == NULL) || (net == NULL)) {
1320 			return (EFAULT);
1321 		}
1322 		if (net->RTO == 0) {
1323 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1324 		} else {
1325 			to_ticks = MSEC_TO_TICKS(net->RTO);
1326 		}
1327 		tmr = &stcb->asoc.strreset_timer;
1328 		break;
1329 
1330 	case SCTP_TIMER_TYPE_ASCONF:
1331 		/*
1332 		 * Here the timer comes from the inp
1333 		 * but its value is from the RTO.
1334 		 */
1335 		if ((stcb == NULL) || (net == NULL)) {
1336 			return (EFAULT);
1337 		}
1338 		if (net->RTO == 0) {
1339 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1340 		} else {
1341 			to_ticks = MSEC_TO_TICKS(net->RTO);
1342 		}
1343 		tmr = &stcb->asoc.asconf_timer;
1344 		break;
1345 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1346 		if (stcb == NULL) {
1347 			return (EFAULT);
1348 		}
1349 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
1350 			/* Really an error since stcb is NOT set to autoclose */
1351 			return (0);
1352 		}
1353 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
1354 		tmr = &stcb->asoc.autoclose_timer;
1355 		break;
1356 	default:
1357 #ifdef SCTP_DEBUG
1358 		if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1359 			printf("sctp_timer_start:Unknown timer type %d\n",
1360 			       t_type);
1361 		}
1362 #endif /* SCTP_DEBUG */
1363 		return (EFAULT);
1364 		break;
1365 	};
1366 	if ((to_ticks <= 0) || (tmr == NULL)) {
1367 #ifdef SCTP_DEBUG
1368 		if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1369 			printf("sctp_timer_start:%d:software error to_ticks:%d tmr:%p not set ??\n",
1370 			       t_type, to_ticks, tmr);
1371 		}
1372 #endif /* SCTP_DEBUG */
1373 		return (EFAULT);
1374 	}
1375 	if (callout_pending(&tmr->timer)) {
1376 		/*
1377 		 * we do NOT allow the timer to already be running;
1378 		 * if it is, we leave the current one unchanged
1379 		 */
1380 		return (EALREADY);
1381 	}
1382 	/* At this point we can proceed */
1383 	if (t_type == SCTP_TIMER_TYPE_SEND) {
1384 		stcb->asoc.num_send_timers_up++;
1385 	}
1386 	tmr->type = t_type;
1387 	tmr->ep = (void *)inp;
1388 	tmr->tcb = (void *)stcb;
1389 	tmr->net = (void *)net;
1390 	callout_reset(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
1391 	return (0);
1392 }
1393 
1394 int
1395 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1396 		struct sctp_nets *net)
1397 {
1398 	struct sctp_timer *tmr;
1399 
1400 	if (inp == NULL)
1401 		return (EFAULT);
1402 
1403 	tmr = NULL;
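	/*
	 * Map the timer type to the callout it uses; the callout is only
	 * stopped further below, after verifying that it is really running
	 * on behalf of the requested type (several types share a callout).
	 */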
1404 	switch (t_type) {
1405 	case SCTP_TIMER_TYPE_ITERATOR:
1406 	{
1407 		struct sctp_iterator *it;
1408 		it = (struct sctp_iterator *)inp;
1409 		tmr = &it->tmr;
1410 	}
1411 	break;
1412 	case SCTP_TIMER_TYPE_SEND:
1413 		if ((stcb == NULL) || (net == NULL)) {
1414 			return (EFAULT);
1415 		}
1416 		tmr = &net->rxt_timer;
1417 		break;
1418 	case SCTP_TIMER_TYPE_INIT:
1419 		if ((stcb == NULL) || (net == NULL)) {
1420 			return (EFAULT);
1421 		}
1422 		tmr = &net->rxt_timer;
1423 		break;
1424 	case SCTP_TIMER_TYPE_RECV:
1425 		if (stcb == NULL) {
1426 			return (EFAULT);
1427 		}
1428 		tmr = &stcb->asoc.dack_timer;
1429 		break;
1430 	case SCTP_TIMER_TYPE_SHUTDOWN:
1431 		if ((stcb == NULL) || (net == NULL)) {
1432 			return (EFAULT);
1433 		}
1434 		tmr = &net->rxt_timer;
1435 		break;
1436 	case SCTP_TIMER_TYPE_HEARTBEAT:
1437 		if (stcb == NULL) {
1438 			return (EFAULT);
1439 		}
1440 		tmr = &stcb->asoc.hb_timer;
1441 		break;
1442 	case SCTP_TIMER_TYPE_COOKIE:
1443 		if ((stcb == NULL) || (net == NULL)) {
1444 			return (EFAULT);
1445 		}
1446 		tmr = &net->rxt_timer;
1447 		break;
1448 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1449 		/* nothing needed but the endpoint here */
1450 		tmr = &inp->sctp_ep.signature_change;
1451 		/* We re-use the newcookie timer for
1452 		 * the INP kill timer. We must assure
1453 		 * that we do not kill it by accident.
1454 		 */
1455 		break;
1456 	case SCTP_TIMER_TYPE_INPKILL:
1457 		/*
1458 		 * The inp is setup to die. We re-use the
1459 		 * signature_change timer since that has
1460 		 * stopped and we are in the GONE state.
1461 		 */
1462 		tmr = &inp->sctp_ep.signature_change;
1463 		break;
1464 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1465 		if (stcb == NULL) {
1466 			return (EFAULT);
1467 		}
1468 		if (net == NULL) {
1469 			return (EFAULT);
1470 		}
1471 		tmr = &net->pmtu_timer;
1472 		break;
1473 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1474 		if ((stcb == NULL) || (net == NULL)) {
1475 			return (EFAULT);
1476 		}
1477 		tmr = &net->rxt_timer;
1478 		break;
1479 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1480 		if (stcb == NULL) {
1481 			return (EFAULT);
1482 		}
1483 		tmr = &stcb->asoc.shut_guard_timer;
1484 		break;
1485 	case SCTP_TIMER_TYPE_STRRESET:
1486 		if (stcb == NULL) {
1487 			return (EFAULT);
1488 		}
1489 		tmr = &stcb->asoc.strreset_timer;
1490 		break;
1491 	case SCTP_TIMER_TYPE_ASCONF:
1492 		if (stcb == NULL) {
1493 			return (EFAULT);
1494 		}
1495 		tmr = &stcb->asoc.asconf_timer;
1496 		break;
1497 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1498 		if (stcb == NULL) {
1499 			return (EFAULT);
1500 		}
1501 		tmr = &stcb->asoc.autoclose_timer;
1502 		break;
1503 	default:
1504 #ifdef SCTP_DEBUG
1505 		if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1506 			printf("sctp_timer_stop:Unknown timer type %d\n",
1507 			       t_type);
1508 		}
1509 #endif /* SCTP_DEBUG */
1510 		break;
1511 	};
1512 	if (tmr == NULL)
1513 		return (EFAULT);
1514 
1515 	if ((tmr->type != t_type) && tmr->type) {
1516 		/*
1517 		 * Ok, we have a timer that is under joint use, e.g. the cookie
1518 		 * timer sharing a callout with the SEND timer. We therefore are NOT
1519 		 * running the timer that the caller wants stopped.  So just
1520 		 * return.
1521 		 */
1522 		return (0);
1523 	}
1524 	if (t_type == SCTP_TIMER_TYPE_SEND) {
1525 		stcb->asoc.num_send_timers_up--;
1526 		if (stcb->asoc.num_send_timers_up < 0) {
1527 			stcb->asoc.num_send_timers_up = 0;
1528 		}
1529 	}
1530 	callout_stop(&tmr->timer);
1531 	return (0);
1532 }
1533 
1534 u_int32_t
1535 sctp_calculate_len(struct mbuf *m)
1536 {
1537 	u_int32_t tlen=0;
1538 	struct mbuf *at;
1539 	at = m;
1540 	while (at) {
1541 		tlen += at->m_len;
1542 		at = at->m_next;
1543 	}
1544 	return (tlen);
1545 }
1546 
1547 uint32_t
1548 sctp_calculate_sum(struct mbuf *m, int32_t *pktlen, uint32_t offset)
1549 {
1550 	/*
1551 	 * given an mbuf chain with a packet header, offset by 'offset'
1552 	 * so that it points at an sctphdr (with csum set to 0), go through
1553 	 * the chain of m_next's and calculate the SCTP checksum.
1554 	 * This is CRC32c.
1555 	 * As a side bonus it also calculates the total length
1556 	 * of the mbuf chain.
1557 	 * Note: if offset is greater than the total mbuf length,
1558 	 * checksum=1, pktlen=0 is returned (ie. no real error code)
1559 	 */
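	/*
	 * base starts at 0xffffffff (the CRC32c seed) and is folded over
	 * every byte past 'offset'; sctp_csum_finalize() below produces the
	 * final checksum value.
	 */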
1560 	int32_t tlen=0;
1561 	uint32_t base = 0xffffffff;
1562 	struct mbuf *at;
1563 	at = m;
1564 	/* find the correct mbuf and offset into mbuf */
1565 	while ((at != NULL) && (offset > (uint32_t)at->m_len)) {
1566 		offset -= at->m_len;	/* update remaining offset left */
1567 		at = at->m_next;
1568 	}
1569 
1570 	while (at != NULL) {
1571 		base = update_crc32(base, at->m_data + offset,
1572 		    at->m_len - offset);
1573 		tlen += at->m_len - offset;
1574 		/* we only offset once into the first mbuf */
1575 		if (offset) {
1576 			offset = 0;
1577 		}
1578 		at = at->m_next;
1579 	}
1580 	if (pktlen != NULL) {
1581 		*pktlen = tlen;
1582 	}
1583 	/* CRC-32c */
1584 	base = sctp_csum_finalize(base);
1585 	return (base);
1586 }
1587 
1588 void
1589 sctp_mtu_size_reset(struct sctp_inpcb *inp,
1590 		    struct sctp_association *asoc, u_long mtu)
1591 {
1592 	/*
1593 	 * Reset the P-MTU size on this association. This involves changing
1594 	 * the asoc MTU and going through ANY chunk+overhead larger than mtu
1595 	 * to allow the DF flag to be cleared.
1596 	 */
1597 	struct sctp_tmit_chunk *chk;
1598 	struct sctp_stream_out *strm;
1599 	unsigned int eff_mtu, ovh;
1600 	asoc->smallest_mtu = mtu;
1601 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1602 		ovh = SCTP_MIN_OVERHEAD;
1603 	} else {
1604 		ovh = SCTP_MIN_V4_OVERHEAD;
1605 	}
1606 	eff_mtu = mtu - ovh;
1607 	/* Now mark any chunks that need to let IP fragment */
1608 	TAILQ_FOREACH(strm, &asoc->out_wheel, next_spoke) {
1609 		TAILQ_FOREACH(chk, &strm->outqueue, sctp_next) {
1610 			if (chk->send_size > eff_mtu) {
1611 				chk->flags &= SCTP_DONT_FRAGMENT;
1612 				chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
1613 			}
1614 		}
1615 	}
1616 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
1617 		if (chk->send_size > eff_mtu) {
1618 			chk->flags &= SCTP_DONT_FRAGMENT;
1619 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
1620 		}
1621 	}
1622 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
1623 		if (chk->send_size > eff_mtu) {
1624 			chk->flags &= SCTP_DONT_FRAGMENT;
1625 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
1626 		}
1627 	}
1628 }
1629 
1630 
1631 /*
1632  * given an association and starting time of the current RTT period
1633  * return RTO in number of msecs
1634  * net should point to the current network
1635  */
1636 u_int32_t
1637 sctp_calculate_rto(struct sctp_tcb *stcb,
1638 		   struct sctp_association *asoc,
1639 		   struct sctp_nets *net,
1640 		   struct timeval *old)
1641 {
1642 	/*
1643 	 * given an association and the starting time of the current RTT
1644 	 * period (passed in via 'old') return RTO in number of msecs.
1645 	 */
1646 	int calc_time = 0;
1647 	unsigned int new_rto = 0;
1648 	int first_measure = 0;
1649 	struct timeval now;
1650 
1651 	/************************/
1652 	/* 1. calculate new RTT */
1653 	/************************/
1654 	/* get the current time */
1655 	SCTP_GETTIME_TIMEVAL(&now);
1656 	/* compute the RTT value */
1657 	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
1658 		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
1659 		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
1660 			calc_time += (((u_long)now.tv_usec -
1661 				       (u_long)old->tv_usec)/1000);
1662 		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
1663 			/* Borrow 1,000ms from current calculation */
1664 			calc_time -= 1000;
1665 			/* Add in the slop over */
1666 			calc_time += ((int)now.tv_usec/1000);
1667 			/* Add in the pre-second ms's */
1668 			calc_time += (((int)1000000 - (int)old->tv_usec)/1000);
1669 		}
1670 	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
1671 		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
1672 			calc_time = ((u_long)now.tv_usec -
1673 				     (u_long)old->tv_usec)/1000;
1674 		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
1675 			/* impossible .. garbage in nothing out */
1676 			return (((net->lastsa >> 2) + net->lastsv) >> 1);
1677 		} else {
1678 			/* impossible .. garbage in nothing out */
1679 			return (((net->lastsa >> 2) + net->lastsv) >> 1);
1680 		}
1681 	} else {
1682 		/* Clock wrapped? */
1683 		return (((net->lastsa >> 2) + net->lastsv) >> 1);
1684 	}
1685 	/***************************/
1686 	/* 2. update RTTVAR & SRTT */
1687 	/***************************/
1688 #if 0
1689 	/*	if (net->lastsv || net->lastsa) {*/
1690 	/* per Section 5.3.1 C3 in SCTP */
1691 	/*		net->lastsv = (int) 	*//* RTTVAR */
1692 	/*			(((double)(1.0 - 0.25) * (double)net->lastsv) +
1693 				(double)(0.25 * (double)abs(net->lastsa - calc_time)));
1694 				net->lastsa = (int) */	/* SRTT */
1695 	/*(((double)(1.0 - 0.125) * (double)net->lastsa) +
1696 	  (double)(0.125 * (double)calc_time));
1697 	  } else {
1698 	*//* the first RTT calculation, per C2 Section 5.3.1 */
1699 	/*		net->lastsa = calc_time;	*//* SRTT */
1700 	/*		net->lastsv = calc_time / 2;	*//* RTTVAR */
1701 	/*	}*/
1702 	/* if RTTVAR goes to 0 you set to clock grainularity */
1703 	/*	if (net->lastsv == 0) {
1704 		net->lastsv = SCTP_CLOCK_GRANULARITY;
1705 		}
1706 		new_rto = net->lastsa + 4 * net->lastsv;
1707 	*/
1708 #endif
1709 	/* this is Van Jacobson's integer version */
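	/*
	 * Fixed-point bookkeeping: net->lastsa holds the smoothed RTT scaled
	 * by 8 and net->lastsv holds the RTT variance scaled by 4, so the
	 * shifts below implement the usual 1/8 and 1/4 gains without
	 * floating point:
	 *   err = rtt - (lastsa >> 3); lastsa += err;
	 *   lastsv += |err| - (lastsv >> 2);
	 */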
1710 	if (net->RTO) {
1711 		calc_time -= (net->lastsa >> 3);
1712 		net->lastsa += calc_time;
1713 		if (calc_time < 0) {
1714 			calc_time = -calc_time;
1715 		}
1716 		calc_time -= (net->lastsv >> 2);
1717 		net->lastsv += calc_time;
1718 		if (net->lastsv == 0) {
1719 			net->lastsv = SCTP_CLOCK_GRANULARITY;
1720 		}
1721 	} else {
1722 		/* First RTO measurement */
1723 		net->lastsa = calc_time;
1724 		net->lastsv = calc_time >> 1;
1725 		first_measure = 1;
1726 	}
1727 	new_rto = ((net->lastsa >> 2) + net->lastsv) >> 1;
1728 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
1729 	    (stcb->asoc.sat_network_lockout == 0)) {
1730 		stcb->asoc.sat_network = 1;
1731 	} else 	if ((!first_measure) && stcb->asoc.sat_network) {
1732 		stcb->asoc.sat_network = 0;
1733 		stcb->asoc.sat_network_lockout = 1;
1734 	}
1735 	/* bound it, per C6/C7 in Section 5.3.1 */
1736 	if (new_rto < stcb->asoc.minrto) {
1737 		new_rto = stcb->asoc.minrto;
1738 	}
1739 	if (new_rto > stcb->asoc.maxrto) {
1740 		new_rto = stcb->asoc.maxrto;
1741 	}
1742 	/* we now return the smoothed and bounded RTO */
1743 	return ((u_int32_t)new_rto);
1744 }
1745 
1746 
1747 /*
1748  * return a pointer to a contiguous piece of data from the given
1749  * mbuf chain starting at 'off' for 'len' bytes.  If the desired
1750  * piece spans more than one mbuf, a copy is made at 'ptr'.
1751  * caller must ensure that the buffer size is >= 'len'
1752  * returns NULL if there aren't 'len' bytes in the chain.
1753  */
1754 void *
1755 sctp_m_getptr(struct mbuf *m, int off, int len, u_int8_t *in_ptr)
1756 {
1757 	uint32_t count;
1758 	uint8_t *ptr;
1759 	ptr = in_ptr;
1760 	if ((off < 0) || (len <= 0))
1761 		return (NULL);
1762 
1763 	/* find the desired start location */
1764 	while ((m != NULL) && (off > 0)) {
1765 		if (off < m->m_len)
1766 			break;
1767 		off -= m->m_len;
1768 		m = m->m_next;
1769 	}
1770 	if (m == NULL)
1771 		return (NULL);
1772 
1773 	/* is the current mbuf large enough (i.e. contiguous)? */
1774 	if ((m->m_len - off) >= len) {
1775 		return ((void *)(mtod(m, vaddr_t) + off));
1776 	} else {
1777 		/* else, it spans more than one mbuf, so save a temp copy... */
1778 		while ((m != NULL) && (len > 0)) {
1779 			count = uimin(m->m_len - off, len);
1780 			memcpy(ptr, (void *)(mtod(m, vaddr_t) + off), count);
1781 			len -= count;
1782 			ptr += count;
1783 			off = 0;
1784 			m = m->m_next;
1785 		}
1786 		if ((m == NULL) && (len > 0))
1787 			return (NULL);
1788 		else
1789 			return ((void *)in_ptr);
1790 	}
1791 }
1792 
1793 
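/*
 * Typed wrapper around sctp_m_getptr() for walking TLV parameters in a
 * chunk, e.g. (a sketch):
 *
 *	struct sctp_paramhdr *ph, parm_buf;
 *	ph = sctp_get_next_param(m, offset, &parm_buf, sizeof(parm_buf));
 *	if (ph != NULL)
 *		examine ntohs(ph->param_type) and ntohs(ph->param_length),
 *		then advance offset by SCTP_SIZE32(ntohs(ph->param_length));
 */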
1794 struct sctp_paramhdr *
1795 sctp_get_next_param(struct mbuf *m,
1796 		    int offset,
1797 		    struct sctp_paramhdr *pull,
1798 		    int pull_limit)
1799 {
1800 	/* This just provides a typed signature to Peter's Pull routine */
1801 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
1802     	    (u_int8_t *)pull));
1803 }
1804 
1805 
1806 int
1807 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
1808 {
1809 	/*
1810 	 * add padlen bytes of 0 filled padding to the end of the mbuf.
1811 	 * If padlen is > 3 this routine will fail.
1812 	 */
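	/*
	 * Since SCTP chunks and parameters are padded to 4-byte boundaries,
	 * a caller normally passes padlen = (4 - (length & 3)) & 3, which is
	 * always in the range 0..3.
	 */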
1813 	u_int8_t *dp;
1814 	int i;
1815 	if (padlen > 3) {
1816 		return (ENOBUFS);
1817 	}
1818 	if (M_TRAILINGSPACE(m)) {
1819 		/*
1820 		 * The easy way.
1821 		 * We hope the majority of the time we hit here :)
1822 		 */
1823 		dp = (u_int8_t *)(mtod(m, vaddr_t) + m->m_len);
1824 		m->m_len += padlen;
1825 	} else {
1826 		/* Hard way we must grow the mbuf */
1827 		struct mbuf *tmp;
1828 		MGET(tmp, M_DONTWAIT, MT_DATA);
1829 		if (tmp == NULL) {
1830 			/* Out of space GAK! we are in big trouble. */
1831 			return (ENOSPC);
1832 		}
1833 		/* setup and insert in middle */
1834 		tmp->m_next = m->m_next;
1835 		tmp->m_len = padlen;
1836 		m->m_next = tmp;
1837 		dp = mtod(tmp, u_int8_t *);
1838 	}
1839 	/* zero out the pad */
1840 	for (i = 0; i < padlen; i++) {
1841 		*dp = 0;
1842 		dp++;
1843 	}
1844 	return (0);
1845 }
1846 
1847 int
1848 sctp_pad_lastmbuf(struct mbuf *m, int padval)
1849 {
1850 	/* find the last mbuf in chain and pad it */
1851 	struct mbuf *m_at;
1852 	m_at = m;
1853 	while (m_at) {
1854 		if (m_at->m_next == NULL) {
1855 			return (sctp_add_pad_tombuf(m_at, padval));
1856 		}
1857 		m_at = m_at->m_next;
1858 	}
1859 	return (EFAULT);
1860 }
1861 
1862 static void
1863 sctp_notify_assoc_change(u_int32_t event, struct sctp_tcb *stcb,
1864     u_int32_t error)
1865 {
1866 	struct mbuf *m_notify;
1867 	struct sctp_assoc_change *sac;
1868 	const struct sockaddr *to;
1869 	struct sockaddr_in6 sin6, lsa6;
1870 
1871 #ifdef SCTP_DEBUG
1872 	printf("notify: %d\n", event);
1873 #endif
1874 	/*
1875 	 * First if we are going down dump everything we
1876 	 * can to the socket rcv queue.
1877 	 */
1878 	if ((event == SCTP_SHUTDOWN_COMP) || (event == SCTP_COMM_LOST)) {
1879 		sctp_deliver_data(stcb, &stcb->asoc, NULL, 0);
1880 	}
1881 
1882 	/*
1883 	 * For TCP model AND UDP connected sockets we will send
1884 	 * an error up when an ABORT comes in.
1885 	 */
1886 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1887 	     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1888 	    (event == SCTP_COMM_LOST)) {
1889 		stcb->sctp_socket->so_error = ECONNRESET;
1890 		/* Wake ANY sleepers */
1891 		sowwakeup(stcb->sctp_socket);
1892 		sorwakeup(stcb->sctp_socket);
1893 	}
1894 #if 0
1895 	if ((event == SCTP_COMM_UP) &&
1896 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
1897  	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
1898 		 soisconnected(stcb->sctp_socket);
1899 	}
1900 #endif
1901 	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
1902 		/* event not enabled */
1903 		return;
1904 	}
1905 	MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
1906 	if (m_notify == NULL)
1907 		/* no space left */
1908 		return;
1909 	m_notify->m_len = 0;
1910 
1911 	sac = mtod(m_notify, struct sctp_assoc_change *);
1912 	sac->sac_type = SCTP_ASSOC_CHANGE;
1913 	sac->sac_flags = 0;
1914 	sac->sac_length = sizeof(struct sctp_assoc_change);
1915 	sac->sac_state = event;
1916 	sac->sac_error = error;
1917 	/* XXX verify these stream counts */
1918 	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
1919 	sac->sac_inbound_streams = stcb->asoc.streamincnt;
1920 	sac->sac_assoc_id = sctp_get_associd(stcb);
1921 
1922 	m_notify->m_flags |= M_EOR | M_NOTIFICATION;
1923 	m_notify->m_pkthdr.len = sizeof(struct sctp_assoc_change);
1924 	m_reset_rcvif(m_notify);
1925 	m_notify->m_len = sizeof(struct sctp_assoc_change);
1926 	m_notify->m_next = NULL;
1927 
1928 	/* append to socket */
1929 	to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
1930 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
1931 	    to->sa_family == AF_INET) {
1932 		const struct sockaddr_in *sin;
1933 
1934 		sin = (const struct sockaddr_in *)to;
1935 		in6_sin_2_v4mapsin6(sin, &sin6);
1936 		to = (struct sockaddr *)&sin6;
1937 	}
1938 	/* check and strip embedded scope junk */
1939 	to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
1940 						   &lsa6);
1941 	/*
1942 	 * We need to always notify comm changes.
1943 	 * if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
1944 	 * 	sctp_m_freem(m_notify);
1945 	 *	return;
1946 	 * }
1947 	*/
1948 	SCTP_TCB_UNLOCK(stcb);
1949 	SCTP_INP_WLOCK(stcb->sctp_ep);
1950 	SCTP_TCB_LOCK(stcb);
1951 	if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv,
1952 	    to, m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
1953 		/* not enough room */
1954 		sctp_m_freem(m_notify);
1955 		SCTP_INP_WUNLOCK(stcb->sctp_ep);
1956 		return;
1957 	}
1958 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
1959 	   ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
1960 		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
1961 			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
1962 		}
1963 	} else {
1964 		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
1965 	}
1966 	SCTP_INP_WUNLOCK(stcb->sctp_ep);
1967 	/* Wake up any sleeper */
1968 	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1969 	sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
1970 }
1971 
1972 static void
1973 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
1974     const struct sockaddr *sa, uint32_t error)
1975 {
1976 	struct mbuf *m_notify;
1977 	struct sctp_paddr_change *spc;
1978 	const struct sockaddr *to;
1979 	struct sockaddr_in6 sin6, lsa6;
1980 
1981 	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVPADDREVNT))
1982 		/* event not enabled */
1983 		return;
1984 
1985 	MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
1986 	if (m_notify == NULL)
1987 		return;
1988 	m_notify->m_len = 0;
1989 
1990 	MCLGET(m_notify, M_DONTWAIT);
1991 	if ((m_notify->m_flags & M_EXT) != M_EXT) {
1992 		sctp_m_freem(m_notify);
1993 		return;
1994 	}
1995 
1996 	spc = mtod(m_notify, struct sctp_paddr_change *);
1997 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
1998 	spc->spc_flags = 0;
1999 	spc->spc_length = sizeof(struct sctp_paddr_change);
2000 	if (sa->sa_family == AF_INET) {
2001 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2002 	} else {
2003 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2004 	}
2005 	spc->spc_state = state;
2006 	spc->spc_error = error;
2007 	spc->spc_assoc_id = sctp_get_associd(stcb);
2008 
2009 	m_notify->m_flags |= M_EOR | M_NOTIFICATION;
2010 	m_notify->m_pkthdr.len = sizeof(struct sctp_paddr_change);
2011 	m_reset_rcvif(m_notify);
2012 	m_notify->m_len = sizeof(struct sctp_paddr_change);
2013 	m_notify->m_next = NULL;
2014 
2015 	to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
2016 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2017 	    to->sa_family == AF_INET) {
2018 		const struct sockaddr_in *sin;
2019 
2020 		sin = (const struct sockaddr_in *)to;
2021 		in6_sin_2_v4mapsin6(sin, &sin6);
2022 		to = (struct sockaddr *)&sin6;
2023 	}
2024 	/* check and strip embedded scope junk */
2025 	to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
2026 	    &lsa6);
2027 
2028 	if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2029 		sctp_m_freem(m_notify);
2030 		return;
2031 	}
2032 	/* append to socket */
2033 	SCTP_TCB_UNLOCK(stcb);
2034 	SCTP_INP_WLOCK(stcb->sctp_ep);
2035 	SCTP_TCB_LOCK(stcb);
2036 	if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
2037 	    m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2038 		/* not enough room */
2039 		sctp_m_freem(m_notify);
2040 		SCTP_INP_WUNLOCK(stcb->sctp_ep);
2041 		return;
2042 	}
2043 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2044 	   ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2045 		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2046 			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2047 		}
2048 	} else {
2049 		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2050 	}
2051 	SCTP_INP_WUNLOCK(stcb->sctp_ep);
2052 	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2053 }
2054 
2055 
2056 static void
2057 sctp_notify_send_failed(struct sctp_tcb *stcb, u_int32_t error,
2058 			struct sctp_tmit_chunk *chk)
2059 {
2060 	struct mbuf *m_notify;
2061 	struct sctp_send_failed *ssf;
2062 	struct sockaddr_in6 sin6, lsa6;
2063 	const struct sockaddr *to;
2064 	int length;
2065 
2066 	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVSENDFAILEVNT))
2067 		/* event not enabled */
2068 		return;
2069 
2070 	length = sizeof(struct sctp_send_failed) + chk->send_size;
2071 	MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
2072 	if (m_notify == NULL)
2073 		/* no space left */
2074 		return;
2075 	m_notify->m_len = 0;
2076 	ssf = mtod(m_notify, struct sctp_send_failed *);
2077 	ssf->ssf_type = SCTP_SEND_FAILED;
2078 	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
2079 		ssf->ssf_flags = SCTP_DATA_UNSENT;
2080 	else
2081 		ssf->ssf_flags = SCTP_DATA_SENT;
2082 	ssf->ssf_length = length;
2083 	ssf->ssf_error = error;
2084 	/* not exactly what the user sent in, but should be close :) */
2085 	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
2086 	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
2087 	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
2088 	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
2089 	ssf->ssf_info.sinfo_context = chk->rec.data.context;
2090 	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
2091 	ssf->ssf_assoc_id = sctp_get_associd(stcb);
2092 	m_notify->m_next = chk->data;
2093 	if (m_notify->m_next == NULL)
2094 		m_notify->m_flags |= M_EOR | M_NOTIFICATION;
2095 	else {
2096 		struct mbuf *m;
2097 		m_notify->m_flags |= M_NOTIFICATION;
2098 		m = m_notify;
2099 		while (m->m_next != NULL)
2100 			m = m->m_next;
2101 		m->m_flags |= M_EOR;
2102 	}
2103 	m_notify->m_pkthdr.len = length;
2104 	m_reset_rcvif(m_notify);
2105 	m_notify->m_len = sizeof(struct sctp_send_failed);
2106 
2107 	/* Steal off the mbuf */
2108 	chk->data = NULL;
2109 	to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
2110 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2111 	    to->sa_family == AF_INET) {
2112 		const struct sockaddr_in *sin;
2113 
2114 		sin = satocsin(to);
2115 		in6_sin_2_v4mapsin6(sin, &sin6);
2116 		to = (struct sockaddr *)&sin6;
2117 	}
2118 	/* check and strip embedded scope junk */
2119 	to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
2120 						   &lsa6);
2121 
2122 	if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2123 		sctp_m_freem(m_notify);
2124 		return;
2125 	}
2126 
2127 	/* append to socket */
2128 	SCTP_TCB_UNLOCK(stcb);
2129 	SCTP_INP_WLOCK(stcb->sctp_ep);
2130 	SCTP_TCB_LOCK(stcb);
2131 	if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
2132 	    m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2133 		/* not enough room */
2134 		sctp_m_freem(m_notify);
2135 		SCTP_INP_WUNLOCK(stcb->sctp_ep);
2136 		return;
2137 	}
2138 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2139 	   ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2140 		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2141 			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2142 		}
2143 	} else {
2144 		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2145 	}
2146 	SCTP_INP_WUNLOCK(stcb->sctp_ep);
2147 	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2148 }
2149 
2150 static void
2151 sctp_notify_adaption_layer(struct sctp_tcb *stcb,
2152 			   u_int32_t error)
2153 {
2154 	struct mbuf *m_notify;
2155 	struct sctp_adaption_event *sai;
2156 	struct sockaddr_in6 sin6, lsa6;
2157 	const struct sockaddr *to;
2158 
2159 	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_ADAPTIONEVNT))
2160 		/* event not enabled */
2161 		return;
2162 
2163 	MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
2164 	if (m_notify == NULL)
2165 		/* no space left */
2166 		return;
2167 	m_notify->m_len = 0;
2168 	sai = mtod(m_notify, struct sctp_adaption_event *);
2169 	sai->sai_type = SCTP_ADAPTION_INDICATION;
2170 	sai->sai_flags = 0;
2171 	sai->sai_length = sizeof(struct sctp_adaption_event);
2172 	sai->sai_adaption_ind = error;
2173 	sai->sai_assoc_id = sctp_get_associd(stcb);
2174 
2175 	m_notify->m_flags |= M_EOR | M_NOTIFICATION;
2176 	m_notify->m_pkthdr.len = sizeof(struct sctp_adaption_event);
2177 	m_reset_rcvif(m_notify);
2178 	m_notify->m_len = sizeof(struct sctp_adaption_event);
2179 	m_notify->m_next = NULL;
2180 
2181 	to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
2182 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2183 	    (to->sa_family == AF_INET)) {
2184 		const struct sockaddr_in *sin;
2185 
2186 		sin = satocsin(to);
2187 		in6_sin_2_v4mapsin6(sin, &sin6);
2188 		to = (struct sockaddr *)&sin6;
2189 	}
2190 	/* check and strip embedded scope junk */
2191 	to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
2192 						   &lsa6);
2193 	if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2194 		sctp_m_freem(m_notify);
2195 		return;
2196 	}
2197 	/* append to socket */
2198 	SCTP_TCB_UNLOCK(stcb);
2199 	SCTP_INP_WLOCK(stcb->sctp_ep);
2200 	SCTP_TCB_LOCK(stcb);
2201 	if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
2202 	    m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2203 		/* not enough room */
2204 		sctp_m_freem(m_notify);
2205 		SCTP_INP_WUNLOCK(stcb->sctp_ep);
2206 		return;
2207 	}
2208 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2209 	   ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2210 		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2211 			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2212 		}
2213 	} else {
2214 		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2215 	}
2216 	SCTP_INP_WUNLOCK(stcb->sctp_ep);
2217 	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2218 }
2219 
2220 static void
2221 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb,
2222 					u_int32_t error)
2223 {
2224 	struct mbuf *m_notify;
2225 	struct sctp_pdapi_event *pdapi;
2226 	struct sockaddr_in6 sin6, lsa6;
2227 	const struct sockaddr *to;
2228 
2229 	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_PDAPIEVNT))
2230 		/* event not enabled */
2231 		return;
2232 
2233 	MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
2234 	if (m_notify == NULL)
2235 		/* no space left */
2236 		return;
2237 	m_notify->m_len = 0;
2238 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
2239 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
2240 	pdapi->pdapi_flags = 0;
2241 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
2242 	pdapi->pdapi_indication = error;
2243 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
2244 
2245 	m_notify->m_flags |= M_EOR | M_NOTIFICATION;
2246 	m_notify->m_pkthdr.len = sizeof(struct sctp_pdapi_event);
2247 	m_reset_rcvif(m_notify);
2248 	m_notify->m_len = sizeof(struct sctp_pdapi_event);
2249 	m_notify->m_next = NULL;
2250 
2251 	to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
2252 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2253 	    (to->sa_family == AF_INET)) {
2254 		const struct sockaddr_in *sin;
2255 
2256 		sin = satocsin(to);
2257 		in6_sin_2_v4mapsin6(sin, &sin6);
2258 		to = (struct sockaddr *)&sin6;
2259 	}
2260 	/* check and strip embedded scope junk */
2261 	to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
2262 						   &lsa6);
2263 	if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2264 		sctp_m_freem(m_notify);
2265 		return;
2266 	}
2267 	/* append to socket */
2268 	SCTP_TCB_UNLOCK(stcb);
2269 	SCTP_INP_WLOCK(stcb->sctp_ep);
2270 	SCTP_TCB_LOCK(stcb);
2271 	if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
2272 	    m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2273 		/* not enough room */
2274 		sctp_m_freem(m_notify);
2275 		SCTP_INP_WUNLOCK(stcb->sctp_ep);
2276 		return;
2277 	}
2278 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2279 	   ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2280 		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2281 			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2282 		}
2283 	} else {
2284 		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2285 	}
2286 	SCTP_INP_WUNLOCK(stcb->sctp_ep);
2287 	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2288 }
2289 
2290 static void
2291 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
2292 {
2293 	struct mbuf *m_notify;
2294 	struct sctp_shutdown_event *sse;
2295 	struct sockaddr_in6 sin6, lsa6;
2296 	const struct sockaddr *to;
2297 
2298 	/*
2299 	 * For TCP model AND UDP connected sockets we will send
2300 	 * an error up when a SHUTDOWN completes
2301 	 */
2302 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2303 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
2304 		/* mark socket closed for read/write and wakeup! */
2305 		socantrcvmore(stcb->sctp_socket);
2306 		socantsendmore(stcb->sctp_socket);
2307 	}
2308 
2309 	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
2310 		/* event not enabled */
2311 		return;
2312 
2313 	MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
2314 	if (m_notify == NULL)
2315 		/* no space left */
2316 		return;
2317 	m_notify->m_len = 0;
2318 	sse = mtod(m_notify, struct sctp_shutdown_event *);
2319 	sse->sse_type = SCTP_SHUTDOWN_EVENT;
2320 	sse->sse_flags = 0;
2321 	sse->sse_length = sizeof(struct sctp_shutdown_event);
2322 	sse->sse_assoc_id = sctp_get_associd(stcb);
2323 
2324 	m_notify->m_flags |= M_EOR | M_NOTIFICATION;
2325 	m_notify->m_pkthdr.len = sizeof(struct sctp_shutdown_event);
2326 	m_reset_rcvif(m_notify);
2327 	m_notify->m_len = sizeof(struct sctp_shutdown_event);
2328 	m_notify->m_next = NULL;
2329 
2330 	to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
2331 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2332 	    to->sa_family == AF_INET) {
2333 		const struct sockaddr_in *sin;
2334 
2335 		sin = satocsin(to);
2336 		in6_sin_2_v4mapsin6(sin, &sin6);
2337 		to = (struct sockaddr *)&sin6;
2338 	}
2339 	/* check and strip embedded scope junk */
2340 	to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
2341 	    &lsa6);
2342 	if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2343 		sctp_m_freem(m_notify);
2344 		return;
2345 	}
2346 	/* append to socket */
2347 	SCTP_TCB_UNLOCK(stcb);
2348 	SCTP_INP_WLOCK(stcb->sctp_ep);
2349 	SCTP_TCB_LOCK(stcb);
2350 	if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
2351 	    m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2352 		/* not enough room */
2353 		sctp_m_freem(m_notify);
2354 		SCTP_INP_WUNLOCK(stcb->sctp_ep);
2355 		return;
2356 	}
2357 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2358 	   ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2359 		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2360 			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2361 		}
2362 	} else {
2363 		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2364 	}
2365 	SCTP_INP_WUNLOCK(stcb->sctp_ep);
2366 	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2367 }
2368 
2369 static void
2370 sctp_notify_stream_reset(struct sctp_tcb *stcb,
2371     int number_entries, uint16_t *list, int flag)
2372 {
2373 	struct mbuf *m_notify;
2374 	struct sctp_stream_reset_event *strreset;
2375 	struct sockaddr_in6 sin6, lsa6;
2376 	const struct sockaddr *to;
2377 	int len;
2378 
2379 	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_STREAM_RESETEVNT))
2380 		/* event not enabled */
2381 		return;
2382 
2383 	MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
2384 	if (m_notify == NULL)
2385 		/* no space left */
2386 		return;
2387 	m_notify->m_len = 0;
2388 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
2389 	if (len > M_TRAILINGSPACE(m_notify)) {
2390 		MCLGET(m_notify, M_WAIT);
2391 	}
2392 	if (m_notify == NULL)
2393 		/* no clusters */
2394 		return;
2395 
2396 	if (len > M_TRAILINGSPACE(m_notify)) {
2397 		/* never enough room */
2398 		m_freem(m_notify);
2399 		return;
2400 	}
2401 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
2402 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
2403 	if (number_entries == 0) {
2404 		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
2405 	} else {
2406 		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
2407 	}
2408 	strreset->strreset_length = len;
2409 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
2410 	if (number_entries) {
2411 		int i;
2412 		for (i=0; i<number_entries; i++) {
2413 			strreset->strreset_list[i] = list[i];
2414 		}
2415 	}
2416 	m_notify->m_flags |= M_EOR | M_NOTIFICATION;
2417 	m_notify->m_pkthdr.len = len;
2418 	m_reset_rcvif(m_notify);
2419 	m_notify->m_len = len;
2420 	m_notify->m_next = NULL;
2421 	if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2422 		/* no space */
2423 		sctp_m_freem(m_notify);
2424 		return;
2425 	}
2426 	to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
2427 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2428 	    to->sa_family == AF_INET) {
2429 		const struct sockaddr_in *sin;
2430 
2431 		sin = satocsin(to);
2432 		in6_sin_2_v4mapsin6(sin, &sin6);
2433 		to = (struct sockaddr *)&sin6;
2434 	}
2435 	/* check and strip embedded scope junk */
2436 	to = (const struct sockaddr *) sctp_recover_scope((const struct sockaddr_in6 *)to,
2437 	    &lsa6);
2438 	/* append to socket */
2439 	SCTP_TCB_UNLOCK(stcb);
2440 	SCTP_INP_WLOCK(stcb->sctp_ep);
2441 	SCTP_TCB_LOCK(stcb);
2442 	if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
2443 	    m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2444 		/* not enough room */
2445 		sctp_m_freem(m_notify);
2446 		SCTP_INP_WUNLOCK(stcb->sctp_ep);
2447 		return;
2448 	}
2449 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2450 	   ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2451 		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2452 			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2453 		}
2454 	} else {
2455 		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2456 	}
2457 	SCTP_INP_WUNLOCK(stcb->sctp_ep);
2458 	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2459 }
2460 
2461 
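/*
 * Central dispatcher for upper-layer notifications: maps the internal
 * SCTP_NOTIFY_* codes onto the sctp_notify_*() helpers above, and drops
 * everything once the socket is gone or the association is marked closed.
 */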
2462 void
2463 sctp_ulp_notify(u_int32_t notification, struct sctp_tcb *stcb,
2464 		u_int32_t error, void *data)
2465 {
2466 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2467 		/* No notifications up when we are in a no socket state */
2468 		return;
2469 	}
2470 	if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2471 		/* Can't send up to a closed socket any notifications */
2472 		return;
2473 	}
2474 	switch (notification) {
2475 	case SCTP_NOTIFY_ASSOC_UP:
2476 		sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error);
2477 		break;
2478 	case SCTP_NOTIFY_ASSOC_DOWN:
2479 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error);
2480 		break;
2481 	case SCTP_NOTIFY_INTERFACE_DOWN:
2482 	{
2483 		struct sctp_nets *net;
2484 		net = (struct sctp_nets *)data;
2485 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
2486 		    rtcache_getdst(&net->ro), error);
2487 		break;
2488 	}
2489 	case SCTP_NOTIFY_INTERFACE_UP:
2490 	{
2491 		struct sctp_nets *net;
2492 		net = (struct sctp_nets *)data;
2493 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
2494 		    rtcache_getdst(&net->ro), error);
2495 		break;
2496 	}
2497 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
2498 	{
2499 		struct sctp_nets *net;
2500 		net = (struct sctp_nets *)data;
2501 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
2502 		    rtcache_getdst(&net->ro), error);
2503 		break;
2504 	}
2505 	case SCTP_NOTIFY_DG_FAIL:
2506 		sctp_notify_send_failed(stcb, error,
2507 		    (struct sctp_tmit_chunk *)data);
2508 		break;
2509 	case SCTP_NOTIFY_ADAPTION_INDICATION:
2510 		/* Here the error is the adaption indication */
2511 		sctp_notify_adaption_layer(stcb, error);
2512 		break;
2513 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
2514 		sctp_notify_partial_delivery_indication(stcb, error);
2515 		break;
2516 	case SCTP_NOTIFY_STRDATA_ERR:
2517 		break;
2518 	case SCTP_NOTIFY_ASSOC_ABORTED:
2519 		sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error);
2520 		break;
2521 	case SCTP_NOTIFY_PEER_OPENED_STREAM:
2522 		break;
2523 	case SCTP_NOTIFY_STREAM_OPENED_OK:
2524 		break;
2525 	case SCTP_NOTIFY_ASSOC_RESTART:
2526 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error);
2527 		break;
2528 	case SCTP_NOTIFY_HB_RESP:
2529 		break;
2530 	case SCTP_NOTIFY_STR_RESET_SEND:
2531 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STRRESET_OUTBOUND_STR);
2532 		break;
2533 	case SCTP_NOTIFY_STR_RESET_RECV:
2534 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STRRESET_INBOUND_STR);
2535 		break;
2536 	case SCTP_NOTIFY_ASCONF_ADD_IP:
2537 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
2538 		    error);
2539 		break;
2540 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
2541 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
2542 		    error);
2543 		break;
2544 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
2545 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
2546 		    error);
2547 		break;
2548 	case SCTP_NOTIFY_ASCONF_SUCCESS:
2549 		break;
2550 	case SCTP_NOTIFY_ASCONF_FAILED:
2551 		break;
2552 	case SCTP_NOTIFY_PEER_SHUTDOWN:
2553 		sctp_notify_shutdown_event(stcb);
2554 		break;
2555 	default:
2556 #ifdef SCTP_DEBUG
2557 		if (sctp_debug_on & SCTP_DEBUG_UTIL1) {
2558 			printf("NOTIFY: unknown notification %xh (%u)\n",
2559 			    notification, notification);
2560 		}
2561 #endif /* SCTP_DEBUG */
2562 		break;
2563 	} /* end switch */
2564 }
2565 
2566 void
2567 sctp_report_all_outbound(struct sctp_tcb *stcb)
2568 {
2569 	struct sctp_association *asoc;
2570 	struct sctp_stream_out *outs;
2571 	struct sctp_tmit_chunk *chk;
2572 
2573 	asoc = &stcb->asoc;
2574 
2575 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2576 		return;
2577 	}
2578 	/* now go through all the gunk, freeing chunks */
2579 	TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
2580 		/* now clean up any chunks here */
2581 		chk = TAILQ_FIRST(&outs->outqueue);
2582 		while (chk) {
2583 			stcb->asoc.stream_queue_cnt--;
2584 			TAILQ_REMOVE(&outs->outqueue, chk, sctp_next);
2585 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
2586 			    SCTP_NOTIFY_DATAGRAM_UNSENT, chk);
2587 			sctp_m_freem(chk->data);
2588 			chk->data = NULL;
2589 			if (chk->whoTo)
2590 				sctp_free_remote_addr(chk->whoTo);
2591 			chk->whoTo = NULL;
2592 			chk->asoc = NULL;
2593 			/* Free the chunk */
2594 			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
2595 			sctppcbinfo.ipi_count_chunk--;
2596 			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
2597 				panic("Chunk count is negative");
2598 			}
2599 			sctppcbinfo.ipi_gencnt_chunk++;
2600 			chk = TAILQ_FIRST(&outs->outqueue);
2601 		}
2602 	}
2603 	/* pending send queue SHOULD be empty */
2604 	if (!TAILQ_EMPTY(&asoc->send_queue)) {
2605 		chk = TAILQ_FIRST(&asoc->send_queue);
2606 		while (chk) {
2607 			TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
2608 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, chk);
2609 			sctp_m_freem(chk->data);
2610 			chk->data = NULL;
2611 			if (chk->whoTo)
2612 				sctp_free_remote_addr(chk->whoTo);
2613 			chk->whoTo = NULL;
2614 			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
2615 			sctppcbinfo.ipi_count_chunk--;
2616 			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
2617 				panic("Chunk count is negative");
2618 			}
2619 			sctppcbinfo.ipi_gencnt_chunk++;
2620 			chk = TAILQ_FIRST(&asoc->send_queue);
2621 		}
2622 	}
2623 	/* sent queue SHOULD be empty */
2624 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
2625 		chk = TAILQ_FIRST(&asoc->sent_queue);
2626 		while (chk) {
2627 			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
2628 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
2629 			    SCTP_NOTIFY_DATAGRAM_SENT, chk);
2630 			sctp_m_freem(chk->data);
2631 			chk->data = NULL;
2632 			if (chk->whoTo)
2633 				sctp_free_remote_addr(chk->whoTo);
2634 			chk->whoTo = NULL;
2635 			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
2636 			sctppcbinfo.ipi_count_chunk--;
2637 			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
2638 				panic("Chunk count is negative");
2639 			}
2640 			sctppcbinfo.ipi_gencnt_chunk++;
2641 			chk = TAILQ_FIRST(&asoc->sent_queue);
2642 		}
2643 	}
2644 }
2645 
2646 void
2647 sctp_abort_notification(struct sctp_tcb *stcb, int error)
2648 {
2649 
2650 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2651 		return;
2652 	}
2653 	/* Tell them we lost the asoc */
2654 	sctp_report_all_outbound(stcb);
2655 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL);
2656 }
2657 
2658 void
2659 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2660     struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err)
2661 {
2662 	u_int32_t vtag;
2663 
2664 	vtag = 0;
2665 	if (stcb != NULL) {
2666 		/* We have a TCB to abort, send notification too */
2667 		vtag = stcb->asoc.peer_vtag;
2668 		sctp_abort_notification(stcb, 0);
2669 	}
2670 	sctp_send_abort(m, iphlen, sh, vtag, op_err);
2671 	if (stcb != NULL) {
2672 		/* Ok, now lets free it */
2673 		sctp_free_assoc(inp, stcb);
2674 	} else {
2675 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2676 			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
2677 				sctp_inpcb_free(inp, 1);
2678 			}
2679 		}
2680 	}
2681 }
2682 
2683 void
2684 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2685     int error, struct mbuf *op_err)
2686 {
2687 
2688 	if (stcb == NULL) {
2689 		/* Got to have a TCB */
2690 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2691 			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
2692 				sctp_inpcb_free(inp, 1);
2693 			}
2694 		}
2695 		return;
2696 	}
2697 	/* notify the ulp */
2698 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
2699 		sctp_abort_notification(stcb, error);
2700 	/* notify the peer */
2701 	sctp_send_abort_tcb(stcb, op_err);
2702 	/* now free the asoc */
2703 	sctp_free_assoc(inp, stcb);
2704 }
2705 
2706 void
2707 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
2708     struct sctp_inpcb *inp, struct mbuf *op_err)
2709 {
2710 	struct sctp_chunkhdr *ch, chunk_buf;
2711 	unsigned int chk_length;
2712 
2713 	/* Generate a TO address for future reference */
2714 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
2715 		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
2716 			sctp_inpcb_free(inp, 1);
2717 		}
2718 	}
2719 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
2720 	    sizeof(*ch), (u_int8_t *)&chunk_buf);
2721 	while (ch != NULL) {
2722 		chk_length = ntohs(ch->chunk_length);
2723 		if (chk_length < sizeof(*ch)) {
2724 			/* break to abort land */
2725 			break;
2726 		}
2727 		switch (ch->chunk_type) {
2728 		case SCTP_PACKET_DROPPED:
2729 			/* we don't respond to pkt-dropped */
2730 			return;
2731 		case SCTP_ABORT_ASSOCIATION:
2732 			/* we don't respond with an ABORT to an ABORT */
2733 			return;
2734 		case SCTP_SHUTDOWN_COMPLETE:
2735 			/*
2736 			 * we ignore it since we are not waiting for it
2737 			 * and peer is gone
2738 			 */
2739 			return;
2740 		case SCTP_SHUTDOWN_ACK:
2741 			sctp_send_shutdown_complete2(m, iphlen, sh);
2742 			return;
2743 		default:
2744 			break;
2745 		}
2746 		offset += SCTP_SIZE32(chk_length);
2747 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
2748 		    sizeof(*ch), (u_int8_t *)&chunk_buf);
2749 	}
2750 	sctp_send_abort(m, iphlen, sh, 0, op_err);
2751 }
2752 
2753 /*
2754  * check the inbound datagram to make sure there is not an abort
2755  * inside it, if there is return 1, else return 0.
2756  */
2757 int
2758 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, int *vtagfill)
2759 {
2760 	struct sctp_chunkhdr *ch;
2761 	struct sctp_init_chunk *init_chk, chunk_buf;
2762 	int offset;
2763 	unsigned int chk_length;
2764 
2765 	offset = iphlen + sizeof(struct sctphdr);
2766 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
2767 	    (u_int8_t *)&chunk_buf);
2768 	while (ch != NULL) {
2769 		chk_length = ntohs(ch->chunk_length);
2770 		if (chk_length < sizeof(*ch)) {
2771 			/* packet is probably corrupt */
2772 			break;
2773 		}
2774 		/* we seem to be ok, is it an abort? */
2775 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
2776 			/* yep, tell them */
2777 			return (1);
2778 		}
2779 		if (ch->chunk_type == SCTP_INITIATION) {
2780 			/* need to update the Vtag */
2781 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
2782 			    offset, sizeof(*init_chk), (u_int8_t *)&chunk_buf);
2783 			if (init_chk != NULL) {
2784 				*vtagfill = ntohl(init_chk->init.initiate_tag);
2785 			}
2786 		}
2787 		/* Nope, move to the next chunk */
2788 		offset += SCTP_SIZE32(chk_length);
2789 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
2790 		    sizeof(*ch), (u_int8_t *)&chunk_buf);
2791 	}
2792 	return (0);
2793 }
2794 
2795 /*
2796  * currently (2/02), ifa_addr embeds the scope_id and doesn't
2797  * have sin6_scope_id set (i.e. it's 0),
2798  * so create this function to compare link-local scopes
2799  */
2800 uint32_t
2801 sctp_is_same_scope(const struct sockaddr_in6 *addr1, const struct sockaddr_in6 *addr2)
2802 {
2803 	struct sockaddr_in6 a, b;
2804 
2805 	/* save copies */
2806 	a = *addr1;
2807 	b = *addr2;
2808 
2809 	if (a.sin6_scope_id == 0)
2810 		if (sa6_recoverscope(&a)) {
2811 			/* can't get scope, so can't match */
2812 			return (0);
2813 		}
2814 	if (b.sin6_scope_id == 0)
2815 		if (sa6_recoverscope(&b)) {
2816 			/* can't get scope, so can't match */
2817 			return (0);
2818 		}
2819 	if (a.sin6_scope_id != b.sin6_scope_id)
2820 		return (0);
2821 
2822 	return (1);
2823 }
2824 
2825 /*
2826  * returns a sockaddr_in6 with embedded scope recovered and removed
2827  */
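/*
 * Under the KAME convention a link-local address may carry the zone in
 * the address bytes themselves (e.g. fe80:0004::1 for interface index 4)
 * while sin6_scope_id is 0; sa6_recoverscope() recovers that embedded
 * index into sin6_scope_id so the address can be handed up cleanly.
 */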
2828 const struct sockaddr_in6 *
2829 sctp_recover_scope(const struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
2830 {
2831 	const struct sockaddr_in6 *newaddr;
2832 
2833 	newaddr = addr;
2834 	/* check and strip embedded scope junk */
2835 	if (addr->sin6_family == AF_INET6) {
2836 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
2837 			if (addr->sin6_scope_id == 0) {
2838 				*store = *addr;
2839 				if (sa6_recoverscope(store) == 0) {
2840 					/* use the recovered scope */
2841 					newaddr = store;
2842 				}
2843 				/* else, return the original "to" addr */
2844 			}
2845 		}
2846 	}
2847 	return (newaddr);
2848 }
2849 
2850 /*
2851  * are the two addresses the same?  currently a "scopeless" check
2852  * returns: 1 if same, 0 if not
2853  */
2854 int
2855 sctp_cmpaddr(const struct sockaddr *sa1, const struct sockaddr *sa2)
2856 {
2857 
2858 	/* must be valid */
2859 	if (sa1 == NULL || sa2 == NULL)
2860 		return (0);
2861 
2862 	/* must be the same family */
2863 	if (sa1->sa_family != sa2->sa_family)
2864 		return (0);
2865 
2866 	if (sa1->sa_family == AF_INET6) {
2867 		/* IPv6 addresses */
2868 		const struct sockaddr_in6 *sin6_1, *sin6_2;
2869 
2870 		sin6_1 = (const struct sockaddr_in6 *)sa1;
2871 		sin6_2 = (const struct sockaddr_in6 *)sa2;
2872 		return (SCTP6_ARE_ADDR_EQUAL(&sin6_1->sin6_addr,
2873 		    &sin6_2->sin6_addr));
2874 	} else if (sa1->sa_family == AF_INET) {
2875 		/* IPv4 addresses */
2876 		const struct sockaddr_in *sin_1, *sin_2;
2877 
2878 		sin_1 = (const struct sockaddr_in *)sa1;
2879 		sin_2 = (const struct sockaddr_in *)sa2;
2880 		return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
2881 	} else {
2882 		/* we don't do these... */
2883 		return (0);
2884 	}
2885 }
2886 
2887 void
2888 sctp_print_address(const struct sockaddr *sa)
2889 {
2890 	char ip6buf[INET6_ADDRSTRLEN];
2891 
2892 	if (sa->sa_family == AF_INET6) {
2893 		const struct sockaddr_in6 *sin6;
2894 		sin6 = (const struct sockaddr_in6 *)sa;
2895 		printf("IPv6 address: %s:%d scope:%u\n",
2896 		    IN6_PRINT(ip6buf, &sin6->sin6_addr), ntohs(sin6->sin6_port),
2897 		    sin6->sin6_scope_id);
2898 	} else if (sa->sa_family == AF_INET) {
2899 		const struct sockaddr_in *sin;
2900 		sin = (const struct sockaddr_in *)sa;
2901 		printf("IPv4 address: %s:%d\n", inet_ntoa(sin->sin_addr),
2902 		    ntohs(sin->sin_port));
2903 	} else {
2904 		printf("?\n");
2905 	}
2906 }
2907 
2908 void
2909 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
2910 {
2911 	if (iph->ip_v == IPVERSION) {
2912 		struct sockaddr_in lsa, fsa;
2913 
2914 		memset(&lsa, 0, sizeof(lsa));
2915 		lsa.sin_len = sizeof(lsa);
2916 		lsa.sin_family = AF_INET;
2917 		lsa.sin_addr = iph->ip_src;
2918 		lsa.sin_port = sh->src_port;
2919 		memset(&fsa, 0, sizeof(fsa));
2920 		fsa.sin_len = sizeof(fsa);
2921 		fsa.sin_family = AF_INET;
2922 		fsa.sin_addr = iph->ip_dst;
2923 		fsa.sin_port = sh->dest_port;
2924 		printf("src: ");
2925 		sctp_print_address((struct sockaddr *)&lsa);
2926 		printf("dest: ");
2927 		sctp_print_address((struct sockaddr *)&fsa);
2928 	} else if (iph->ip_v == (IPV6_VERSION >> 4)) {
2929 		struct ip6_hdr *ip6;
2930 		struct sockaddr_in6 lsa6, fsa6;
2931 
2932 		ip6 = (struct ip6_hdr *)iph;
2933 		memset(&lsa6, 0, sizeof(lsa6));
2934 		lsa6.sin6_len = sizeof(lsa6);
2935 		lsa6.sin6_family = AF_INET6;
2936 		lsa6.sin6_addr = ip6->ip6_src;
2937 		lsa6.sin6_port = sh->src_port;
2938 		memset(&fsa6, 0, sizeof(fsa6));
2939 		fsa6.sin6_len = sizeof(fsa6);
2940 		fsa6.sin6_family = AF_INET6;
2941 		fsa6.sin6_addr = ip6->ip6_dst;
2942 		fsa6.sin6_port = sh->dest_port;
2943 		printf("src: ");
2944 		sctp_print_address((struct sockaddr *)&lsa6);
2945 		printf("dest: ");
2946 		sctp_print_address((struct sockaddr *)&fsa6);
2947 	}
2948 }
2949 
2950 #if defined(__FreeBSD__) || defined(__APPLE__)
2951 
2952 /* cloned from uipc_socket.c */
2953 
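/*
 * SCTP_SBLINKRECORD links mbuf record m0 onto the tail of the socket
 * buffer's record list, keeping sb_lastrecord up to date (the same job
 * as the stock SBLINKRECORD macro it was cloned from).
 */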
2954 #define SCTP_SBLINKRECORD(sb, m0) do {					\
2955 	if ((sb)->sb_lastrecord != NULL)				\
2956 		(sb)->sb_lastrecord->m_nextpkt = (m0);			\
2957 	else								\
2958 		(sb)->sb_mb = (m0);					\
2959 	(sb)->sb_lastrecord = (m0);					\
2960 } while (/*CONSTCOND*/0)
2961 #endif
2962 
2963 
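/*
 * Variant of sbappendaddr() that skips the usual space check: builds a
 * record of (address, control, data), stamps the association's vtag into
 * the packet header (csum_data, or csum on OpenBSD) so the record can
 * later be matched back to its association, and links the record onto
 * the receive buffer.  Returns 1 on success, 0 on allocation failure.
 */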
2964 int
2965 sbappendaddr_nocheck(struct sockbuf *sb, const struct sockaddr *asa,
2966 	struct mbuf *m0, struct mbuf *control,
2967 	u_int32_t tag, struct sctp_inpcb *inp)
2968 {
2969 #ifdef __NetBSD__
2970 	struct mbuf *m, *n;
2971 
2972 	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
2973 		panic("sbappendaddr_nocheck");
2974 
2975 	m0->m_pkthdr.csum_data = (int)tag;
2976 
2977 	for (n = control; n; n = n->m_next) {
2978 		if (n->m_next == 0)	/* keep pointer to last control buf */
2979 			break;
2980 	}
2981 	if (((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) == 0) ||
2982 	    ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)== 0)) {
2983 		MGETHDR(m, M_DONTWAIT, MT_SONAME);
2984 		if (m == 0)
2985 			return (0);
2986 
2987 		m->m_len = asa->sa_len;
2988 		memcpy(mtod(m, void *), (const void *)asa, asa->sa_len);
2989 	} else {
2990 		m = NULL;
2991 	}
2992 	if (n) {
2993 		n->m_next = m0;		/* concatenate data to control */
2994 	}else {
2995 		control = m0;
2996 	}
2997 	if (m)
2998 		m->m_next = control;
2999 	else
3000 		m = control;
3001 	m->m_pkthdr.csum_data = tag;
3002 
3003 	for (n = m; n; n = n->m_next)
3004 		sballoc(sb, n);
3005 	if ((n = sb->sb_mb) != NULL) {
3006 		if ((n->m_nextpkt != inp->sb_last_mpkt) && (n->m_nextpkt == NULL)) {
3007 			inp->sb_last_mpkt = NULL;
3008 		}
3009 		if (inp->sb_last_mpkt)
3010 			inp->sb_last_mpkt->m_nextpkt = m;
3011  		else {
3012 			while (n->m_nextpkt) {
3013 				n = n->m_nextpkt;
3014 			}
3015 			n->m_nextpkt = m;
3016 		}
3017 		inp->sb_last_mpkt = m;
3018 	} else {
3019 		inp->sb_last_mpkt = sb->sb_mb = m;
3020 		inp->sctp_vtag_first = tag;
3021 	}
3022 	return (1);
3023 #endif
3024 #if defined(__FreeBSD__) || defined(__APPLE__)
3025 	struct mbuf *m, *n, *nlast;
3026 	int cnt=0;
3027 
3028 	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
3029 		panic("sbappendaddr_nocheck");
3030 
3031 	for (n = control; n; n = n->m_next) {
3032 		if (n->m_next == 0)	/* get pointer to last control buf */
3033 			break;
3034 	}
3035 	if (((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) == 0) ||
3036 	    ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)== 0)) {
3037 		if (asa->sa_len > MHLEN)
3038 			return (0);
3039  try_again:
3040 		MGETHDR(m, M_DONTWAIT, MT_SONAME);
3041 		if (m == 0)
3042 			return (0);
3043 		m->m_len = 0;
3044 		/* safety */
3045 		if (m == m0) {
3046 			printf("Duplicate mbuf allocated %p in and mget returned %p?\n",
3047 			       m0, m);
3048 			if (cnt) {
3049 				panic("more than once");
3050 			}
3051 			cnt++;
3052 			goto try_again;
3053 		}
3054 		m->m_len = asa->sa_len;
3055 		bcopy((void *)asa, mtod(m, void *), asa->sa_len);
3056 	}
3057 	else {
3058 		m = NULL;
3059 	}
3060 	if (n)
3061 		n->m_next = m0;		/* concatenate data to control */
3062 	else
3063 		control = m0;
3064 	if (m)
3065 		m->m_next = control;
3066 	else
3067 		m = control;
3068 	m->m_pkthdr.csum_data = (int)tag;
3069 
3070 	for (n = m; n; n = n->m_next)
3071 		sballoc(sb, n);
3072 	nlast = n;
3073 	if (sb->sb_mb == NULL) {
3074 		inp->sctp_vtag_first = tag;
3075 	}
3076 
3077 #ifdef __FREEBSD__
3078 	if (sb->sb_mb == NULL)
3079 		inp->sctp_vtag_first = tag;
3080 	SCTP_SBLINKRECORD(sb, m);
3081 	sb->sb_mbtail = nlast;
3082 #else
3083 	if ((n = sb->sb_mb) != NULL) {
3084 		if ((n->m_nextpkt != inp->sb_last_mpkt) && (n->m_nextpkt == NULL)) {
3085 			inp->sb_last_mpkt = NULL;
3086 		}
3087 		if (inp->sb_last_mpkt)
3088 			inp->sb_last_mpkt->m_nextpkt = m;
3089  		else {
3090 			while (n->m_nextpkt) {
3091 				n = n->m_nextpkt;
3092 			}
3093 			n->m_nextpkt = m;
3094 		}
3095 		inp->sb_last_mpkt = m;
3096 	} else {
3097 		inp->sb_last_mpkt = sb->sb_mb = m;
3098 		inp->sctp_vtag_first = tag;
3099 	}
3100 #endif
3101 	return (1);
3102 #endif
3103 #ifdef __OpenBSD__
3104 	struct mbuf *m, *n;
3105 
3106 	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
3107 		panic("sbappendaddr_nocheck");
3108 	m0->m_pkthdr.csum = (int)tag;
3109 	for (n = control; n; n = n->m_next) {
3110 		if (n->m_next == 0)	/* keep pointer to last control buf */
3111 			break;
3112 	}
3113 	if (((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) == 0) ||
3114 	    ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)== 0)) {
3115 		if (asa->sa_len > MHLEN)
3116 			return (0);
3117 		MGETHDR(m, M_DONTWAIT, MT_SONAME);
3118 		if (m == 0)
3119 			return (0);
3120 		m->m_len = asa->sa_len;
3121 		bcopy((void *)asa, mtod(m, void *), asa->sa_len);
3122 	} else {
3123 		m = NULL;
3124 	}
3125 	if (n)
3126 		n->m_next = m0;		/* concatenate data to control */
3127 	else
3128 		control = m0;
3129 
3130 	m->m_pkthdr.csum = (int)tag;
3131 	m->m_next = control;
3132 	for (n = m; n; n = n->m_next)
3133 		sballoc(sb, n);
3134 	if ((n = sb->sb_mb) != NULL) {
3135 		if ((n->m_nextpkt != inp->sb_last_mpkt) && (n->m_nextpkt == NULL)) {
3136 			inp->sb_last_mpkt = NULL;
3137 		}
3138 		if (inp->sb_last_mpkt)
3139 			inp->sb_last_mpkt->m_nextpkt = m;
3140  		else {
3141 			while (n->m_nextpkt) {
3142 				n = n->m_nextpkt;
3143 			}
3144 			n->m_nextpkt = m;
3145 		}
3146 		inp->sb_last_mpkt = m;
3147 	} else {
3148 		inp->sb_last_mpkt = sb->sb_mb = m;
3149 		inp->sctp_vtag_first = tag;
3150 	}
3151 	return (1);
3152 #endif
3153 }
3154 
3155 /*************HOLD THIS COMMENT FOR PATCH FILE OF
3156  *************ALTERNATE ROUTING CODE
3157  */
3158 
3159 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
3160  *************ALTERNATE ROUTING CODE
3161  */
3162 
3163 struct mbuf *
3164 sctp_generate_invmanparam(int err)
3165 {
3166 	/* Return an mbuf with an invalid mandatory parameter */
3167 	struct mbuf *m;
3168 
3169 	MGET(m, M_DONTWAIT, MT_DATA);
3170 	if (m) {
3171 		struct sctp_paramhdr *ph;
3172 		m->m_len = sizeof(struct sctp_paramhdr);
3173 		ph = mtod(m, struct sctp_paramhdr *);
3174 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
3175 		ph->param_type = htons(err);
3176 	}
3177 	return (m);
3178 }
3179 
3180 static int
3181 sctp_should_be_moved(struct mbuf *this, struct sctp_association *asoc)
3182 {
3183 	struct mbuf *m;
3184 	/*
3185 	 * given a mbuf chain, look through it finding
3186 	 * the M_PKTHDR and return 1 if it belongs to
3187 	 * the association given. We tell this by
3188 	 * a kludge where we stuff the my_vtag of the asoc
3189 	 * into the m->m_pkthdr.csum_data/csum field.
3190 	 */
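	/* (the tag is stamped by sbappendaddr_nocheck() when the record is queued) */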
3191 	m = this;
3192 	while (m) {
3193 		if (m->m_flags & M_PKTHDR) {
3194 			/* check it */
3195 #if defined(__OpenBSD__)
3196 			if ((u_int32_t)m->m_pkthdr.csum == asoc->my_vtag)
3197 #else
3198 			if ((u_int32_t)m->m_pkthdr.csum_data == asoc->my_vtag)
3199 #endif
3200 			{
3201 				/* Yep */
3202 				return (1);
3203 			}
3204 		}
3205 		m = m->m_next;
3206 	}
3207 	return (0);
3208 }
3209 
3210 u_int32_t
3211 sctp_get_first_vtag_from_sb(struct socket *so)
3212 {
3213 	struct mbuf *this, *at;
3214 	u_int32_t retval;
3215 
3216 	retval = 0;
3217 	if (so->so_rcv.sb_mb) {
3218 		/* grubbing time */
3219 		this = so->so_rcv.sb_mb;
3220 		while (this) {
3221 			at = this;
3222 			/* get to the m_pkthdr */
3223 			while (at) {
3224 				if (at->m_flags & M_PKTHDR)
3225 					break;
3226 				else {
3227 					at = at->m_next;
3228 				}
3229 			}
3230 			/* now, do we have an m_pkthdr? */
3231 			if (at && (at->m_flags & M_PKTHDR)) {
3232 				/* check it */
3233 #if defined(__OpenBSD__)
3234 				if ((u_int32_t)at->m_pkthdr.csum != 0)
3235 #else
3236 				if ((u_int32_t)at->m_pkthdr.csum_data != 0)
3237 #endif
3238 				{
3239 					/* its the one */
3240 #if defined(__OpenBSD__)
3241 					retval = (u_int32_t)at->m_pkthdr.csum;
3242 #else
3243 					retval =
3244 					    (u_int32_t)at->m_pkthdr.csum_data;
3245 #endif
3246 					break;
3247 				}
3248 			}
3249 			this = this->m_nextpkt;
3250 		}
3251 
3252 	}
3253 	return (retval);
3254 
3255 }
3256 void
3257 sctp_grub_through_socket_buffer(struct sctp_inpcb *inp, struct socket *old,
3258     struct socket *new, struct sctp_tcb *stcb)
3259 {
3260 	struct mbuf **put, **take, *next, *this;
3261 	struct sockbuf *old_sb, *new_sb;
3262 	struct sctp_association *asoc;
3263 	int moved_top = 0;
3264 
3265 	asoc = &stcb->asoc;
3266 	old_sb = &old->so_rcv;
3267 	new_sb = &new->so_rcv;
3268 	if (old_sb->sb_mb == NULL) {
3269 		/* Nothing to move */
3270 		return;
3271 	}
3272 
3273 	if (inp->sctp_vtag_first == asoc->my_vtag) {
3274 		/* First one must be moved */
3275 		struct mbuf *mm;
3276 		for (mm = old_sb->sb_mb; mm; mm = mm->m_next) {
3277 			/*
3278 			 * Go down the chain and fix
3279 			 * the space allocation of the
3280 			 * two sockets.
3281 			 */
3282 			sbfree(old_sb, mm);
3283 			sballoc(new_sb, mm);
3284 		}
3285 		new_sb->sb_mb = old_sb->sb_mb;
3286 		old_sb->sb_mb = new_sb->sb_mb->m_nextpkt;
3287 		new_sb->sb_mb->m_nextpkt = NULL;
3288 		put = &new_sb->sb_mb->m_nextpkt;
3289 		moved_top = 1;
3290 	} else {
3291 		put = &new_sb->sb_mb;
3292 	}
3293 
3294 	take = &old_sb->sb_mb;
3295 	next = old_sb->sb_mb;
3296 	while (next) {
3297 		this = next;
3298 		/* position for next one */
3299 		next = this->m_nextpkt;
3300 		/* check the tag of this packet */
3301 		if (sctp_should_be_moved(this, asoc)) {
3302 			/* yes this needs to be moved */
3303 			struct mbuf *mm;
3304 			*take = this->m_nextpkt;
3305 			this->m_nextpkt = NULL;
3306 			*put = this;
3307 			for (mm = this; mm; mm = mm->m_next) {
3308 				/*
3309 				 * Go down the chain and fix
3310 				 * the space allocation of the
3311 				 * two sockets.
3312 				 */
3313 				sbfree(old_sb, mm);
3314 				sballoc(new_sb, mm);
3315 			}
3316 			put = &this->m_nextpkt;
3317 
3318 		} else {
3319 			/* no, advance our take point. */
3320 			take = &this->m_nextpkt;
3321 		}
3322 	}
3323 	if (moved_top) {
3324 		/*
3325 		 * Ok so now we must re-position vtag_first to
3326 		 * match the new first one since we moved the
3327 		 * mbuf at the top.
3328 		 */
3329 		inp->sctp_vtag_first = sctp_get_first_vtag_from_sb(old);
3330 	}
3331 }
3332 
3333 void
3334 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
3335     struct sctp_tmit_chunk *tp1)
3336 {
3337 	if (tp1->data == NULL) {
3338 		return;
3339 	}
3340 #ifdef SCTP_MBCNT_LOGGING
3341 	sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
3342 		       asoc->total_output_queue_size,
3343 		       tp1->book_size,
3344 		       asoc->total_output_mbuf_queue_size,
3345 		       tp1->mbcnt);
3346 #endif
3347 	if (asoc->total_output_queue_size >= tp1->book_size) {
3348 		asoc->total_output_queue_size -= tp1->book_size;
3349 	} else {
3350 		asoc->total_output_queue_size = 0;
3351 	}
3352 
3353 	/* Now free the mbuf */
3354 	if (asoc->total_output_mbuf_queue_size >= tp1->mbcnt) {
3355 		asoc->total_output_mbuf_queue_size -= tp1->mbcnt;
3356 	} else {
3357 		asoc->total_output_mbuf_queue_size = 0;
3358 	}
3359 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3360 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3361 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
3362 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
3363 		} else {
3364 			stcb->sctp_socket->so_snd.sb_cc = 0;
3365 
3366 		}
3367 		if (stcb->sctp_socket->so_snd.sb_mbcnt >= tp1->mbcnt) {
3368 			stcb->sctp_socket->so_snd.sb_mbcnt -= tp1->mbcnt;
3369 		} else {
3370 			stcb->sctp_socket->so_snd.sb_mbcnt = 0;
3371 		}
3372 	}
3373 }
3374 
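/*
 * Abandon a PR-SCTP message: mark the given chunk, and any remaining
 * fragments of the same message, SCTP_FORWARD_TSN_SKIP, free the data and
 * notify the ULP of the failure, moving send-queue chunks over to the
 * sent queue so a later FORWARD-TSN can cover them.  Returns the total
 * book_size released.
 */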
3375 int
3376 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
3377     int reason, struct sctpchunk_listhead *queue)
3378 {
3379 	int ret_sz = 0;
3380 	int notdone;
3381 	uint8_t foundeom = 0;
3382 
3383 	do {
3384 		ret_sz += tp1->book_size;
3385 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
3386 		if (tp1->data) {
3387 			sctp_free_bufspace(stcb, &stcb->asoc, tp1);
3388 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1);
3389 			sctp_m_freem(tp1->data);
3390 			tp1->data = NULL;
3391 			sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
3392 		}
3393 		if (tp1->flags & SCTP_PR_SCTP_BUFFER) {
3394 			stcb->asoc.sent_queue_cnt_removeable--;
3395 		}
3396 		if (queue == &stcb->asoc.send_queue) {
3397 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
3398 			/* on to the sent queue */
3399 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
3400 			    sctp_next);
3401 			stcb->asoc.sent_queue_cnt++;
3402 		}
3403 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
3404 		    SCTP_DATA_NOT_FRAG) {
3405 			/* not frag'ed, we are done */
3406 			notdone = 0;
3407 			foundeom = 1;
3408 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
3409 			/* end of frag, we are done */
3410 			notdone = 0;
3411 			foundeom = 1;
3412 		} else {
3413 			/* It's a beginning or middle piece, we must mark all of it */
3414 			notdone = 1;
3415 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3416 		}
3417 	} while (tp1 && notdone);
3418 	if ((foundeom == 0) && (queue == &stcb->asoc.sent_queue)) {
3419 		/*
3420 		 * The multi-part message was scattered
3421 		 * across the send and sent queue.
3422 		 */
3423 		tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3424 		/*
3425 	 * recurse through the send_queue too, starting at the
3426 		 * beginning.
3427 		 */
3428 		if (tp1) {
3429 			ret_sz += sctp_release_pr_sctp_chunk(stcb, tp1, reason,
3430 			    &stcb->asoc.send_queue);
3431 		} else {
3432 			printf("hmm, nothing on the send queue and no EOM?\n");
3433 		}
3434 	}
3435 	return (ret_sz);
3436 }
3437 
3438 /*
3439  * checks to see if the given address, sa, is one that is currently
3440  * known by the kernel
3441  * note: can't distinguish the same address on multiple interfaces and
3442  *       doesn't handle multiple addresses with different zone/scope id's
3443  * note: ifa_ifwithaddr() compares the entire sockaddr struct
3444  */
3445 struct ifaddr *
3446 sctp_find_ifa_by_addr(struct sockaddr *sa)
3447 {
3448 	struct ifnet *ifn;
3449 	struct ifaddr *ifa;
3450 	int s;
3451 
3452 	/* go through all our known interfaces */
3453 	s = pserialize_read_enter();
3454 	IFNET_READER_FOREACH(ifn) {
3455 		/* go through each interface addresses */
3456 		IFADDR_READER_FOREACH(ifa, ifn) {
3457 			/* correct family? */
3458 			if (ifa->ifa_addr->sa_family != sa->sa_family)
3459 				continue;
3460 
3461 #ifdef INET6
3462 			if (ifa->ifa_addr->sa_family == AF_INET6) {
3463 				/* IPv6 address */
3464 				struct sockaddr_in6 *sin1, *sin2, sin6_tmp;
3465 				sin1 = (struct sockaddr_in6 *)ifa->ifa_addr;
3466 				if (IN6_IS_SCOPE_LINKLOCAL(&sin1->sin6_addr)) {
3467 					/* create a copy and clear scope */
3468 					memcpy(&sin6_tmp, sin1,
3469 					    sizeof(struct sockaddr_in6));
3470 					sin1 = &sin6_tmp;
3471 					in6_clearscope(&sin1->sin6_addr);
3472 				}
3473 				sin2 = (struct sockaddr_in6 *)sa;
3474 				if (memcmp(&sin1->sin6_addr, &sin2->sin6_addr,
3475 					   sizeof(struct in6_addr)) == 0) {
3476 					/* found it */
3477 					pserialize_read_exit(s);
3478 					return (ifa);
3479 				}
3480 			} else
3481 #endif
3482 			if (ifa->ifa_addr->sa_family == AF_INET) {
3483 				/* IPv4 address */
3484 				struct sockaddr_in *sin1, *sin2;
3485 				sin1 = (struct sockaddr_in *)ifa->ifa_addr;
3486 				sin2 = (struct sockaddr_in *)sa;
3487 				if (sin1->sin_addr.s_addr ==
3488 				    sin2->sin_addr.s_addr) {
3489 					/* found it */
3490 					pserialize_read_exit(s);
3491 					return (ifa);
3492 				}
3493 			}
3494 			/* else, not AF_INET or AF_INET6, so skip */
3495 		} /* end foreach ifa */
3496 	} /* end foreach ifn */
3497 	pserialize_read_exit(s);
3498 
3499 	/* not found! */
3500 	return (NULL);
3501 }
3502 
3503 
3504 #ifdef __APPLE__
3505 /*
3506  * here we hack in a fix for Apple's m_copym for the case where the first mbuf
3507  * in the chain is a M_PKTHDR and the length is zero
3508  */
3509 static void
3510 sctp_pkthdr_fix(struct mbuf *m)
3511 {
3512 	struct mbuf *m_nxt;
3513 
3514 	if ((m->m_flags & M_PKTHDR) == 0) {
3515 		/* not a PKTHDR */
3516 		return;
3517 	}
3518 
3519 	if (m->m_len != 0) {
3520 		/* not a zero length PKTHDR mbuf */
3521 		return;
3522 	}
3523 
3524 	/* let's move in a word into the first mbuf... yes, ugly! */
3525 	m_nxt = m->m_next;
3526 	if (m_nxt == NULL) {
3527 		/* umm... not a very useful mbuf chain... */
3528 		return;
3529 	}
3530 	if ((size_t)m_nxt->m_len > sizeof(long)) {
3531 		/* move over a long */
3532 		bcopy(mtod(m_nxt, void *), mtod(m, void *), sizeof(long));
3533 		/* update mbuf data pointers and lengths */
3534 		m->m_len += sizeof(long);
3535 		m_nxt->m_data += sizeof(long);
3536 		m_nxt->m_len -= sizeof(long);
3537 	}
3538 }
3539 
3540 inline struct mbuf *
3541 sctp_m_copym(struct mbuf *m, int off, int len, int wait)
3542 {
3543 	sctp_pkthdr_fix(m);
3544 	return (m_copym(m, off, len, wait));
3545 }
3546 #endif /* __APPLE__ */
3547