xref: /netbsd-src/sys/netinet/sctp_timer.c (revision fc4f42693f9b1c31f39f9cf50af1bf2010325808)
1 /*	$KAME: sctp_timer.c,v 1.30 2005/06/16 18:29:25 jinmei Exp $	*/
2 /*	$NetBSD: sctp_timer.c,v 1.4 2017/12/10 11:03:58 rjs Exp $	*/
3 
4 /*
5  * Copyright (C) 2002, 2003, 2004 Cisco Systems Inc,
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the project nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: sctp_timer.c,v 1.4 2017/12/10 11:03:58 rjs Exp $");
34 
35 #ifdef _KERNEL_OPT
36 #include "opt_inet.h"
37 #include "opt_sctp.h"
38 #include "opt_ipsec.h"
39 #endif /* _KERNEL_OPT */
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/malloc.h>
44 #include <sys/mbuf.h>
45 #include <sys/domain.h>
46 #include <sys/protosw.h>
47 #include <sys/socket.h>
48 #include <sys/socketvar.h>
49 #include <sys/proc.h>
50 #include <sys/kernel.h>
51 #include <sys/sysctl.h>
52 #ifdef INET6
53 #include <sys/domain.h>
54 #endif
55 
56 #include <machine/limits.h>
57 
58 #include <net/if.h>
59 #include <net/if_types.h>
60 #include <net/route.h>
61 #include <netinet/in.h>
62 #include <netinet/in_systm.h>
63 #define _IP_VHL
64 #include <netinet/ip.h>
65 #include <netinet/in_pcb.h>
66 #include <netinet/in_var.h>
67 #include <netinet/ip_var.h>
68 
69 #ifdef INET6
70 #include <netinet/ip6.h>
71 #include <netinet6/ip6_var.h>
72 #endif /* INET6 */
73 
74 #include <netinet/sctp_pcb.h>
75 
76 #ifdef IPSEC
77 #include <netipsec/ipsec.h>
78 #include <netipsec/key.h>
79 #endif /* IPSEC */
80 #ifdef INET6
81 #include <netinet6/sctp6_var.h>
82 #endif
83 #include <netinet/sctp_var.h>
84 #include <netinet/sctp_timer.h>
85 #include <netinet/sctputil.h>
86 #include <netinet/sctp_output.h>
87 #include <netinet/sctp_hashdriver.h>
88 #include <netinet/sctp_header.h>
89 #include <netinet/sctp_indata.h>
90 #include <netinet/sctp_asconf.h>
91 
92 #include <netinet/sctp.h>
93 #include <netinet/sctp_uio.h>
94 
95 #include <net/net_osdep.h>
96 
97 #ifdef SCTP_DEBUG
98 extern u_int32_t sctp_debug_on;
99 #endif /* SCTP_DEBUG */
100 
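/*
 * Rebuild the association's sent_queue_cnt and sent_queue_retran_cnt by
 * walking the sent and control-send queues, counting every chunk and every
 * chunk still marked SCTP_DATAGRAM_RESEND, so the cached counters can be
 * re-synced if they have drifted.
 */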
101 void
102 sctp_audit_retranmission_queue(struct sctp_association *asoc)
103 {
104 	struct sctp_tmit_chunk *chk;
105 
106 #ifdef SCTP_DEBUG
107 	if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
108 		printf("Audit invoked on send queue cnt:%d onqueue:%d\n",
109 		    asoc->sent_queue_retran_cnt,
110 		    asoc->sent_queue_cnt);
111 	}
112 #endif /* SCTP_DEBUG */
113 	asoc->sent_queue_retran_cnt = 0;
114 	asoc->sent_queue_cnt = 0;
115 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
116 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
117 			asoc->sent_queue_retran_cnt++;
118 		}
119 		asoc->sent_queue_cnt++;
120 	}
121 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
122 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
123 			asoc->sent_queue_retran_cnt++;
124 		}
125 	}
126 #ifdef SCTP_DEBUG
127 	if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
128 		printf("Audit completes retran:%d onqueue:%d\n",
129 		    asoc->sent_queue_retran_cnt,
130 		    asoc->sent_queue_cnt);
131 	}
132 #endif /* SCTP_DEBUG */
133 }
134 
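/*
 * Bump the error counters after a timeout. If the destination's error count
 * reaches its failure threshold, mark it unreachable and notify the ULP.
 * If the association's overall error count exceeds 'threshold', abort the
 * association. Returns 1 if the association was destroyed, 0 otherwise.
 */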
135 int
136 sctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
137     struct sctp_nets *net, uint16_t threshold)
138 {
139 	if (net) {
140 		net->error_count++;
141 #ifdef SCTP_DEBUG
142 		if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
143 			printf("Error count for %p now %d thresh:%d\n",
144 			    net, net->error_count,
145 			    net->failure_threshold);
146 		}
147 #endif /* SCTP_DEBUG */
148 		if (net->error_count >= net->failure_threshold) {
149 			/* We had a threshold failure */
150 			if (net->dest_state & SCTP_ADDR_REACHABLE) {
151 				net->dest_state &= ~SCTP_ADDR_REACHABLE;
152 				net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
153 				if (net == stcb->asoc.primary_destination) {
154 					net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
155 				}
156 				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
157 						stcb,
158 						SCTP_FAILED_THRESHOLD,
159 						(void *)net);
160 			}
161 		}
162 		/*********HOLD THIS COMMENT FOR PATCH OF ALTERNATE
163 		 *********ROUTING CODE
164 		 */
165 		/*********HOLD THIS COMMENT FOR END OF PATCH OF ALTERNATE
166 		 *********ROUTING CODE
167 		 */
168 	}
169 	if (stcb == NULL)
170 		return (0);
171 
172 	if (net) {
173 		if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) {
174 			stcb->asoc.overall_error_count++;
175 		}
176 	} else {
177 		stcb->asoc.overall_error_count++;
178 	}
179 #ifdef SCTP_DEBUG
180 	if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
181 		printf("Overall error count for %p now %d thresh:%u state:%x\n",
182 		       &stcb->asoc,
183 		       stcb->asoc.overall_error_count,
184 		       (u_int)threshold,
185 		       ((net == NULL) ? (u_int)0 : (u_int)net->dest_state));
186 	}
187 #endif /* SCTP_DEBUG */
188 	/* We specifically do not do >= to give the assoc one more
189 	 * chance before we fail it.
190 	 */
191 	if (stcb->asoc.overall_error_count > threshold) {
192 		/* Abort notification sends a ULP notify */
193 		struct mbuf *oper;
194 		MGET(oper, M_DONTWAIT, MT_DATA);
195 		if (oper) {
196 			struct sctp_paramhdr *ph;
197 			u_int32_t *ippp;
198 
199 			oper->m_len = sizeof(struct sctp_paramhdr) +
200 			    sizeof(*ippp);
201 			ph = mtod(oper, struct sctp_paramhdr *);
202 			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
203 			ph->param_length = htons(oper->m_len);
204 			ippp = (u_int32_t *)(ph + 1);
205 			*ippp = htonl(0x40000001);
206 		}
207 		sctp_abort_an_association(inp, stcb, SCTP_FAILED_THRESHOLD, oper);
208 		return (1);
209 	}
210 	return (0);
211 }
212 
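/*
 * Walk the destination list (wrapping around once) looking for a confirmed,
 * reachable address with a valid cached route. If none is found, fall back
 * to rotating through any confirmed address other than 'net'; failing that,
 * return 'net' itself.
 */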
213 struct sctp_nets *
214 sctp_find_alternate_net(struct sctp_tcb *stcb,
215 			struct sctp_nets *net)
216 {
217 	/* Find and return an alternate network if possible */
218 	struct sctp_nets *alt, *mnet;
219 	struct rtentry *rt;
220 	int once;
221 
222 	if (stcb->asoc.numnets == 1) {
223 		/* No others but net */
224 		return (TAILQ_FIRST(&stcb->asoc.nets));
225 	}
226 	mnet = net;
227 	once = 0;
228 
229 	if (mnet == NULL) {
230 		mnet = TAILQ_FIRST(&stcb->asoc.nets);
231 	}
232 	do {
233 		alt = TAILQ_NEXT(mnet, sctp_next);
234 		if (alt == NULL) {
235 			once++;
236 			if (once > 1) {
237 				break;
238 			}
239 			alt = TAILQ_FIRST(&stcb->asoc.nets);
240 		}
241 		rt = rtcache_validate(&alt->ro);
242 		if (rt == NULL) {
243 			alt->src_addr_selected = 0;
244 		}
245 		if (((alt->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE) &&
246 		    (rt != NULL) &&
247 		    (!(alt->dest_state & SCTP_ADDR_UNCONFIRMED))) {
250 			/* Found a reachable address */
251 			rtcache_unref(rt, &alt->ro);
252 			break;
253 		}
254 		rtcache_unref(rt, &alt->ro);
255 		mnet = alt;
256 	} while (alt != NULL);
257 
258 	if (alt == NULL) {
259 		/* Case where NO in-service network exists (dormant state) */
260 		/* we rotate destinations */
261 		once = 0;
262 		mnet = net;
263 		do {
264 			alt = TAILQ_NEXT(mnet, sctp_next);
265 			if (alt == NULL) {
266 				once++;
267 				if (once > 1) {
268 					break;
269 				}
270 				alt = TAILQ_FIRST(&stcb->asoc.nets);
271 			}
272 			if ((!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
273 			    (alt != net)) {
274 				/* Found an alternate address */
275 				break;
276 			}
277 			mnet = alt;
278 		} while (alt != NULL);
279 	}
280 	if (alt == NULL) {
281 		return (net);
282 	}
283 	return (alt);
284 }
285 
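/*
 * Exponential backoff on timeout: double the destination's RTO (capped at
 * the association's maxrto) and, unless this was only a window probe,
 * collapse cwnd to one MTU and cut ssthresh to half of cwnd with a floor of
 * two MTUs. For example, an RTO of 3000 ms becomes 6000 ms unless maxrto
 * is lower.
 */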
286 static void
287 sctp_backoff_on_timeout(struct sctp_tcb *stcb,
288 			struct sctp_nets *net,
289 			int win_probe,
290 			int num_marked)
291 {
292 #ifdef SCTP_DEBUG
293 	int oldRTO;
294 
295 	oldRTO = net->RTO;
296 #endif /* SCTP_DEBUG */
297 	net->RTO <<= 1;
298 #ifdef SCTP_DEBUG
299 	if (sctp_debug_on & SCTP_DEBUG_TIMER2) {
300 		printf("Timer doubles from %d ms -to-> %d ms\n",
301 		       oldRTO, net->RTO);
302 	}
303 #endif /* SCTP_DEBUG */
304 
305 	if (net->RTO > stcb->asoc.maxrto) {
306 		net->RTO = stcb->asoc.maxrto;
307 #ifdef SCTP_DEBUG
308 		if (sctp_debug_on & SCTP_DEBUG_TIMER2) {
309 			printf("Growth capped by maxrto %d\n",
310 			       net->RTO);
311 		}
312 #endif /* SCTP_DEBUG */
313 	}
314 
315 
316 	if ((win_probe == 0) && num_marked) {
317 		/* We don't apply penalty to window probe scenarios */
318 #ifdef SCTP_CWND_LOGGING
319 		int old_cwnd = net->cwnd;
320 #endif
321 		net->ssthresh = net->cwnd >> 1;
322 		if (net->ssthresh < (net->mtu << 1)) {
323 			net->ssthresh = (net->mtu << 1);
324 		}
325 		net->cwnd = net->mtu;
326 		/* floor of 1 mtu */
327 		if (net->cwnd < net->mtu)
328 			net->cwnd = net->mtu;
329 #ifdef SCTP_CWND_LOGGING
330 		sctp_log_cwnd(net, net->cwnd-old_cwnd, SCTP_CWND_LOG_FROM_RTX);
331 #endif
332 
333 		net->partial_bytes_acked = 0;
334 #ifdef SCTP_DEBUG
335 		if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
336 			printf("collapse cwnd to 1MTU ssthresh to %d\n",
337 			       net->ssthresh);
338 		}
339 #endif
340 
341 	}
342 }
343 
344 
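/*
 * T3 timeout helper: walk the sent queue and mark every chunk sent to 'net'
 * that has been outstanding for at least one RTO as SCTP_DATAGRAM_RESEND,
 * re-homing it to 'alt'. Also re-homes any stranded ECN-ECHO, recomputes
 * flight size and rwnd, and reports the number marked via *num_marked.
 * Returns 1 if only a window probe is outstanding, 0 otherwise.
 */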
345 static int
346 sctp_mark_all_for_resend(struct sctp_tcb *stcb,
347 			 struct sctp_nets *net,
348 			 struct sctp_nets *alt,
349 			 int *num_marked)
350 {
351 
352 	/*
353 	 * Mark all chunks (well not all) that were sent to *net for retransmission.
354 	 * Move them to alt for their destination as well... We only
355 	 * mark chunks that have been outstanding long enough to have
356 	 * received feedback.
357 	 */
358 	struct sctp_tmit_chunk *chk, *tp2;
359 	struct sctp_nets *lnets;
360 	struct timeval now, min_wait, tv;
361 	int cur_rto;
362 	int win_probes, non_win_probes, orig_rwnd, audit_tf, num_mk, fir;
363 	unsigned int cnt_mk;
364 	u_int32_t orig_flight;
365 #ifdef SCTP_FR_LOGGING
366 	u_int32_t tsnfirst, tsnlast;
367 #endif
368 
369 	/* none in flight now */
370 	audit_tf = 0;
371 	fir = 0;
372 	/* figure out how long a data chunk must be pending
373 	 * before we can mark it.
374 	 */
375 	SCTP_GETTIME_TIMEVAL(&now);
376 	/* get the current RTO (in ms here; converted to microseconds below) */
377 	cur_rto = (((net->lastsa >> 2) + net->lastsv) >> 1);
378 #ifdef SCTP_FR_LOGGING
379 	sctp_log_fr(cur_rto, 0, 0, SCTP_FR_T3_MARK_TIME);
380 #endif
381 	cur_rto *= 1000;
382 #ifdef SCTP_FR_LOGGING
383 	sctp_log_fr(cur_rto, 0, 0, SCTP_FR_T3_MARK_TIME);
384 #endif
385 	tv.tv_sec = cur_rto / 1000000;
386 	tv.tv_usec = cur_rto % 1000000;
387 #ifndef __FreeBSD__
388 	timersub(&now, &tv, &min_wait);
389 #else
390 	min_wait = now;
391 	timevalsub(&min_wait, &tv);
392 #endif
393 	if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) {
394 		/*
395 		 * if we hit here, we don't
396 		 * have enough seconds on the clock to account
397 		 * for the RTO. We just let the lower seconds
398 		 * be the bounds and don't worry about it. This
399 		 * may mean we will mark a lot more than we should.
400 		 */
401 		min_wait.tv_sec = min_wait.tv_usec = 0;
402 	}
403 #ifdef SCTP_FR_LOGGING
404 	sctp_log_fr(cur_rto, now.tv_sec, now.tv_usec, SCTP_FR_T3_MARK_TIME);
405 	sctp_log_fr(0, min_wait.tv_sec, min_wait.tv_usec, SCTP_FR_T3_MARK_TIME);
406 #endif
407 	if (stcb->asoc.total_flight >= net->flight_size) {
408 		stcb->asoc.total_flight -= net->flight_size;
409 	} else {
410 		audit_tf = 1;
411 		stcb->asoc.total_flight = 0;
412 	}
413 	/* Our rwnd will be incorrect here since we are not adding
414 	 * back the cnt * mbuf but we will fix that down below.
415 	 */
416 	orig_rwnd = stcb->asoc.peers_rwnd;
417 	orig_flight = net->flight_size;
418 	stcb->asoc.peers_rwnd += net->flight_size;
419 	net->flight_size = 0;
420 	net->rto_pending = 0;
421 	net->fast_retran_ip = 0;
422 	win_probes = non_win_probes = 0;
423 #ifdef SCTP_DEBUG
424 	if (sctp_debug_on & SCTP_DEBUG_TIMER2) {
425 		printf("Marking ALL un-acked for retransmission at t3-timeout\n");
426 	}
427 #endif /* SCTP_DEBUG */
428 	/* Now on to each chunk */
429 	num_mk = cnt_mk = 0;
430 #ifdef SCTP_FR_LOGGING
431 	tsnlast = tsnfirst = 0;
432 #endif
433 	chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
434 	for (;chk != NULL; chk = tp2) {
435 		tp2 = TAILQ_NEXT(chk, sctp_next);
436 		if ((compare_with_wrap(stcb->asoc.last_acked_seq,
437 				       chk->rec.data.TSN_seq,
438 				       MAX_TSN)) ||
439 		    (stcb->asoc.last_acked_seq == chk->rec.data.TSN_seq)) {
440 			/* Strange case our list got out of order? */
441 			printf("Our list is out of order?\n");
442 			TAILQ_REMOVE(&stcb->asoc.sent_queue, chk, sctp_next);
443 			if (chk->data) {
444 				sctp_release_pr_sctp_chunk(stcb, chk, 0xffff,
445 				    &stcb->asoc.sent_queue);
446 				if (chk->flags & SCTP_PR_SCTP_BUFFER) {
447 					stcb->asoc.sent_queue_cnt_removeable--;
448 				}
449 			}
450 			stcb->asoc.sent_queue_cnt--;
451 			sctp_free_remote_addr(chk->whoTo);
452 			sctppcbinfo.ipi_count_chunk--;
453 			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
454 				panic("Chunk count is going negative");
455 			}
456 			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
457 			sctppcbinfo.ipi_gencnt_chunk++;
458 			continue;
459 		}
460 		if ((chk->whoTo == net) && (chk->sent < SCTP_DATAGRAM_ACKED)) {
461 			/* found one to mark:
462 			 * If it is less than DATAGRAM_ACKED it MUST
463 			 * not be a skipped or marked TSN but instead
464 			 * one that is either already set for retransmission OR
465 			 * one that needs retransmission.
466 			 */
467 
468 			/* validate its been outstanding long enough */
469 #ifdef SCTP_FR_LOGGING
470 			sctp_log_fr(chk->rec.data.TSN_seq,
471 				    chk->sent_rcv_time.tv_sec,
472 				    chk->sent_rcv_time.tv_usec,
473 				    SCTP_FR_T3_MARK_TIME);
474 #endif
475 			if (chk->sent_rcv_time.tv_sec > min_wait.tv_sec) {
476 				/* we have reached a chunk that was sent some
477 				 * seconds past our min; forget it, we will
478 				 * find no more to send.
479 				 */
480 #ifdef SCTP_FR_LOGGING
481 				sctp_log_fr(0,
482 					    chk->sent_rcv_time.tv_sec,
483 					    chk->sent_rcv_time.tv_usec,
484 					    SCTP_FR_T3_STOPPED);
485 #endif
486 				continue;
487 			} else if (chk->sent_rcv_time.tv_sec == min_wait.tv_sec) {
488 				/* we must look at the microseconds to know.
489 				 */
490 				if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) {
491 					/* ok it was sent after our boundary time. */
492 #ifdef SCTP_FR_LOGGING
493 					sctp_log_fr(0,
494 						    chk->sent_rcv_time.tv_sec,
495 						    chk->sent_rcv_time.tv_usec,
496 						    SCTP_FR_T3_STOPPED);
497 #endif
498 					continue;
499 				}
500 			}
501 			if (stcb->asoc.total_flight_count > 0) {
502 				stcb->asoc.total_flight_count--;
503 			}
504 			if ((chk->flags & (SCTP_PR_SCTP_ENABLED|SCTP_PR_SCTP_BUFFER)) == SCTP_PR_SCTP_ENABLED) {
505 				/* Is it expired? */
506 				if ((now.tv_sec > chk->rec.data.timetodrop.tv_sec) ||
507 				    ((chk->rec.data.timetodrop.tv_sec == now.tv_sec) &&
508 				     (now.tv_usec > chk->rec.data.timetodrop.tv_usec))) {
509 					/* Yes so drop it */
510 					if (chk->data) {
511 						sctp_release_pr_sctp_chunk(stcb,
512 						    chk,
513 						    (SCTP_RESPONSE_TO_USER_REQ|SCTP_NOTIFY_DATAGRAM_SENT),
514 						    &stcb->asoc.sent_queue);
515 					}
516 				}
517 				continue;
518 			}
519 			if (chk->sent != SCTP_DATAGRAM_RESEND) {
520  				stcb->asoc.sent_queue_retran_cnt++;
521  				num_mk++;
522 				if (fir == 0) {
523 					fir = 1;
524 #ifdef SCTP_DEBUG
525 					if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
526 						printf("First TSN marked was %x\n",
527 						       chk->rec.data.TSN_seq);
528 					}
529 #endif
530 #ifdef SCTP_FR_LOGGING
531 					tsnfirst = chk->rec.data.TSN_seq;
532 #endif
533 				}
534 #ifdef SCTP_FR_LOGGING
535 				tsnlast = chk->rec.data.TSN_seq;
536 				sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count,
537 					    0, SCTP_FR_T3_MARKED);
538 
539 #endif
540 			}
541 			chk->sent = SCTP_DATAGRAM_RESEND;
542 			/* reset the TSN for striking and other FR stuff */
543 			chk->rec.data.doing_fast_retransmit = 0;
544 #ifdef SCTP_DEBUG
545 			if (sctp_debug_on & SCTP_DEBUG_TIMER3) {
546 				printf("mark TSN:%x for retransmission\n", chk->rec.data.TSN_seq);
547 			}
548 #endif /* SCTP_DEBUG */
549 			/* Clear any timing so NO RTT measurement is done on this chunk */
550 			chk->do_rtt = 0;
551 			/* Bump up the count */
552 			if (compare_with_wrap(chk->rec.data.TSN_seq,
553 					      stcb->asoc.t3timeout_highest_marked,
554 					      MAX_TSN)) {
555 				/* TSN_seq > than t3timeout so update */
556 				stcb->asoc.t3timeout_highest_marked = chk->rec.data.TSN_seq;
557 			}
558 			if (alt != net) {
559 				sctp_free_remote_addr(chk->whoTo);
560 				chk->whoTo = alt;
561 				alt->ref_count++;
562 			}
563 			if ((chk->rec.data.state_flags & SCTP_WINDOW_PROBE) !=
564 			    SCTP_WINDOW_PROBE) {
565 				non_win_probes++;
566 			} else {
567 				chk->rec.data.state_flags &= ~SCTP_WINDOW_PROBE;
568 				win_probes++;
569 			}
570 		}
571 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
572 			cnt_mk++;
573 		}
574 	}
575 
576 #ifdef SCTP_FR_LOGGING
577 	sctp_log_fr(tsnfirst, tsnlast, num_mk, SCTP_FR_T3_TIMEOUT);
578 #endif
579 	/* compensate for the number we marked */
580 	stcb->asoc.peers_rwnd += (num_mk /* * sizeof(struct mbuf)*/);
581 
582 #ifdef SCTP_DEBUG
583 	if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
584 		if (num_mk) {
585 #ifdef SCTP_FR_LOGGING
586 			printf("LAST TSN marked was %x\n", tsnlast);
587 #endif
588 			printf("Num marked for retransmission was %d peer-rwd:%ld\n",
589 			       num_mk, (u_long)stcb->asoc.peers_rwnd);
597 		}
598 	}
599 #endif
600 	*num_marked = num_mk;
601 	if (stcb->asoc.sent_queue_retran_cnt != cnt_mk) {
602 		printf("Local Audit says there are %d for retran asoc cnt:%d\n",
603 		       cnt_mk, stcb->asoc.sent_queue_retran_cnt);
604 #ifndef SCTP_AUDITING_ENABLED
605 		stcb->asoc.sent_queue_retran_cnt = cnt_mk;
606 #endif
607 	}
608 #ifdef SCTP_DEBUG
609 	if (sctp_debug_on & SCTP_DEBUG_TIMER3) {
610 		printf("**************************\n");
611 	}
612 #endif /* SCTP_DEBUG */
613 
614 	/* Now check for an ECN Echo that may be stranded */
615 	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
616 		if ((chk->whoTo == net) &&
617 		    (chk->rec.chunk_id == SCTP_ECN_ECHO)) {
618 			sctp_free_remote_addr(chk->whoTo);
619 			chk->whoTo = alt;
620 			if (chk->sent != SCTP_DATAGRAM_RESEND) {
621 				chk->sent = SCTP_DATAGRAM_RESEND;
622 				stcb->asoc.sent_queue_retran_cnt++;
623 			}
624 			alt->ref_count++;
625 		}
626 	}
627 	if ((orig_rwnd == 0) && (stcb->asoc.total_flight == 0) &&
628 	    (orig_flight <= net->mtu)) {
629 		/*
630 		 * If the LAST packet sent was not acked and our rwnd is 0
631 		 * then we are in a win-probe state.
632 		 */
633 		win_probes = 1;
634 		non_win_probes = 0;
635 #ifdef SCTP_DEBUG
636 		if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
637 			printf("WIN_PROBE set via o_rwnd=0 tf=0 and all:%d fit in mtu:%d\n",
638 			       orig_flight, net->mtu);
639 		}
640 #endif
641 	}
642 
643 	if (audit_tf) {
644 #ifdef SCTP_DEBUG
645 		if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
646 			printf("Audit total flight due to negative value net:%p\n",
647 			    net);
648 		}
649 #endif /* SCTP_DEBUG */
650 		stcb->asoc.total_flight = 0;
651 		stcb->asoc.total_flight_count = 0;
652 		/* Clear all networks flight size */
653 		TAILQ_FOREACH(lnets, &stcb->asoc.nets, sctp_next) {
654 			lnets->flight_size = 0;
655 #ifdef SCTP_DEBUG
656 			if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
657 				printf("Net:%p c-f cwnd:%d ssthresh:%d\n",
658 				    lnets, lnets->cwnd, lnets->ssthresh);
659 			}
660 #endif /* SCTP_DEBUG */
661 		}
662 		TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
663 			if (chk->sent < SCTP_DATAGRAM_RESEND) {
664 				stcb->asoc.total_flight += chk->book_size;
665 				chk->whoTo->flight_size += chk->book_size;
666 				stcb->asoc.total_flight_count++;
667 			}
668 		}
669 	}
670 	/* Setup the ecn nonce re-sync point. We
671 	 * do this since retransmissions are NOT
672 	 * set up for ECN. This means that due to
673 	 * Karn's rule, we don't know the total
674 	 * of the peer's ecn bits.
675 	 */
676 	chk = TAILQ_FIRST(&stcb->asoc.send_queue);
677 	if (chk == NULL) {
678 		stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
679 	} else {
680 		stcb->asoc.nonce_resync_tsn = chk->rec.data.TSN_seq;
681 	}
682 	stcb->asoc.nonce_wait_for_ecne = 0;
683 	stcb->asoc.nonce_sum_check = 0;
684 	/* We return 1 if we only have a window probe outstanding */
685 	if (win_probes && (non_win_probes == 0)) {
686 		return (1);
687 	}
688 	return (0);
689 }
690 
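/*
 * Re-home every queued chunk (stream queues and the pending send queue)
 * that is currently addressed to 'net' so that it is sent to 'alt' instead.
 */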
691 static void
692 sctp_move_all_chunks_to_alt(struct sctp_tcb *stcb,
693 			    struct sctp_nets *net,
694 			    struct sctp_nets *alt)
695 {
696 	struct sctp_association *asoc;
697 	struct sctp_stream_out *outs;
698 	struct sctp_tmit_chunk *chk;
699 
700 	if (net == alt)
701 		/* nothing to do */
702 		return;
703 
704 	asoc = &stcb->asoc;
705 
706 	/*
707 	 * now run through all the streams checking for chunks sent to our
708 	 * bad network.
709 	 */
710 	TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
711 		/* now clean up any chunks here */
712 		TAILQ_FOREACH(chk, &outs->outqueue, sctp_next) {
713 			if (chk->whoTo == net) {
714 				sctp_free_remote_addr(chk->whoTo);
715 				chk->whoTo = alt;
716 				alt->ref_count++;
717 			}
718 		}
719 	}
720 	/* Now check the pending queue */
721 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
722 		if (chk->whoTo == net) {
723 			sctp_free_remote_addr(chk->whoTo);
724 			chk->whoTo = alt;
725 			alt->ref_count++;
726 		}
727 	}
728 
729 }
730 
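/*
 * T3-rtx expiry: pick an alternate destination, mark outstanding chunks for
 * retransmission, end any fast-retransmit recovery, back off RTO/cwnd and do
 * threshold management (skipping per-destination penalties for pure window
 * probes). If the destination became unreachable, move pending data and,
 * when it was the primary, switch the primary to the alternate. In the
 * COOKIE-ECHOED state we only restart the timer; with PR-SCTP we may also
 * need to advance the peer ack point and send a FORWARD-TSN.
 */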
731 int
732 sctp_t3rxt_timer(struct sctp_inpcb *inp,
733 		 struct sctp_tcb *stcb,
734 		 struct sctp_nets *net)
735 {
736 	struct sctp_nets *alt;
737 	int win_probe, num_mk;
738 
739 
740 #ifdef SCTP_FR_LOGGING
741 	sctp_log_fr(0, 0, 0, SCTP_FR_T3_TIMEOUT);
742 #endif
743 	/* Find an alternate and mark those for retransmission */
744 	alt = sctp_find_alternate_net(stcb, net);
745 	win_probe = sctp_mark_all_for_resend(stcb, net, alt, &num_mk);
746 
747 	/* FR Loss recovery just ended with the T3. */
748 	stcb->asoc.fast_retran_loss_recovery = 0;
749 
750 	/* setup the sat loss recovery that prevents
751 	 * satellite cwnd advance.
752 	 */
753  	stcb->asoc.sat_t3_loss_recovery = 1;
754 	stcb->asoc.sat_t3_recovery_tsn = stcb->asoc.sending_seq;
755 
756 	/* Backoff the timer and cwnd */
757 	sctp_backoff_on_timeout(stcb, net, win_probe, num_mk);
758 	if (win_probe == 0) {
759 		/* We don't do normal threshold management on window probes */
760 		if (sctp_threshold_management(inp, stcb, net,
761 					      stcb->asoc.max_send_times)) {
762 			/* Association was destroyed */
763 			return (1);
764 		} else {
765 			if (net != stcb->asoc.primary_destination) {
766 				/* send an immediate HB if our RTO is stale */
767 				struct  timeval now;
768 				unsigned int ms_goneby;
769 				SCTP_GETTIME_TIMEVAL(&now);
770 				if (net->last_sent_time.tv_sec) {
771 					ms_goneby = (now.tv_sec - net->last_sent_time.tv_sec) * 1000;
772 				} else {
773 					ms_goneby = 0;
774 				}
775 				if ((ms_goneby > net->RTO) || (net->RTO == 0)) {
776 				if ((ms_goneby > net->RTO) || (net->RTO == 0)) {
777 					/* no recent feedback in an RTO or more, request an RTT update */
777 					sctp_send_hb(stcb, 1, net);
778 				}
779 			}
780 		}
781 	} else {
782 		/*
783 		 * For a window probe we don't penalize the nets but only
784 		 * the association. This may fail it if SACKs are not coming
785 		 * back. If SACKs are coming with rwnd locked at 0, we will
786 		 * continue to hold things waiting for rwnd to rise.
787 		 */
788 		if (sctp_threshold_management(inp, stcb, NULL,
789 					      stcb->asoc.max_send_times)) {
790 			/* Association was destroyed */
791 			return (1);
792 		}
793 	}
794 	if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
795 		/* Move all pending over too */
796 		sctp_move_all_chunks_to_alt(stcb, net, alt);
797 		/* Was it our primary? */
798 		if ((stcb->asoc.primary_destination == net) && (alt != net)) {
799 			/*
800 			 * Yes, note it as such and find an alternate.
801 			 * Note: this means the HB code must use this to re-set
802 			 * the primary if it goes active AND if someone does
803 			 * a change-primary then this flag must be cleared
804 			 * from any net structures.
805 			 */
806 			if (sctp_set_primary_addr(stcb,
807 						 (struct sockaddr *)NULL,
808 						 alt) == 0) {
809 				net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
810 				net->src_addr_selected = 0;
811 			}
812 		}
813 	}
814 	/*
815 	 * Special case for the cookie-echoed state: we don't do output
816 	 * but must await the COOKIE-ACK before retransmission.
817 	 */
818 	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
819 		/*
820 		 * Here we just reset the timer and start again since we
821 		 * have not established the asoc
822 		 */
823 #ifdef SCTP_DEBUG
824 		if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
825 			printf("Special cookie case return\n");
826 		}
827 #endif /* SCTP_DEBUG */
828 		sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
829 		return (0);
830 	}
831 	if (stcb->asoc.peer_supports_prsctp) {
832 		struct sctp_tmit_chunk *lchk;
833 		lchk = sctp_try_advance_peer_ack_point(stcb, &stcb->asoc);
834 		/* C3. See if we need to send a Fwd-TSN */
835 		if (compare_with_wrap(stcb->asoc.advanced_peer_ack_point,
836 				      stcb->asoc.last_acked_seq, MAX_TSN)) {
837 			/*
838 			 * ISSUE with ECN, see FWD-TSN processing for notes
839 			 * on issues that will occur when the ECN NONCE stuff
840 			 * is put into SCTP for cross checking.
841 			 */
842 #ifdef SCTP_DEBUG
843 			if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
844 				printf("Forward TSN time\n");
845 			}
846 #endif /* SCTP_DEBUG */
847 			send_forward_tsn(stcb, &stcb->asoc);
848 			if (lchk) {
849 				/* Assure a timer is up */
850 				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo);
851 			}
852 		}
853 	}
854 	return (0);
855 }
856 
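/*
 * T1-init expiry: either finish a delayed connection by (re)sending the
 * INIT, or do threshold management, back off the primary, clamp RTO to
 * initial_init_rto_max, possibly switch the primary to an alternate, and
 * send a new INIT. Returns 1 if the association was destroyed.
 */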
857 int
858 sctp_t1init_timer(struct sctp_inpcb *inp,
859 		  struct sctp_tcb *stcb,
860 		  struct sctp_nets *net)
861 {
862 	/* bump the thresholds */
863 	if (stcb->asoc.delayed_connection) {
864 		/* special hook for delayed connection. The
865 		 * library did NOT complete the rest of its
866 		 * sends.
867 		 */
868 		stcb->asoc.delayed_connection = 0;
869 		sctp_send_initiate(inp, stcb);
870 		return (0);
871 	}
872 	if (sctp_threshold_management(inp, stcb, net,
873 				      stcb->asoc.max_init_times)) {
874 		/* Association was destroyed */
875 		return (1);
876 	}
877 	stcb->asoc.dropped_special_cnt = 0;
878 	sctp_backoff_on_timeout(stcb, stcb->asoc.primary_destination, 1, 0);
879 	if (stcb->asoc.initial_init_rto_max < net->RTO) {
880 		net->RTO = stcb->asoc.initial_init_rto_max;
881 	}
882 	if (stcb->asoc.numnets > 1) {
883 		/* If we have more than one address, try an alternate */
884 		struct sctp_nets *alt;
885 		alt = sctp_find_alternate_net(stcb, stcb->asoc.primary_destination);
886 		if ((alt != NULL) && (alt != stcb->asoc.primary_destination)) {
887 			sctp_move_all_chunks_to_alt(stcb, stcb->asoc.primary_destination, alt);
888 			stcb->asoc.primary_destination = alt;
889 		}
890 	}
891 	/* Send out a new init */
892 	sctp_send_initiate(inp, stcb);
893 	return (0);
894 }
895 
896 /*
897  * For cookie and asconf we actually need to find and mark for resend,
898  * then increment the resend counter (after all the threshold management
899  * stuff of course).
900  */
901 int  sctp_cookie_timer(struct sctp_inpcb *inp,
902 		       struct sctp_tcb *stcb,
903 		       struct sctp_nets *net)
904 {
905 	struct sctp_nets *alt;
906 	struct sctp_tmit_chunk *cookie;
907 	/* first before all else we must find the cookie */
908 	TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, sctp_next) {
909 		if (cookie->rec.chunk_id == SCTP_COOKIE_ECHO) {
910 			break;
911 		}
912 	}
913 	if (cookie == NULL) {
914 		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
915 			/* FOOBAR! */
916 			struct mbuf *oper;
917 			MGET(oper, M_DONTWAIT, MT_DATA);
918 			if (oper) {
919 				struct sctp_paramhdr *ph;
920 				u_int32_t *ippp;
921 
922 				oper->m_len = sizeof(struct sctp_paramhdr) +
923 				    sizeof(*ippp);
924 				ph = mtod(oper, struct sctp_paramhdr *);
925 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
926 				ph->param_length = htons(oper->m_len);
927 				ippp = (u_int32_t *)(ph + 1);
928 				*ippp = htonl(0x40000002);
929 			}
930 			sctp_abort_an_association(inp, stcb, SCTP_INTERNAL_ERROR,
931 			    oper);
932 		}
933 		return (1);
934 	}
935 	/* Ok we found the cookie, threshold management next */
936 	if (sctp_threshold_management(inp, stcb, cookie->whoTo,
937 	    stcb->asoc.max_init_times)) {
938 		/* Assoc is over */
939 		return (1);
940 	}
941 	/*
942 	 * cleared threshold management, now let's back off the address &
943 	 * select an alternate
944 	 */
945 	stcb->asoc.dropped_special_cnt = 0;
946 	sctp_backoff_on_timeout(stcb, cookie->whoTo, 1, 0);
947 	alt = sctp_find_alternate_net(stcb, cookie->whoTo);
948 	if (alt != cookie->whoTo) {
949 		sctp_free_remote_addr(cookie->whoTo);
950 		cookie->whoTo = alt;
951 		alt->ref_count++;
952 	}
953 	/* Now mark the retran info */
954 	if (cookie->sent != SCTP_DATAGRAM_RESEND) {
955 		stcb->asoc.sent_queue_retran_cnt++;
956 	}
957 	cookie->sent = SCTP_DATAGRAM_RESEND;
958 	/*
959 	 * Now call the output routine to kick out the cookie again. Note we
960 	 * don't mark any chunks for retran, so FR (or a send timer) will
961 	 * need to kick in to move these.
962 	 */
963 	return (0);
964 }
965 
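/*
 * Stream-reset timer: locate the outstanding STREAM-RESET request on the
 * control queue, do threshold management, back off and re-home it (and any
 * stranded ECN-ECHO) to an alternate, mark it for retransmission and
 * restart the timer.
 */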
966 int sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
967     struct sctp_nets *net)
968 {
969 	struct sctp_nets *alt;
970 	struct sctp_tmit_chunk *strrst, *chk;
971 	struct sctp_stream_reset_req *strreq;
972 	/* find the existing STRRESET */
973 	TAILQ_FOREACH(strrst, &stcb->asoc.control_send_queue,
974 		      sctp_next) {
975 		if (strrst->rec.chunk_id == SCTP_STREAM_RESET) {
976 			/* is it what we want */
977 			strreq = mtod(strrst->data, struct sctp_stream_reset_req *);
978 			if (strreq->sr_req.ph.param_type == ntohs(SCTP_STR_RESET_REQUEST)) {
979 				break;
980 			}
981 		}
982 	}
983 	if (strrst == NULL) {
984 #ifdef SCTP_DEBUG
985 		if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
986 			printf("Strange, strreset timer fires, but I can't find an str-reset?\n");
987 		}
988 #endif /* SCTP_DEBUG */
989 		return (0);
990 	}
991 	/* do threshold management */
992 	if (sctp_threshold_management(inp, stcb, strrst->whoTo,
993 				      stcb->asoc.max_send_times)) {
994 		/* Assoc is over */
995 		return (1);
996 	}
997 
998 	/*
999 	 * cleared threshold management
1000 	 * now let's back off the address & select an alternate
1001 	 */
1002 	sctp_backoff_on_timeout(stcb, strrst->whoTo, 1, 0);
1003 	alt = sctp_find_alternate_net(stcb, strrst->whoTo);
1004 	sctp_free_remote_addr(strrst->whoTo);
1005 	strrst->whoTo = alt;
1006 	alt->ref_count++;
1007 
1008 	/* See if an ECN Echo is also stranded */
1009 	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
1010 		if ((chk->whoTo == net) &&
1011 		    (chk->rec.chunk_id == SCTP_ECN_ECHO)) {
1012 			sctp_free_remote_addr(chk->whoTo);
1013 			if (chk->sent != SCTP_DATAGRAM_RESEND) {
1014 				chk->sent = SCTP_DATAGRAM_RESEND;
1015 				stcb->asoc.sent_queue_retran_cnt++;
1016 			}
1017 			chk->whoTo = alt;
1018 			alt->ref_count++;
1019 		}
1020 	}
1021 	if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
1022 		/*
1023 		 * If the address went un-reachable, we need to move
1024 		 * to alternates for ALL chk's in queue
1025 		 */
1026 		sctp_move_all_chunks_to_alt(stcb, net, alt);
1027 	}
1028 	/* mark the retran info */
1029 	if (strrst->sent != SCTP_DATAGRAM_RESEND)
1030 		stcb->asoc.sent_queue_retran_cnt++;
1031 	strrst->sent = SCTP_DATAGRAM_RESEND;
1032 
1033 	/* restart the timer */
1034 	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, inp, stcb, strrst->whoTo);
1035 	return (0);
1036 }
1037 
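/*
 * ASCONF timer: send the first ASCONF if none is outstanding; otherwise do
 * threshold management, give up (marking the peer ASCONF-incapable) if the
 * chunk has been sent more than max_send_times, and otherwise back off,
 * re-home the ASCONF (and any stranded ECN-ECHO) to an alternate and mark
 * it for retransmission.
 */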
1038 int sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1039     struct sctp_nets *net)
1040 {
1041 	struct sctp_nets *alt;
1042 	struct sctp_tmit_chunk *asconf, *chk;
1043 
1044 	/* is this the first send, or a retransmission? */
1045 	if (stcb->asoc.asconf_sent == 0) {
1046 		/* compose a new ASCONF chunk and send it */
1047 		sctp_send_asconf(stcb, net);
1048 	} else {
1049 		/* Retransmission of the existing ASCONF needed... */
1050 
1051 		/* find the existing ASCONF */
1052 		TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
1053 		    sctp_next) {
1054 			if (asconf->rec.chunk_id == SCTP_ASCONF) {
1055 				break;
1056 			}
1057 		}
1058 		if (asconf == NULL) {
1059 #ifdef SCTP_DEBUG
1060 			if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1061 				printf("Strange, asconf timer fires, but I can't find an asconf?\n");
1062 			}
1063 #endif /* SCTP_DEBUG */
1064 			return (0);
1065 		}
1066 		/* do threshold management */
1067 		if (sctp_threshold_management(inp, stcb, asconf->whoTo,
1068 		    stcb->asoc.max_send_times)) {
1069 			/* Assoc is over */
1070 			return (1);
1071 		}
1072 
1073 		/* PETER? FIX? How will the following code ever run? If
1074 		 * the max_send_times is hit, threshold management will
1075 		 * blow away the association?
1076 		 */
1077 		if (asconf->snd_count > stcb->asoc.max_send_times) {
1078 			/*
1079 			 * Something is rotten: the peer is not responding to
1080 			 * ASCONFs but maybe is to data, e.g. it is not
1081 			 * properly handling the chunk type upper bits.
1082 			 * Mark this peer as ASCONF-incapable and clean up.
1083 			 */
1084 #ifdef SCTP_DEBUG
1085 			if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1086 				printf("asconf_timer: Peer has not responded to our repeated ASCONFs\n");
1087 			}
1088 #endif /* SCTP_DEBUG */
1089 			sctp_asconf_cleanup(stcb, net);
1090 			return (0);
1091 		}
1092 		/*
1093 		 * cleared threshold management
1094 		 * now let's back off the address & select an alternate
1095 		 */
1096 		sctp_backoff_on_timeout(stcb, asconf->whoTo, 1, 0);
1097 		alt = sctp_find_alternate_net(stcb, asconf->whoTo);
1098 		sctp_free_remote_addr(asconf->whoTo);
1099 		asconf->whoTo = alt;
1100 		alt->ref_count++;
1101 
1102 		/* See if an ECN Echo is also stranded */
1103 		TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
1104 			if ((chk->whoTo == net) &&
1105 			    (chk->rec.chunk_id == SCTP_ECN_ECHO)) {
1106 				sctp_free_remote_addr(chk->whoTo);
1107 				chk->whoTo = alt;
1108 				if (chk->sent != SCTP_DATAGRAM_RESEND) {
1109 					chk->sent = SCTP_DATAGRAM_RESEND;
1110 					stcb->asoc.sent_queue_retran_cnt++;
1111 				}
1112 				alt->ref_count++;
1113 
1114 			}
1115 		}
1116 		if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
1117 			/*
1118 			 * If the address went un-reachable, we need to move
1119 			 * to alternates for ALL chk's in queue
1120 			 */
1121 			sctp_move_all_chunks_to_alt(stcb, net, alt);
1122 		}
1123 		/* mark the retran info */
1124 		if (asconf->sent != SCTP_DATAGRAM_RESEND)
1125 			stcb->asoc.sent_queue_retran_cnt++;
1126 		asconf->sent = SCTP_DATAGRAM_RESEND;
1127 	}
1128 	return (0);
1129 }
1130 
1131 /*
1132  * For the shutdown and shutdown-ack, we do not keep one around on the
1133  * control queue. This means we must generate a new one and call the general
1134  * chunk output routine, AFTER having done threshold
1135  * management.
1136  */
1137 int
1138 sctp_shutdown_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1139     struct sctp_nets *net)
1140 {
1141 	struct sctp_nets *alt;
1142 	/* first threshold management */
1143 	if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
1144 		/* Assoc is over */
1145 		return (1);
1146 	}
1147 	/* second select an alternative */
1148 	alt = sctp_find_alternate_net(stcb, net);
1149 
1150 	/* third generate a shutdown into the queue for our net */
1151 #ifdef SCTP_DEBUG
1152 	if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
1153 		printf("%s:%d sends a shutdown\n",
1154 		       __FILE__,
1155 		       __LINE__
1156 			);
1157 	}
1158 #endif
1159 	if (alt) {
1160 		sctp_send_shutdown(stcb, alt);
1161 	} else {
1162 		/* if alt is NULL, there is no dest
1163 		 * to send to??
1164 		 */
1165 		return (0);
1166 	}
1167 	/* fourth restart timer */
1168 	sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, alt);
1169 	return (0);
1170 }
1171 
1172 int sctp_shutdownack_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1173     struct sctp_nets *net)
1174 {
1175 	struct sctp_nets *alt;
1176 	/* first threshold management */
1177 	if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
1178 		/* Assoc is over */
1179 		return (1);
1180 	}
1181 	/* second select an alternative */
1182 	alt = sctp_find_alternate_net(stcb, net);
1183 
1184 	/* third generate a shutdown-ack into the queue for our net */
1185 	sctp_send_shutdown_ack(stcb, alt);
1186 
1187 	/* fourth restart timer */
1188 	sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, alt);
1189 	return (0);
1190 }
1191 
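/*
 * Sanity check run when data appears queued but nothing is on the send or
 * sent queues: recount the chunks on the stream out-wheel, complain about
 * any counter mismatches and, if chunks really are stuck, kick the output
 * routine to get them moving.
 */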
1192 static void
1193 sctp_audit_stream_queues_for_size(struct sctp_inpcb *inp,
1194 				  struct sctp_tcb *stcb)
1195 {
1196 	struct sctp_stream_out *outs;
1197 	struct sctp_tmit_chunk *chk;
1198 	unsigned int chks_in_queue = 0;
1199 
1200 	if ((stcb == NULL) || (inp == NULL))
1201 		return;
1202 	if (TAILQ_EMPTY(&stcb->asoc.out_wheel)) {
1203 		printf("Strange, out_wheel empty nothing on sent/send and  tot=%lu?\n",
1204 		    (u_long)stcb->asoc.total_output_queue_size);
1205 		stcb->asoc.total_output_queue_size = 0;
1206 		return;
1207 	}
1208 	if (stcb->asoc.sent_queue_retran_cnt) {
1209 		printf("Hmm, sent_queue_retran_cnt is non-zero %d\n",
1210 		    stcb->asoc.sent_queue_retran_cnt);
1211 		stcb->asoc.sent_queue_retran_cnt = 0;
1212 	}
1213 	/* Check to see if some data is queued; if so, report it */
1214 	TAILQ_FOREACH(outs, &stcb->asoc.out_wheel, next_spoke) {
1215 		if (!TAILQ_EMPTY(&outs->outqueue)) {
1216 			TAILQ_FOREACH(chk, &outs->outqueue, sctp_next) {
1217 				chks_in_queue++;
1218 			}
1219 		}
1220 	}
1221 	if (chks_in_queue != stcb->asoc.stream_queue_cnt) {
1222 		printf("Hmm, stream queue cnt at %d I counted %d in stream out wheel\n",
1223 		       stcb->asoc.stream_queue_cnt, chks_in_queue);
1224 	}
1225 	if (chks_in_queue) {
1226 		/* call the output queue function */
1227 		sctp_chunk_output(inp, stcb, 1);
1228 		if ((TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
1229 		    (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
1230 			/* Probably should go in and make it go back through and add fragments allowed */
1231 			printf("Still nothing moved %d chunks are stuck\n", chks_in_queue);
1232 		}
1233 	} else {
1234 		printf("Found no chunks on any queue tot:%lu\n",
1235 		    (u_long)stcb->asoc.total_output_queue_size);
1236 		stcb->asoc.total_output_queue_size = 0;
1237 	}
1238 }
1239 
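/*
 * Heartbeat timer: back off a destination that did not answer its last HB,
 * clear its partial_bytes_acked, audit the stream queues if output appears
 * stuck, then send a heartbeat (extra ones, up to max_burst, while multiple
 * addresses remain unconfirmed). Returns 1 if sctp_send_hb() fails.
 */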
1240 int
1241 sctp_heartbeat_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1242     struct sctp_nets *net)
1243 {
1244 	int cnt_of_unconf = 0;
1245 
1246 	if (net) {
1247 		if (net->hb_responded == 0) {
1248 			sctp_backoff_on_timeout(stcb, net, 1, 0);
1249 		}
1250 		/* Zero PBA, if it needs it */
1251 		if (net->partial_bytes_acked) {
1252 			net->partial_bytes_acked = 0;
1253 		}
1254 	}
1255 	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
1256 		if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1257 		    (net->dest_state & SCTP_ADDR_REACHABLE)) {
1258 			cnt_of_unconf++;
1259 		}
1260 	}
1261 	if ((stcb->asoc.total_output_queue_size > 0) &&
1262 	    (TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
1263 	    (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
1264 		sctp_audit_stream_queues_for_size(inp, stcb);
1265 	}
1266 	/* Send a new HB, this will do threshold management, pick a new dest */
1267 	if (sctp_send_hb(stcb, 0, NULL) < 0) {
1268 		return (1);
1269 	}
1270 	if (cnt_of_unconf > 1) {
1271 		/*
1272 		 * this will send out extra hb's up to maxburst if
1273 		 * there are any unconfirmed addresses.
1274 		 */
1275 		int cnt_sent = 1;
1276 		while ((cnt_sent < stcb->asoc.max_burst) && (cnt_of_unconf > 1)) {
1277 			if (sctp_send_hb(stcb, 0, NULL) == 0)
1278 				break;
1279 			cnt_of_unconf--;
1280 			cnt_sent++;
1281 		}
1282 	}
1283 	return (0);
1284 }
1285 
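/*
 * Table of common link MTU values, in ascending order, used by
 * sctp_getnext_mtu() to pick the next probe size; e.g. a current MTU of
 * 1400 steps up to 1492.
 */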
1286 #define SCTP_NUMBER_OF_MTU_SIZES 18
1287 static u_int32_t mtu_sizes[]={
1288 	68,
1289 	296,
1290 	508,
1291 	512,
1292 	544,
1293 	576,
1294 	1006,
1295 	1492,
1296 	1500,
1297 	1536,
1298 	2002,
1299 	2048,
1300 	4352,
1301 	4464,
1302 	8166,
1303 	17914,
1304 	32000,
1305 	65535
1306 };
1307 
1308 
1309 static u_int32_t
1310 sctp_getnext_mtu(struct sctp_inpcb *inp, u_int32_t cur_mtu)
1311 {
1312 	/* select another MTU that is just bigger than this one */
1313 	int i;
1314 
1315 	for (i = 0; i < SCTP_NUMBER_OF_MTU_SIZES; i++) {
1316 		if (cur_mtu < mtu_sizes[i]) {
1317 		    /* this is the smallest listed MTU bigger than cur_mtu */
1318 		    return (mtu_sizes[i]);
1319 		}
1320 	}
1321 	/* here return the highest allowable */
1322 	return (cur_mtu);
1323 }
1324 
1325 
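/*
 * Path-MTU raise timer: if a larger candidate MTU exists in the table
 * above, adopt it when the cached route's interface MTU allows, then
 * restart the timer so the path MTU keeps being probed upward.
 */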
1326 void sctp_pathmtu_timer(struct sctp_inpcb *inp,
1327 			struct sctp_tcb *stcb,
1328 			struct sctp_nets *net)
1329 {
1330 	u_int32_t next_mtu;
1331 	struct rtentry *rt;
1332 
1333 	/* restart the timer in any case */
1334 	next_mtu = sctp_getnext_mtu(inp, net->mtu);
1335 	if (next_mtu <= net->mtu) {
1336 	    /* nothing to do */
1337 	    return;
1338 	}
1339 	rt = rtcache_validate(&net->ro);
1340 	if (rt != NULL) {
1341 		/* only if we have a route and interface do we
1342 		 * set anything. Note we always restart
1343 		 * the timer though just in case it is updated
1344 		 * (i.e. the ifp) or route/ifp is populated.
1345 		 */
1346 		if (rt->rt_ifp != NULL) {
1347 			if (rt->rt_ifp->if_mtu > next_mtu) {
1348 				/* ok it will fit out the door */
1349 				net->mtu = next_mtu;
1350 			}
1351 		}
1352 		rtcache_unref(rt, &net->ro);
1353 	}
1354 	/* restart the timer */
1355 	sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
1356 }
1357 
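/*
 * Autoclose timer: if the association has been idle (no send or receive)
 * for at least sctp_autoclose_ticks, flush any output and, when the queues
 * are empty, send a SHUTDOWN and start the shutdown timers; otherwise
 * re-arm the timer for the time remaining.
 */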
1358 void sctp_autoclose_timer(struct sctp_inpcb *inp,
1359 			  struct sctp_tcb *stcb,
1360 			  struct sctp_nets *net)
1361 {
1362 	struct timeval tn, *tim_touse;
1363 	struct sctp_association *asoc;
1364 	int ticks_gone_by;
1365 
1366 	SCTP_GETTIME_TIMEVAL(&tn);
1367 	if (stcb->asoc.sctp_autoclose_ticks &&
1368 	    (inp->sctp_flags & SCTP_PCB_FLAGS_AUTOCLOSE)) {
1369 		/* Auto close is on */
1370 		asoc = &stcb->asoc;
1371 		/* pick the time to use */
1372 		if (asoc->time_last_rcvd.tv_sec >
1373 		    asoc->time_last_sent.tv_sec) {
1374 			tim_touse = &asoc->time_last_rcvd;
1375 		} else {
1376 			tim_touse = &asoc->time_last_sent;
1377 		}
1378 		/* Now, has enough time transpired to autoclose? */
1379 		ticks_gone_by = ((tn.tv_sec - tim_touse->tv_sec) * hz);
1380 		if ((ticks_gone_by > 0) &&
1381 		    (ticks_gone_by >= (int)asoc->sctp_autoclose_ticks)) {
1382 			/*
1383 			 * autoclose time has hit, call the output routine,
1384 			 * which should do nothing just to be SURE we don't
1385 			 * have hanging data. We can then safely check the
1386 			 * queues and know that we are clear to send shutdown
1387 			 */
1388 			sctp_chunk_output(inp, stcb, 9);
1389 			/* Are we clean? */
1390 			if (TAILQ_EMPTY(&asoc->send_queue) &&
1391 			    TAILQ_EMPTY(&asoc->sent_queue)) {
1392 				/*
1393 				 * there is nothing queued to send,
1394 				 * so I'm done...
1395 				 */
1396 				if (SCTP_GET_STATE(asoc) !=
1397 				    SCTP_STATE_SHUTDOWN_SENT) {
1398 					/* only send SHUTDOWN 1st time thru */
1399 #ifdef SCTP_DEBUG
1400 					if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
1401 						printf("%s:%d sends a shutdown\n",
1402 						       __FILE__,
1403 						       __LINE__
1404 							);
1405 					}
1406 #endif
1407 					sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
1408 					asoc->state = SCTP_STATE_SHUTDOWN_SENT;
1409 					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
1410 					    stcb->sctp_ep, stcb,
1411 					    asoc->primary_destination);
1412 					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1413 					    stcb->sctp_ep, stcb,
1414 					    asoc->primary_destination);
1415 				}
1416 			}
1417 		} else {
1418 			/*
1419 			 * No auto close at this time, reset t-o to
1420 			 * check later
1421 			 */
1422 			int tmp;
1423 			/* fool the timer startup to use the time left */
1424 			tmp = asoc->sctp_autoclose_ticks;
1425 			asoc->sctp_autoclose_ticks -= ticks_gone_by;
1426 			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
1427 					 net);
1428 			/* restore the real tick value */
1429 			asoc->sctp_autoclose_ticks = tmp;
1430 		}
1431 	}
1432 }
1433 
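/*
 * Iterator timer: resume the endpoint/association iterator 'it', applying
 * function_toapply to each matching stcb, processing at most
 * SCTP_MAX_ITERATOR_AT_ONCE associations per run before rescheduling
 * itself; when the walk completes, run function_atend and free the
 * iterator.
 */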
1434 void
1435 sctp_iterator_timer(struct sctp_iterator *it)
1436 {
1437 	int cnt = 0;
1438 	/* only one iterator can run at a
1439 	 * time. This is the only way we
1440 	 * can cleanly pull ep's from underneath
1441 	 * all the running iterators when an
1442 	 * ep is freed.
1443 	 */
1444  	SCTP_ITERATOR_LOCK();
1445 	if (it->inp == NULL) {
1446 		/* iterator is complete */
1447 	done_with_iterator:
1448 		SCTP_ITERATOR_UNLOCK();
1449 		SCTP_INP_INFO_WLOCK();
1450 		LIST_REMOVE(it, sctp_nxt_itr);
1451 		/* stopping the callout is not needed, in theory,
1452 		 * but I am paranoid.
1453 		 */
1454 		SCTP_INP_INFO_WUNLOCK();
1455 		callout_stop(&it->tmr.timer);
1456 		if (it->function_atend != NULL) {
1457 			(*it->function_atend)(it->pointer, it->val);
1458 		}
1459 		callout_destroy(&it->tmr.timer);
1460 		free(it, M_PCB);
1461 		return;
1462 	}
1463  select_a_new_ep:
1464 	SCTP_INP_WLOCK(it->inp);
1465 	while ((it->pcb_flags) && ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) {
1466 		/* we do not like this ep */
1467 		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1468 			SCTP_INP_WUNLOCK(it->inp);
1469 			goto done_with_iterator;
1470 		}
1471 		SCTP_INP_WUNLOCK(it->inp);
1472 		it->inp = LIST_NEXT(it->inp, sctp_list);
1473 		if (it->inp == NULL) {
1474 			goto done_with_iterator;
1475 		}
1476 		SCTP_INP_WLOCK(it->inp);
1477 	}
1478 	if ((it->inp->inp_starting_point_for_iterator != NULL) &&
1479 	    (it->inp->inp_starting_point_for_iterator != it)) {
1480 		printf("Iterator collision, we must wait for other iterator at %p\n",
1481 		       it->inp);
1482 		SCTP_INP_WUNLOCK(it->inp);
1483 		goto start_timer_return;
1484 	}
1485 	/* now we do the actual write to this guy */
1486 	it->inp->inp_starting_point_for_iterator = it;
1487 	SCTP_INP_WUNLOCK(it->inp);
1488 	SCTP_INP_RLOCK(it->inp);
1489 	/* if we reach here we found an acceptable inp, now run through each
1490 	 * association in it that is in the right state
1491 	 */
1492 	if (it->stcb == NULL) {
1493 		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1494 	}
1495 	if (it->stcb->asoc.stcb_starting_point_for_iterator == it) {
1496 		it->stcb->asoc.stcb_starting_point_for_iterator = NULL;
1497 	}
1498 	while (it->stcb) {
1499 		SCTP_TCB_LOCK(it->stcb);
1500 		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1501 			SCTP_TCB_UNLOCK(it->stcb);
1502 			it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1503 			continue;
1504 		}
1505 		cnt++;
1506 		/* run function on this one */
1507 		SCTP_INP_RUNLOCK(it->inp);
1508 		(*it->function_toapply)(it->inp, it->stcb, it->pointer, it->val);
1509 		sctp_chunk_output(it->inp, it->stcb, 1);
1510 		SCTP_TCB_UNLOCK(it->stcb);
1511 		/* see if we have limited out */
1512 		if (cnt > SCTP_MAX_ITERATOR_AT_ONCE) {
1513 			it->stcb->asoc.stcb_starting_point_for_iterator = it;
1514 		start_timer_return:
1515 			SCTP_ITERATOR_UNLOCK();
1516 			sctp_timer_start(SCTP_TIMER_TYPE_ITERATOR, (struct sctp_inpcb *)it, NULL, NULL);
1517 			return;
1518 		}
1519 		SCTP_INP_RLOCK(it->inp);
1520 		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1521 	}
1522 	/* if we reach here, we ran out of stcb's in the inp we are looking at */
1523 	SCTP_INP_RUNLOCK(it->inp);
1524 	SCTP_INP_WLOCK(it->inp);
1525 	it->inp->inp_starting_point_for_iterator = NULL;
1526 	SCTP_INP_WUNLOCK(it->inp);
1527 	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1528 		it->inp = NULL;
1529 	} else {
1530 		SCTP_INP_INFO_RLOCK();
1531 		it->inp = LIST_NEXT(it->inp, sctp_list);
1532 		SCTP_INP_INFO_RUNLOCK();
1533 	}
1534 	if (it->inp == NULL) {
1535 		goto done_with_iterator;
1536 	}
1537 	goto select_a_new_ep;
1538 }
1539