1 /* $KAME: sctp_timer.c,v 1.30 2005/06/16 18:29:25 jinmei Exp $ */ 2 /* $NetBSD: sctp_timer.c,v 1.1 2015/10/13 21:28:35 rjs Exp $ */ 3 4 /* 5 * Copyright (C) 2002, 2003, 2004 Cisco Systems Inc, 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the name of the project nor the names of its contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 31 */ 32 #include <sys/cdefs.h> 33 __KERNEL_RCSID(0, "$NetBSD: sctp_timer.c,v 1.1 2015/10/13 21:28:35 rjs Exp $"); 34 35 #ifdef _KERNEL_OPT 36 #include "opt_inet.h" 37 #include "opt_sctp.h" 38 #endif /* _KERNEL_OPT */ 39 40 #include <sys/param.h> 41 #include <sys/systm.h> 42 #include <sys/malloc.h> 43 #include <sys/mbuf.h> 44 #include <sys/domain.h> 45 #include <sys/protosw.h> 46 #include <sys/socket.h> 47 #include <sys/socketvar.h> 48 #include <sys/proc.h> 49 #include <sys/kernel.h> 50 #include <sys/sysctl.h> 51 #ifdef INET6 52 #include <sys/domain.h> 53 #endif 54 55 #include <machine/limits.h> 56 57 #include <net/if.h> 58 #include <net/if_types.h> 59 #include <net/route.h> 60 #include <netinet/in.h> 61 #include <netinet/in_systm.h> 62 #define _IP_VHL 63 #include <netinet/ip.h> 64 #include <netinet/in_pcb.h> 65 #include <netinet/in_var.h> 66 #include <netinet/ip_var.h> 67 68 #ifdef INET6 69 #include <netinet/ip6.h> 70 #include <netinet6/ip6_var.h> 71 #endif /* INET6 */ 72 73 #include <netinet/sctp_pcb.h> 74 75 #ifdef IPSEC 76 #include <netinet6/ipsec.h> 77 #include <netkey/key.h> 78 #endif /* IPSEC */ 79 #ifdef INET6 80 #include <netinet6/sctp6_var.h> 81 #endif 82 #include <netinet/sctp_var.h> 83 #include <netinet/sctp_timer.h> 84 #include <netinet/sctputil.h> 85 #include <netinet/sctp_output.h> 86 #include <netinet/sctp_hashdriver.h> 87 #include <netinet/sctp_header.h> 88 #include <netinet/sctp_indata.h> 89 #include <netinet/sctp_asconf.h> 90 91 #include <netinet/sctp.h> 92 #include <netinet/sctp_uio.h> 93 94 #include <net/net_osdep.h> 95 96 #ifdef SCTP_DEBUG 97 extern u_int32_t sctp_debug_on; 98 #endif /* SCTP_DEBUG */ 99 100 void 101 sctp_audit_retranmission_queue(struct sctp_association *asoc) 102 { 103 struct sctp_tmit_chunk *chk; 104 105 #ifdef SCTP_DEBUG 106 if (sctp_debug_on & 
SCTP_DEBUG_TIMER4) { 107 printf("Audit invoked on send queue cnt:%d onqueue:%d\n", 108 asoc->sent_queue_retran_cnt, 109 asoc->sent_queue_cnt); 110 } 111 #endif /* SCTP_DEBUG */ 112 asoc->sent_queue_retran_cnt = 0; 113 asoc->sent_queue_cnt = 0; 114 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 115 if (chk->sent == SCTP_DATAGRAM_RESEND) { 116 asoc->sent_queue_retran_cnt++; 117 } 118 asoc->sent_queue_cnt++; 119 } 120 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 121 if (chk->sent == SCTP_DATAGRAM_RESEND) { 122 asoc->sent_queue_retran_cnt++; 123 } 124 } 125 #ifdef SCTP_DEBUG 126 if (sctp_debug_on & SCTP_DEBUG_TIMER4) { 127 printf("Audit completes retran:%d onqueue:%d\n", 128 asoc->sent_queue_retran_cnt, 129 asoc->sent_queue_cnt); 130 } 131 #endif /* SCTP_DEBUG */ 132 } 133 134 int 135 sctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 136 struct sctp_nets *net, uint16_t threshold) 137 { 138 if (net) { 139 net->error_count++; 140 #ifdef SCTP_DEBUG 141 if (sctp_debug_on & SCTP_DEBUG_TIMER4) { 142 printf("Error count for %p now %d thresh:%d\n", 143 net, net->error_count, 144 net->failure_threshold); 145 } 146 #endif /* SCTP_DEBUG */ 147 if (net->error_count >= net->failure_threshold) { 148 /* We had a threshold failure */ 149 if (net->dest_state & SCTP_ADDR_REACHABLE) { 150 net->dest_state &= ~SCTP_ADDR_REACHABLE; 151 net->dest_state |= SCTP_ADDR_NOT_REACHABLE; 152 if (net == stcb->asoc.primary_destination) { 153 net->dest_state |= SCTP_ADDR_WAS_PRIMARY; 154 } 155 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, 156 stcb, 157 SCTP_FAILED_THRESHOLD, 158 (void *)net); 159 } 160 } 161 /*********HOLD THIS COMMENT FOR PATCH OF ALTERNATE 162 *********ROUTING CODE 163 */ 164 /*********HOLD THIS COMMENT FOR END OF PATCH OF ALTERNATE 165 *********ROUTING CODE 166 */ 167 } 168 if (stcb == NULL) 169 return (0); 170 171 if (net) { 172 if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) { 173 stcb->asoc.overall_error_count++; 174 } 175 } else { 176 stcb->asoc.overall_error_count++; 177 } 178 #ifdef SCTP_DEBUG 179 if (sctp_debug_on & SCTP_DEBUG_TIMER4) { 180 printf("Overall error count for %p now %d thresh:%u state:%x\n", 181 &stcb->asoc, 182 stcb->asoc.overall_error_count, 183 (u_int)threshold, 184 ((net == NULL) ? (u_int)0 : (u_int)net->dest_state)); 185 } 186 #endif /* SCTP_DEBUG */ 187 /* We specifically do not do >= to give the assoc one more 188 * change before we fail it. 
189 */ 190 if (stcb->asoc.overall_error_count > threshold) { 191 /* Abort notification sends a ULP notify */ 192 struct mbuf *oper; 193 MGET(oper, M_DONTWAIT, MT_DATA); 194 if (oper) { 195 struct sctp_paramhdr *ph; 196 u_int32_t *ippp; 197 198 oper->m_len = sizeof(struct sctp_paramhdr) + 199 sizeof(*ippp); 200 ph = mtod(oper, struct sctp_paramhdr *); 201 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 202 ph->param_length = htons(oper->m_len); 203 ippp = (u_int32_t *)(ph + 1); 204 *ippp = htonl(0x40000001); 205 } 206 sctp_abort_an_association(inp, stcb, SCTP_FAILED_THRESHOLD, oper); 207 return (1); 208 } 209 return (0); 210 } 211 212 struct sctp_nets * 213 sctp_find_alternate_net(struct sctp_tcb *stcb, 214 struct sctp_nets *net) 215 { 216 /* Find and return an alternate network if possible */ 217 struct sctp_nets *alt, *mnet; 218 struct rtentry *rt; 219 int once; 220 221 if (stcb->asoc.numnets == 1) { 222 /* No others but net */ 223 return (TAILQ_FIRST(&stcb->asoc.nets)); 224 } 225 mnet = net; 226 once = 0; 227 228 if (mnet == NULL) { 229 mnet = TAILQ_FIRST(&stcb->asoc.nets); 230 } 231 do { 232 alt = TAILQ_NEXT(mnet, sctp_next); 233 if (alt == NULL) { 234 once++; 235 if (once > 1) { 236 break; 237 } 238 alt = TAILQ_FIRST(&stcb->asoc.nets); 239 } 240 rt = rtcache_validate(&alt->ro); 241 if (rt == NULL) { 242 alt->src_addr_selected = 0; 243 } 244 if ( 245 ((alt->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE) && 246 (rt != NULL) && 247 (!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) 248 ) { 249 /* Found a reachable address */ 250 break; 251 } 252 mnet = alt; 253 } while (alt != NULL); 254 255 if (alt == NULL) { 256 /* Case where NO insv network exists (dormant state) */ 257 /* we rotate destinations */ 258 once = 0; 259 mnet = net; 260 do { 261 alt = TAILQ_NEXT(mnet, sctp_next); 262 if (alt == NULL) { 263 once++; 264 if (once > 1) { 265 break; 266 } 267 alt = TAILQ_FIRST(&stcb->asoc.nets); 268 } 269 if ((!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) && 270 (alt != net)) { 271 /* Found an alternate address */ 272 break; 273 } 274 mnet = alt; 275 } while (alt != NULL); 276 } 277 if (alt == NULL) { 278 return (net); 279 } 280 return (alt); 281 } 282 283 static void 284 sctp_backoff_on_timeout(struct sctp_tcb *stcb, 285 struct sctp_nets *net, 286 int win_probe, 287 int num_marked) 288 { 289 #ifdef SCTP_DEBUG 290 int oldRTO; 291 292 oldRTO = net->RTO; 293 #endif /* SCTP_DEBUG */ 294 net->RTO <<= 1; 295 #ifdef SCTP_DEBUG 296 if (sctp_debug_on & SCTP_DEBUG_TIMER2) { 297 printf("Timer doubles from %d ms -to-> %d ms\n", 298 oldRTO, net->RTO); 299 } 300 #endif /* SCTP_DEBUG */ 301 302 if (net->RTO > stcb->asoc.maxrto) { 303 net->RTO = stcb->asoc.maxrto; 304 #ifdef SCTP_DEBUG 305 if (sctp_debug_on & SCTP_DEBUG_TIMER2) { 306 printf("Growth capped by maxrto %d\n", 307 net->RTO); 308 } 309 #endif /* SCTP_DEBUG */ 310 } 311 312 313 if ((win_probe == 0) && num_marked) { 314 /* We don't apply penalty to window probe scenarios */ 315 #ifdef SCTP_CWND_LOGGING 316 int old_cwnd=net->cwnd; 317 #endif 318 net->ssthresh = net->cwnd >> 1; 319 if (net->ssthresh < (net->mtu << 1)) { 320 net->ssthresh = (net->mtu << 1); 321 } 322 net->cwnd = net->mtu; 323 /* floor of 1 mtu */ 324 if (net->cwnd < net->mtu) 325 net->cwnd = net->mtu; 326 #ifdef SCTP_CWND_LOGGING 327 sctp_log_cwnd(net, net->cwnd-old_cwnd, SCTP_CWND_LOG_FROM_RTX); 328 #endif 329 330 net->partial_bytes_acked = 0; 331 #ifdef SCTP_DEBUG 332 if (sctp_debug_on & SCTP_DEBUG_TIMER1) { 333 printf("collapse cwnd to 1MTU ssthresh to %d\n", 334 
net->ssthresh); 335 } 336 #endif 337 338 } 339 } 340 341 342 static int 343 sctp_mark_all_for_resend(struct sctp_tcb *stcb, 344 struct sctp_nets *net, 345 struct sctp_nets *alt, 346 int *num_marked) 347 { 348 349 /* 350 * Mark all chunks (well not all) that were sent to *net for retransmission. 351 * Move them to alt for there destination as well... We only 352 * mark chunks that have been outstanding long enough to have 353 * received feed-back. 354 */ 355 struct sctp_tmit_chunk *chk, *tp2; 356 struct sctp_nets *lnets; 357 struct timeval now, min_wait, tv; 358 int cur_rto; 359 int win_probes, non_win_probes, orig_rwnd, audit_tf, num_mk, fir; 360 unsigned int cnt_mk; 361 u_int32_t orig_flight; 362 #ifdef SCTP_FR_LOGGING 363 u_int32_t tsnfirst, tsnlast; 364 #endif 365 366 /* none in flight now */ 367 audit_tf = 0; 368 fir=0; 369 /* figure out how long a data chunk must be pending 370 * before we can mark it .. 371 */ 372 SCTP_GETTIME_TIMEVAL(&now); 373 /* get cur rto in micro-seconds */ 374 cur_rto = (((net->lastsa >> 2) + net->lastsv) >> 1); 375 #ifdef SCTP_FR_LOGGING 376 sctp_log_fr(cur_rto, 0, 0, SCTP_FR_T3_MARK_TIME); 377 #endif 378 cur_rto *= 1000; 379 #ifdef SCTP_FR_LOGGING 380 sctp_log_fr(cur_rto, 0, 0, SCTP_FR_T3_MARK_TIME); 381 #endif 382 tv.tv_sec = cur_rto / 1000000; 383 tv.tv_usec = cur_rto % 1000000; 384 #ifndef __FreeBSD__ 385 timersub(&now, &tv, &min_wait); 386 #else 387 min_wait = now; 388 timevalsub(&min_wait, &tv); 389 #endif 390 if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) { 391 /* 392 * if we hit here, we don't 393 * have enough seconds on the clock to account 394 * for the RTO. We just let the lower seconds 395 * be the bounds and don't worry about it. This 396 * may mean we will mark a lot more than we should. 397 */ 398 min_wait.tv_sec = min_wait.tv_usec = 0; 399 } 400 #ifdef SCTP_FR_LOGGING 401 sctp_log_fr(cur_rto, now.tv_sec, now.tv_usec, SCTP_FR_T3_MARK_TIME); 402 sctp_log_fr(0, min_wait.tv_sec, min_wait.tv_usec, SCTP_FR_T3_MARK_TIME); 403 #endif 404 if (stcb->asoc.total_flight >= net->flight_size) { 405 stcb->asoc.total_flight -= net->flight_size; 406 } else { 407 audit_tf = 1; 408 stcb->asoc.total_flight = 0; 409 } 410 /* Our rwnd will be incorrect here since we are not adding 411 * back the cnt * mbuf but we will fix that down below. 412 */ 413 orig_rwnd = stcb->asoc.peers_rwnd; 414 orig_flight = net->flight_size; 415 stcb->asoc.peers_rwnd += net->flight_size; 416 net->flight_size = 0; 417 net->rto_pending = 0; 418 net->fast_retran_ip= 0; 419 win_probes = non_win_probes = 0; 420 #ifdef SCTP_DEBUG 421 if (sctp_debug_on & SCTP_DEBUG_TIMER2) { 422 printf("Marking ALL un-acked for retransmission at t3-timeout\n"); 423 } 424 #endif /* SCTP_DEBUG */ 425 /* Now on to each chunk */ 426 num_mk = cnt_mk = 0; 427 #ifdef SCTP_FR_LOGGING 428 tsnlast = tsnfirst = 0; 429 #endif 430 chk = TAILQ_FIRST(&stcb->asoc.sent_queue); 431 for (;chk != NULL; chk = tp2) { 432 tp2 = TAILQ_NEXT(chk, sctp_next); 433 if ((compare_with_wrap(stcb->asoc.last_acked_seq, 434 chk->rec.data.TSN_seq, 435 MAX_TSN)) || 436 (stcb->asoc.last_acked_seq == chk->rec.data.TSN_seq)) { 437 /* Strange case our list got out of order? 
*/ 438 printf("Our list is out of order?\n"); 439 TAILQ_REMOVE(&stcb->asoc.sent_queue, chk, sctp_next); 440 if (chk->data) { 441 sctp_release_pr_sctp_chunk(stcb, chk, 0xffff, 442 &stcb->asoc.sent_queue); 443 if (chk->flags & SCTP_PR_SCTP_BUFFER) { 444 stcb->asoc.sent_queue_cnt_removeable--; 445 } 446 } 447 stcb->asoc.sent_queue_cnt--; 448 sctp_free_remote_addr(chk->whoTo); 449 sctppcbinfo.ipi_count_chunk--; 450 if ((int)sctppcbinfo.ipi_count_chunk < 0) { 451 panic("Chunk count is going negative"); 452 } 453 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk); 454 sctppcbinfo.ipi_gencnt_chunk++; 455 continue; 456 } 457 if ((chk->whoTo == net) && (chk->sent < SCTP_DATAGRAM_ACKED)) { 458 /* found one to mark: 459 * If it is less than DATAGRAM_ACKED it MUST 460 * not be a skipped or marked TSN but instead 461 * one that is either already set for retransmission OR 462 * one that needs retransmission. 463 */ 464 465 /* validate its been outstanding long enough */ 466 #ifdef SCTP_FR_LOGGING 467 sctp_log_fr(chk->rec.data.TSN_seq, 468 chk->sent_rcv_time.tv_sec, 469 chk->sent_rcv_time.tv_usec, 470 SCTP_FR_T3_MARK_TIME); 471 #endif 472 if (chk->sent_rcv_time.tv_sec > min_wait.tv_sec) { 473 /* we have reached a chunk that was sent some 474 * seconds past our min.. forget it we will 475 * find no more to send. 476 */ 477 #ifdef SCTP_FR_LOGGING 478 sctp_log_fr(0, 479 chk->sent_rcv_time.tv_sec, 480 chk->sent_rcv_time.tv_usec, 481 SCTP_FR_T3_STOPPED); 482 #endif 483 continue; 484 } else if (chk->sent_rcv_time.tv_sec == min_wait.tv_sec) { 485 /* we must look at the micro seconds to know. 486 */ 487 if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) { 488 /* ok it was sent after our boundary time. */ 489 #ifdef SCTP_FR_LOGGING 490 sctp_log_fr(0, 491 chk->sent_rcv_time.tv_sec, 492 chk->sent_rcv_time.tv_usec, 493 SCTP_FR_T3_STOPPED); 494 #endif 495 continue; 496 } 497 } 498 if (stcb->asoc.total_flight_count > 0) { 499 stcb->asoc.total_flight_count--; 500 } 501 if ((chk->flags & (SCTP_PR_SCTP_ENABLED|SCTP_PR_SCTP_BUFFER)) == SCTP_PR_SCTP_ENABLED) { 502 /* Is it expired? 
*/ 503 if ((now.tv_sec > chk->rec.data.timetodrop.tv_sec) || 504 ((chk->rec.data.timetodrop.tv_sec == now.tv_sec) && 505 (now.tv_usec > chk->rec.data.timetodrop.tv_usec))) { 506 /* Yes so drop it */ 507 if (chk->data) { 508 sctp_release_pr_sctp_chunk(stcb, 509 chk, 510 (SCTP_RESPONSE_TO_USER_REQ|SCTP_NOTIFY_DATAGRAM_SENT), 511 &stcb->asoc.sent_queue); 512 } 513 } 514 continue; 515 } 516 if (chk->sent != SCTP_DATAGRAM_RESEND) { 517 stcb->asoc.sent_queue_retran_cnt++; 518 num_mk++; 519 if (fir == 0) { 520 fir = 1; 521 #ifdef SCTP_DEBUG 522 if (sctp_debug_on & SCTP_DEBUG_TIMER1) { 523 printf("First TSN marked was %x\n", 524 chk->rec.data.TSN_seq); 525 } 526 #endif 527 #ifdef SCTP_FR_LOGGING 528 tsnfirst = chk->rec.data.TSN_seq; 529 #endif 530 } 531 #ifdef SCTP_FR_LOGGING 532 tsnlast = chk->rec.data.TSN_seq; 533 sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count, 534 0, SCTP_FR_T3_MARKED); 535 536 #endif 537 } 538 chk->sent = SCTP_DATAGRAM_RESEND; 539 /* reset the TSN for striking and other FR stuff */ 540 chk->rec.data.doing_fast_retransmit = 0; 541 #ifdef SCTP_DEBUG 542 if (sctp_debug_on & SCTP_DEBUG_TIMER3) { 543 printf("mark TSN:%x for retransmission\n", chk->rec.data.TSN_seq); 544 } 545 #endif /* SCTP_DEBUG */ 546 /* Clear any time so NO RTT is being done */ 547 chk->do_rtt = 0; 548 /* Bump up the count */ 549 if (compare_with_wrap(chk->rec.data.TSN_seq, 550 stcb->asoc.t3timeout_highest_marked, 551 MAX_TSN)) { 552 /* TSN_seq > than t3timeout so update */ 553 stcb->asoc.t3timeout_highest_marked = chk->rec.data.TSN_seq; 554 } 555 if (alt != net) { 556 sctp_free_remote_addr(chk->whoTo); 557 chk->whoTo = alt; 558 alt->ref_count++; 559 } 560 if ((chk->rec.data.state_flags & SCTP_WINDOW_PROBE) != 561 SCTP_WINDOW_PROBE) { 562 non_win_probes++; 563 } else { 564 chk->rec.data.state_flags &= ~SCTP_WINDOW_PROBE; 565 win_probes++; 566 } 567 } 568 if (chk->sent == SCTP_DATAGRAM_RESEND) { 569 cnt_mk++; 570 } 571 } 572 573 #ifdef SCTP_FR_LOGGING 574 sctp_log_fr(tsnfirst, tsnlast, num_mk, SCTP_FR_T3_TIMEOUT); 575 #endif 576 /* compensate for the number we marked */ 577 stcb->asoc.peers_rwnd += (num_mk /* * sizeof(struct mbuf)*/); 578 579 #ifdef SCTP_DEBUG 580 if (sctp_debug_on & SCTP_DEBUG_TIMER1) { 581 if (num_mk) { 582 #ifdef SCTP_FR_LOGGING 583 printf("LAST TSN marked was %x\n", tsnlast); 584 #endif 585 printf("Num marked for retransmission was %d peer-rwd:%ld\n", 586 num_mk, (u_long)stcb->asoc.peers_rwnd); 587 #ifdef SCTP_FR_LOGGING 588 printf("LAST TSN marked was %x\n", tsnlast); 589 #endif 590 printf("Num marked for retransmission was %d peer-rwd:%d\n", 591 num_mk, 592 (int)stcb->asoc.peers_rwnd 593 ); 594 } 595 } 596 #endif 597 *num_marked = num_mk; 598 if (stcb->asoc.sent_queue_retran_cnt != cnt_mk) { 599 printf("Local Audit says there are %d for retran asoc cnt:%d\n", 600 cnt_mk, stcb->asoc.sent_queue_retran_cnt); 601 #ifndef SCTP_AUDITING_ENABLED 602 stcb->asoc.sent_queue_retran_cnt = cnt_mk; 603 #endif 604 } 605 #ifdef SCTP_DEBUG 606 if (sctp_debug_on & SCTP_DEBUG_TIMER3) { 607 printf("**************************\n"); 608 } 609 #endif /* SCTP_DEBUG */ 610 611 /* Now check for a ECN Echo that may be stranded */ 612 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) { 613 if ((chk->whoTo == net) && 614 (chk->rec.chunk_id == SCTP_ECN_ECHO)) { 615 sctp_free_remote_addr(chk->whoTo); 616 chk->whoTo = alt; 617 if (chk->sent != SCTP_DATAGRAM_RESEND) { 618 chk->sent = SCTP_DATAGRAM_RESEND; 619 stcb->asoc.sent_queue_retran_cnt++; 620 } 621 alt->ref_count++; 622 } 623 } 624 if ((orig_rwnd == 0) 
&& (stcb->asoc.total_flight == 0) &&
	    (orig_flight <= net->mtu)) {
		/*
		 * If the LAST packet sent was not acked and our rwnd is 0
		 * then we are in a win-probe state.
		 */
		win_probes = 1;
		non_win_probes = 0;
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
			printf("WIN_PROBE set via o_rwnd=0 tf=0 and all:%d fit in mtu:%d\n",
			       orig_flight, net->mtu);
		}
#endif
	}

	if (audit_tf) {
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
			printf("Audit total flight due to negative value net:%p\n",
			       net);
		}
#endif /* SCTP_DEBUG */
		stcb->asoc.total_flight = 0;
		stcb->asoc.total_flight_count = 0;
		/* Clear all networks flight size */
		TAILQ_FOREACH(lnets, &stcb->asoc.nets, sctp_next) {
			lnets->flight_size = 0;
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
				printf("Net:%p c-f cwnd:%d ssthresh:%d\n",
				       lnets, lnets->cwnd, lnets->ssthresh);
			}
#endif /* SCTP_DEBUG */
		}
		TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
			if (chk->sent < SCTP_DATAGRAM_RESEND) {
				stcb->asoc.total_flight += chk->book_size;
				chk->whoTo->flight_size += chk->book_size;
				stcb->asoc.total_flight_count++;
			}
		}
	}
	/* Set up the ECN nonce re-sync point. We
	 * do this since retransmissions are NOT
	 * set up for ECN. This means that, due to
	 * Karn's rule, we don't know the total
	 * of the peer's ECN bits.
	 */
	chk = TAILQ_FIRST(&stcb->asoc.send_queue);
	if (chk == NULL) {
		stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
	} else {
		stcb->asoc.nonce_resync_tsn = chk->rec.data.TSN_seq;
	}
	stcb->asoc.nonce_wait_for_ecne = 0;
	stcb->asoc.nonce_sum_check = 0;
	/* We return 1 if we only have a window probe outstanding */
	if (win_probes && (non_win_probes == 0)) {
		return (1);
	}
	return (0);
}

static void
sctp_move_all_chunks_to_alt(struct sctp_tcb *stcb,
			    struct sctp_nets *net,
			    struct sctp_nets *alt)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk;

	if (net == alt)
		/* nothing to do */
		return;

	asoc = &stcb->asoc;

	/*
	 * now run through all the streams checking for chunks sent to our
	 * bad network.
	 */
	TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
		/* now clean up any chunks here */
		TAILQ_FOREACH(chk, &outs->outqueue, sctp_next) {
			if (chk->whoTo == net) {
				sctp_free_remote_addr(chk->whoTo);
				chk->whoTo = alt;
				alt->ref_count++;
			}
		}
	}
	/* Now check the pending queue */
	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
		if (chk->whoTo == net) {
			sctp_free_remote_addr(chk->whoTo);
			chk->whoTo = alt;
			alt->ref_count++;
		}
	}

}

int
sctp_t3rxt_timer(struct sctp_inpcb *inp,
		 struct sctp_tcb *stcb,
		 struct sctp_nets *net)
{
	struct sctp_nets *alt;
	int win_probe, num_mk;

#ifdef SCTP_FR_LOGGING
	sctp_log_fr(0, 0, 0, SCTP_FR_T3_TIMEOUT);
#endif
	/* Find an alternate and mark those for retransmission */
	alt = sctp_find_alternate_net(stcb, net);
	win_probe = sctp_mark_all_for_resend(stcb, net, alt, &num_mk);

	/* FR Loss recovery just ended with the T3. */
	stcb->asoc.fast_retran_loss_recovery = 0;

	/* set up the sat loss recovery that prevents
	 * satellite cwnd advance.
	 */
	stcb->asoc.sat_t3_loss_recovery = 1;
	stcb->asoc.sat_t3_recovery_tsn = stcb->asoc.sending_seq;

	/* Back off the timer and cwnd */
	sctp_backoff_on_timeout(stcb, net, win_probe, num_mk);
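	/*
	 * At this point the usual T3 recovery actions have been taken:
	 * sctp_backoff_on_timeout() doubles this path's RTO (for example
	 * an RTO of 1000 ms becomes 2000, then 4000, ... capped at
	 * asoc.maxrto) and, unless only a window probe was outstanding,
	 * collapses cwnd to one MTU and ssthresh to half the old cwnd
	 * (floored at two MTUs).  The threshold management below then
	 * charges this expiry against max_send_times; the association is
	 * aborted only once the overall error count exceeds that limit.
	 * (The 1000 ms figure is purely illustrative.)
	 */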
	if (win_probe == 0) {
		/* We don't do normal threshold management on window probes */
		if (sctp_threshold_management(inp, stcb, net,
		    stcb->asoc.max_send_times)) {
			/* Association was destroyed */
			return (1);
		} else {
			if (net != stcb->asoc.primary_destination) {
				/* send an immediate HB if our RTO is stale */
				struct timeval now;
				unsigned int ms_goneby;
				SCTP_GETTIME_TIMEVAL(&now);
				if (net->last_sent_time.tv_sec) {
					ms_goneby = (now.tv_sec - net->last_sent_time.tv_sec) * 1000;
				} else {
					ms_goneby = 0;
				}
				if ((ms_goneby > net->RTO) || (net->RTO == 0)) {
					/* no recent feedback in an RTO or more, request an RTT update */
					sctp_send_hb(stcb, 1, net);
				}
			}
		}
	} else {
		/*
		 * For a window probe we don't penalize the nets but only
		 * the association. This may fail it if SACKs are not coming
		 * back. If SACKs are coming with rwnd locked at 0, we will
		 * continue to hold things waiting for rwnd to rise.
		 */
		if (sctp_threshold_management(inp, stcb, NULL,
		    stcb->asoc.max_send_times)) {
			/* Association was destroyed */
			return (1);
		}
	}
	if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
		/* Move all pending over too */
		sctp_move_all_chunks_to_alt(stcb, net, alt);
		/* Was it our primary? */
		if ((stcb->asoc.primary_destination == net) && (alt != net)) {
			/*
			 * Yes, note it as such and find an alternate.
			 * note: this means HB code must use this to re-set
			 * the primary if it goes active AND if someone does
			 * a change-primary then this flag must be cleared
			 * from any net structures.
			 */
			if (sctp_set_primary_addr(stcb,
			    (struct sockaddr *)NULL,
			    alt) == 0) {
				net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
				net->src_addr_selected = 0;
			}
		}
	}
	/*
	 * Special case for the cookie-echoed case: we don't do output
	 * but must await the COOKIE-ACK before retransmission.
	 */
	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
		/*
		 * Here we just reset the timer and start again since we
		 * have not established the asoc
		 */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
			printf("Special cookie case return\n");
		}
#endif /* SCTP_DEBUG */
		sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
		return (0);
	}
	if (stcb->asoc.peer_supports_prsctp) {
		struct sctp_tmit_chunk *lchk;
		lchk = sctp_try_advance_peer_ack_point(stcb, &stcb->asoc);
		/* C3. See if we need to send a Fwd-TSN */
		if (compare_with_wrap(stcb->asoc.advanced_peer_ack_point,
		    stcb->asoc.last_acked_seq, MAX_TSN)) {
			/*
			 * ISSUE with ECN, see FWD-TSN processing for notes
			 * on issues that will occur when the ECN NONCE stuff
			 * is put into SCTP for cross checking.
838 */ 839 #ifdef SCTP_DEBUG 840 if (sctp_debug_on & SCTP_DEBUG_TIMER1) { 841 printf("Forward TSN time\n"); 842 } 843 #endif /* SCTP_DEBUG */ 844 send_forward_tsn(stcb, &stcb->asoc); 845 if (lchk) { 846 /* Assure a timer is up */ 847 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo); 848 } 849 } 850 } 851 return (0); 852 } 853 854 int 855 sctp_t1init_timer(struct sctp_inpcb *inp, 856 struct sctp_tcb *stcb, 857 struct sctp_nets *net) 858 { 859 /* bump the thresholds */ 860 if (stcb->asoc.delayed_connection) { 861 /* special hook for delayed connection. The 862 * library did NOT complete the rest of its 863 * sends. 864 */ 865 stcb->asoc.delayed_connection = 0; 866 sctp_send_initiate(inp, stcb); 867 return (0); 868 } 869 if (sctp_threshold_management(inp, stcb, net, 870 stcb->asoc.max_init_times)) { 871 /* Association was destroyed */ 872 return (1); 873 } 874 stcb->asoc.dropped_special_cnt = 0; 875 sctp_backoff_on_timeout(stcb, stcb->asoc.primary_destination, 1, 0); 876 if (stcb->asoc.initial_init_rto_max < net->RTO) { 877 net->RTO = stcb->asoc.initial_init_rto_max; 878 } 879 if (stcb->asoc.numnets > 1) { 880 /* If we have more than one addr use it */ 881 struct sctp_nets *alt; 882 alt = sctp_find_alternate_net(stcb, stcb->asoc.primary_destination); 883 if ((alt != NULL) && (alt != stcb->asoc.primary_destination)) { 884 sctp_move_all_chunks_to_alt(stcb, stcb->asoc.primary_destination, alt); 885 stcb->asoc.primary_destination = alt; 886 } 887 } 888 /* Send out a new init */ 889 sctp_send_initiate(inp, stcb); 890 return (0); 891 } 892 893 /* 894 * For cookie and asconf we actually need to find and mark for resend, 895 * then increment the resend counter (after all the threshold management 896 * stuff of course). 897 */ 898 int sctp_cookie_timer(struct sctp_inpcb *inp, 899 struct sctp_tcb *stcb, 900 struct sctp_nets *net) 901 { 902 struct sctp_nets *alt; 903 struct sctp_tmit_chunk *cookie; 904 /* first before all else we must find the cookie */ 905 TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, sctp_next) { 906 if (cookie->rec.chunk_id == SCTP_COOKIE_ECHO) { 907 break; 908 } 909 } 910 if (cookie == NULL) { 911 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) { 912 /* FOOBAR! 
*/
			struct mbuf *oper;
			MGET(oper, M_DONTWAIT, MT_DATA);
			if (oper) {
				struct sctp_paramhdr *ph;
				u_int32_t *ippp;

				oper->m_len = sizeof(struct sctp_paramhdr) +
				    sizeof(*ippp);
				ph = mtod(oper, struct sctp_paramhdr *);
				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
				ph->param_length = htons(oper->m_len);
				ippp = (u_int32_t *)(ph + 1);
				*ippp = htonl(0x40000002);
			}
			sctp_abort_an_association(inp, stcb, SCTP_INTERNAL_ERROR,
			    oper);
		}
		return (1);
	}
	/* Ok we found the cookie, threshold management next */
	if (sctp_threshold_management(inp, stcb, cookie->whoTo,
	    stcb->asoc.max_init_times)) {
		/* Assoc is over */
		return (1);
	}
	/*
	 * cleared threshold management, now let's back off the address &
	 * select an alternate
	 */
	stcb->asoc.dropped_special_cnt = 0;
	sctp_backoff_on_timeout(stcb, cookie->whoTo, 1, 0);
	alt = sctp_find_alternate_net(stcb, cookie->whoTo);
	if (alt != cookie->whoTo) {
		sctp_free_remote_addr(cookie->whoTo);
		cookie->whoTo = alt;
		alt->ref_count++;
	}
	/* Now mark the retran info */
	if (cookie->sent != SCTP_DATAGRAM_RESEND) {
		stcb->asoc.sent_queue_retran_cnt++;
	}
	cookie->sent = SCTP_DATAGRAM_RESEND;
	/*
	 * Now call the output routine to kick out the cookie again.  Note we
	 * don't mark any chunks for retran so that FR will need to kick in
	 * to move these (or a send timer).
	 */
	return (0);
}

int sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
	struct sctp_nets *net)
{
	struct sctp_nets *alt;
	struct sctp_tmit_chunk *strrst, *chk;
	struct sctp_stream_reset_req *strreq;
	/* find the existing STRRESET */
	TAILQ_FOREACH(strrst, &stcb->asoc.control_send_queue,
	    sctp_next) {
		if (strrst->rec.chunk_id == SCTP_STREAM_RESET) {
			/* is it what we want */
			strreq = mtod(strrst->data, struct sctp_stream_reset_req *);
			if (strreq->sr_req.ph.param_type == ntohs(SCTP_STR_RESET_REQUEST)) {
				break;
			}
		}
	}
	if (strrst == NULL) {
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
			printf("Strange, strreset timer fires, but I can't find an str-reset?\n");
		}
#endif /* SCTP_DEBUG */
		return (0);
	}
	/* do threshold management */
	if (sctp_threshold_management(inp, stcb, strrst->whoTo,
	    stcb->asoc.max_send_times)) {
		/* Assoc is over */
		return (1);
	}

	/*
	 * cleared threshold management
	 * now let's back off the address & select an alternate
	 */
	sctp_backoff_on_timeout(stcb, strrst->whoTo, 1, 0);
	alt = sctp_find_alternate_net(stcb, strrst->whoTo);
	sctp_free_remote_addr(strrst->whoTo);
	strrst->whoTo = alt;
	alt->ref_count++;

	/* See if an ECN Echo is also stranded */
	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
		if ((chk->whoTo == net) &&
		    (chk->rec.chunk_id == SCTP_ECN_ECHO)) {
			sctp_free_remote_addr(chk->whoTo);
			if (chk->sent != SCTP_DATAGRAM_RESEND) {
				chk->sent = SCTP_DATAGRAM_RESEND;
				stcb->asoc.sent_queue_retran_cnt++;
			}
			chk->whoTo = alt;
			alt->ref_count++;
		}
	}
	if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
		/*
		 * If the address went un-reachable, we need to move
		 * to alternates for ALL chk's in queue
		 */
		sctp_move_all_chunks_to_alt(stcb, net, alt);
	}
	/* mark the retran info */
	if (strrst->sent != SCTP_DATAGRAM_RESEND)
		stcb->asoc.sent_queue_retran_cnt++;
	strrst->sent = SCTP_DATAGRAM_RESEND;

	/* restart the timer */
	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, inp, stcb, strrst->whoTo);
	return (0);
}
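
/*
 * The cookie, stream-reset and ASCONF retransmission handlers above and
 * below all follow the same basic recipe on expiry: run threshold
 * management (and give up on the association if it fails), back off the
 * RTO of the path the chunk was on, pick an alternate net with
 * sctp_find_alternate_net(), re-home the chunk to that alternate (the
 * stream-reset and ASCONF cases also drag along any stranded ECN Echo on
 * the same path), and mark it SCTP_DATAGRAM_RESEND while bumping
 * sent_queue_retran_cnt.  They differ only in which timer, if any, is
 * restarted here and in how the actual resend is triggered.
 */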

int sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
	struct sctp_nets *net)
{
	struct sctp_nets *alt;
	struct sctp_tmit_chunk *asconf, *chk;

	/* is this the first send, or a retransmission? */
	if (stcb->asoc.asconf_sent == 0) {
		/* compose a new ASCONF chunk and send it */
		sctp_send_asconf(stcb, net);
	} else {
		/* Retransmission of the existing ASCONF needed... */

		/* find the existing ASCONF */
		TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
		    sctp_next) {
			if (asconf->rec.chunk_id == SCTP_ASCONF) {
				break;
			}
		}
		if (asconf == NULL) {
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
				printf("Strange, asconf timer fires, but I can't find an asconf?\n");
			}
#endif /* SCTP_DEBUG */
			return (0);
		}
		/* do threshold management */
		if (sctp_threshold_management(inp, stcb, asconf->whoTo,
		    stcb->asoc.max_send_times)) {
			/* Assoc is over */
			return (1);
		}

		/* PETER? FIX? How will the following code ever run? If
		 * the max_send_times is hit, threshold management will
		 * blow away the association?
		 */
		if (asconf->snd_count > stcb->asoc.max_send_times) {
			/*
			 * Something is rotten, peer is not responding to
			 * ASCONFs but maybe is to data etc. e.g. it is not
			 * properly handling the chunk type upper bits.
			 * Mark this peer as ASCONF incapable and cleanup.
			 */
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
				printf("asconf_timer: Peer has not responded to our repeated ASCONFs\n");
			}
#endif /* SCTP_DEBUG */
			sctp_asconf_cleanup(stcb, net);
			return (0);
		}
		/*
		 * cleared threshold management
		 * now let's back off the address & select an alternate
		 */
		sctp_backoff_on_timeout(stcb, asconf->whoTo, 1, 0);
		alt = sctp_find_alternate_net(stcb, asconf->whoTo);
		sctp_free_remote_addr(asconf->whoTo);
		asconf->whoTo = alt;
		alt->ref_count++;

		/* See if an ECN Echo is also stranded */
		TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
			if ((chk->whoTo == net) &&
			    (chk->rec.chunk_id == SCTP_ECN_ECHO)) {
				sctp_free_remote_addr(chk->whoTo);
				chk->whoTo = alt;
				if (chk->sent != SCTP_DATAGRAM_RESEND) {
					chk->sent = SCTP_DATAGRAM_RESEND;
					stcb->asoc.sent_queue_retran_cnt++;
				}
				alt->ref_count++;
			}
		}
		if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
			/*
			 * If the address went un-reachable, we need to move
			 * to alternates for ALL chk's in queue
			 */
			sctp_move_all_chunks_to_alt(stcb, net, alt);
		}
		/* mark the retran info */
		if (asconf->sent != SCTP_DATAGRAM_RESEND)
			stcb->asoc.sent_queue_retran_cnt++;
		asconf->sent = SCTP_DATAGRAM_RESEND;
	}
	return (0);
}

/*
 * For the shutdown and shutdown-ack, we do not keep one around on the
 * control queue. This means we must generate a new one and call the general
 * chunk output routine, AFTER having done threshold
 * management.
1133 */ 1134 int 1135 sctp_shutdown_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1136 struct sctp_nets *net) 1137 { 1138 struct sctp_nets *alt; 1139 /* first threshold managment */ 1140 if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) { 1141 /* Assoc is over */ 1142 return (1); 1143 } 1144 /* second select an alternative */ 1145 alt = sctp_find_alternate_net(stcb, net); 1146 1147 /* third generate a shutdown into the queue for out net */ 1148 #ifdef SCTP_DEBUG 1149 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) { 1150 printf("%s:%d sends a shutdown\n", 1151 __FILE__, 1152 __LINE__ 1153 ); 1154 } 1155 #endif 1156 if (alt) { 1157 sctp_send_shutdown(stcb, alt); 1158 } else { 1159 /* if alt is NULL, there is no dest 1160 * to send to?? 1161 */ 1162 return (0); 1163 } 1164 /* fourth restart timer */ 1165 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, alt); 1166 return (0); 1167 } 1168 1169 int sctp_shutdownack_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1170 struct sctp_nets *net) 1171 { 1172 struct sctp_nets *alt; 1173 /* first threshold managment */ 1174 if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) { 1175 /* Assoc is over */ 1176 return (1); 1177 } 1178 /* second select an alternative */ 1179 alt = sctp_find_alternate_net(stcb, net); 1180 1181 /* third generate a shutdown into the queue for out net */ 1182 sctp_send_shutdown_ack(stcb, alt); 1183 1184 /* fourth restart timer */ 1185 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, alt); 1186 return (0); 1187 } 1188 1189 static void 1190 sctp_audit_stream_queues_for_size(struct sctp_inpcb *inp, 1191 struct sctp_tcb *stcb) 1192 { 1193 struct sctp_stream_out *outs; 1194 struct sctp_tmit_chunk *chk; 1195 unsigned int chks_in_queue=0; 1196 1197 if ((stcb == NULL) || (inp == NULL)) 1198 return; 1199 if (TAILQ_EMPTY(&stcb->asoc.out_wheel)) { 1200 printf("Strange, out_wheel empty nothing on sent/send and tot=%lu?\n", 1201 (u_long)stcb->asoc.total_output_queue_size); 1202 stcb->asoc.total_output_queue_size = 0; 1203 return; 1204 } 1205 if (stcb->asoc.sent_queue_retran_cnt) { 1206 printf("Hmm, sent_queue_retran_cnt is non-zero %d\n", 1207 stcb->asoc.sent_queue_retran_cnt); 1208 stcb->asoc.sent_queue_retran_cnt = 0; 1209 } 1210 /* Check to see if some data queued, if so report it */ 1211 TAILQ_FOREACH(outs, &stcb->asoc.out_wheel, next_spoke) { 1212 if (!TAILQ_EMPTY(&outs->outqueue)) { 1213 TAILQ_FOREACH(chk, &outs->outqueue, sctp_next) { 1214 chks_in_queue++; 1215 } 1216 } 1217 } 1218 if (chks_in_queue != stcb->asoc.stream_queue_cnt) { 1219 printf("Hmm, stream queue cnt at %d I counted %d in stream out wheel\n", 1220 stcb->asoc.stream_queue_cnt, chks_in_queue); 1221 } 1222 if (chks_in_queue) { 1223 /* call the output queue function */ 1224 sctp_chunk_output(inp, stcb, 1); 1225 if ((TAILQ_EMPTY(&stcb->asoc.send_queue)) && 1226 (TAILQ_EMPTY(&stcb->asoc.sent_queue))) { 1227 /* Probably should go in and make it go back through and add fragments allowed */ 1228 printf("Still nothing moved %d chunks are stuck\n", chks_in_queue); 1229 } 1230 } else { 1231 printf("Found no chunks on any queue tot:%lu\n", 1232 (u_long)stcb->asoc.total_output_queue_size); 1233 stcb->asoc.total_output_queue_size = 0; 1234 } 1235 } 1236 1237 int 1238 sctp_heartbeat_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1239 struct sctp_nets *net) 1240 { 1241 int cnt_of_unconf=0; 1242 1243 if (net) { 1244 if (net->hb_responded == 0) { 1245 sctp_backoff_on_timeout(stcb, net, 1, 0); 1246 } 1247 /* Zero PBA, if 
it needs it */ 1248 if (net->partial_bytes_acked) { 1249 net->partial_bytes_acked = 0; 1250 } 1251 } 1252 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 1253 if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) && 1254 (net->dest_state & SCTP_ADDR_REACHABLE)) { 1255 cnt_of_unconf++; 1256 } 1257 } 1258 if ((stcb->asoc.total_output_queue_size > 0) && 1259 (TAILQ_EMPTY(&stcb->asoc.send_queue)) && 1260 (TAILQ_EMPTY(&stcb->asoc.sent_queue))) { 1261 sctp_audit_stream_queues_for_size(inp, stcb); 1262 } 1263 /* Send a new HB, this will do threshold managment, pick a new dest */ 1264 if (sctp_send_hb(stcb, 0, NULL) < 0) { 1265 return (1); 1266 } 1267 if (cnt_of_unconf > 1) { 1268 /* 1269 * this will send out extra hb's up to maxburst if 1270 * there are any unconfirmed addresses. 1271 */ 1272 int cnt_sent = 1; 1273 while ((cnt_sent < stcb->asoc.max_burst) && (cnt_of_unconf > 1)) { 1274 if (sctp_send_hb(stcb, 0, NULL) == 0) 1275 break; 1276 cnt_of_unconf--; 1277 cnt_sent++; 1278 } 1279 } 1280 return (0); 1281 } 1282 1283 #define SCTP_NUMBER_OF_MTU_SIZES 18 1284 static u_int32_t mtu_sizes[]={ 1285 68, 1286 296, 1287 508, 1288 512, 1289 544, 1290 576, 1291 1006, 1292 1492, 1293 1500, 1294 1536, 1295 2002, 1296 2048, 1297 4352, 1298 4464, 1299 8166, 1300 17914, 1301 32000, 1302 65535 1303 }; 1304 1305 1306 static u_int32_t 1307 sctp_getnext_mtu(struct sctp_inpcb *inp, u_int32_t cur_mtu) 1308 { 1309 /* select another MTU that is just bigger than this one */ 1310 int i; 1311 1312 for (i = 0; i < SCTP_NUMBER_OF_MTU_SIZES; i++) { 1313 if (cur_mtu < mtu_sizes[i]) { 1314 /* no max_mtu is bigger than this one */ 1315 return (mtu_sizes[i]); 1316 } 1317 } 1318 /* here return the highest allowable */ 1319 return (cur_mtu); 1320 } 1321 1322 1323 void sctp_pathmtu_timer(struct sctp_inpcb *inp, 1324 struct sctp_tcb *stcb, 1325 struct sctp_nets *net) 1326 { 1327 u_int32_t next_mtu; 1328 struct rtentry *rt; 1329 1330 /* restart the timer in any case */ 1331 next_mtu = sctp_getnext_mtu(inp, net->mtu); 1332 if (next_mtu <= net->mtu) { 1333 /* nothing to do */ 1334 return; 1335 } 1336 rt = rtcache_validate(&net->ro); 1337 if (rt != NULL) { 1338 /* only if we have a route and interface do we 1339 * set anything. Note we always restart 1340 * the timer though just in case it is updated 1341 * (i.e. the ifp) or route/ifp is populated. 1342 */ 1343 if (rt->rt_ifp != NULL) { 1344 if (rt->rt_ifp->if_mtu > next_mtu) { 1345 /* ok it will fit out the door */ 1346 net->mtu = next_mtu; 1347 } 1348 } 1349 } 1350 /* restart the timer */ 1351 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); 1352 } 1353 1354 void sctp_autoclose_timer(struct sctp_inpcb *inp, 1355 struct sctp_tcb *stcb, 1356 struct sctp_nets *net) 1357 { 1358 struct timeval tn, *tim_touse; 1359 struct sctp_association *asoc; 1360 int ticks_gone_by; 1361 1362 SCTP_GETTIME_TIMEVAL(&tn); 1363 if (stcb->asoc.sctp_autoclose_ticks && 1364 (inp->sctp_flags & SCTP_PCB_FLAGS_AUTOCLOSE)) { 1365 /* Auto close is on */ 1366 asoc = &stcb->asoc; 1367 /* pick the time to use */ 1368 if (asoc->time_last_rcvd.tv_sec > 1369 asoc->time_last_sent.tv_sec) { 1370 tim_touse = &asoc->time_last_rcvd; 1371 } else { 1372 tim_touse = &asoc->time_last_sent; 1373 } 1374 /* Now has long enough transpired to autoclose? 
*/ 1375 ticks_gone_by = ((tn.tv_sec - tim_touse->tv_sec) * hz); 1376 if ((ticks_gone_by > 0) && 1377 (ticks_gone_by >= (int)asoc->sctp_autoclose_ticks)) { 1378 /* 1379 * autoclose time has hit, call the output routine, 1380 * which should do nothing just to be SURE we don't 1381 * have hanging data. We can then safely check the 1382 * queues and know that we are clear to send shutdown 1383 */ 1384 sctp_chunk_output(inp, stcb, 9); 1385 /* Are we clean? */ 1386 if (TAILQ_EMPTY(&asoc->send_queue) && 1387 TAILQ_EMPTY(&asoc->sent_queue)) { 1388 /* 1389 * there is nothing queued to send, 1390 * so I'm done... 1391 */ 1392 if (SCTP_GET_STATE(asoc) != 1393 SCTP_STATE_SHUTDOWN_SENT) { 1394 /* only send SHUTDOWN 1st time thru */ 1395 #ifdef SCTP_DEBUG 1396 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) { 1397 printf("%s:%d sends a shutdown\n", 1398 __FILE__, 1399 __LINE__ 1400 ); 1401 } 1402 #endif 1403 sctp_send_shutdown(stcb, stcb->asoc.primary_destination); 1404 asoc->state = SCTP_STATE_SHUTDOWN_SENT; 1405 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 1406 stcb->sctp_ep, stcb, 1407 asoc->primary_destination); 1408 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1409 stcb->sctp_ep, stcb, 1410 asoc->primary_destination); 1411 } 1412 } 1413 } else { 1414 /* 1415 * No auto close at this time, reset t-o to 1416 * check later 1417 */ 1418 int tmp; 1419 /* fool the timer startup to use the time left */ 1420 tmp = asoc->sctp_autoclose_ticks; 1421 asoc->sctp_autoclose_ticks -= ticks_gone_by; 1422 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, 1423 net); 1424 /* restore the real tick value */ 1425 asoc->sctp_autoclose_ticks = tmp; 1426 } 1427 } 1428 } 1429 1430 void 1431 sctp_iterator_timer(struct sctp_iterator *it) 1432 { 1433 int cnt= 0; 1434 /* only one iterator can run at a 1435 * time. This is the only way we 1436 * can cleanly pull ep's from underneath 1437 * all the running interators when a 1438 * ep is freed. 1439 */ 1440 SCTP_ITERATOR_LOCK(); 1441 if (it->inp == NULL) { 1442 /* iterator is complete */ 1443 done_with_iterator: 1444 SCTP_ITERATOR_UNLOCK(); 1445 SCTP_INP_INFO_WLOCK(); 1446 LIST_REMOVE(it, sctp_nxt_itr); 1447 /* stopping the callout is not needed, in theory, 1448 * but I am paranoid. 
1449 */ 1450 SCTP_INP_INFO_WUNLOCK(); 1451 callout_stop(&it->tmr.timer); 1452 if (it->function_atend != NULL) { 1453 (*it->function_atend)(it->pointer, it->val); 1454 } 1455 callout_destroy(&it->tmr.timer); 1456 free(it, M_PCB); 1457 return; 1458 } 1459 select_a_new_ep: 1460 SCTP_INP_WLOCK(it->inp); 1461 while ((it->pcb_flags) && ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) { 1462 /* we do not like this ep */ 1463 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) { 1464 SCTP_INP_WUNLOCK(it->inp); 1465 goto done_with_iterator; 1466 } 1467 SCTP_INP_WUNLOCK(it->inp); 1468 it->inp = LIST_NEXT(it->inp, sctp_list); 1469 if (it->inp == NULL) { 1470 goto done_with_iterator; 1471 } 1472 SCTP_INP_WLOCK(it->inp); 1473 } 1474 if ((it->inp->inp_starting_point_for_iterator != NULL) && 1475 (it->inp->inp_starting_point_for_iterator != it)) { 1476 printf("Iterator collision, we must wait for other iterator at %p\n", 1477 it->inp); 1478 SCTP_INP_WUNLOCK(it->inp); 1479 goto start_timer_return; 1480 } 1481 /* now we do the actual write to this guy */ 1482 it->inp->inp_starting_point_for_iterator = it; 1483 SCTP_INP_WUNLOCK(it->inp); 1484 SCTP_INP_RLOCK(it->inp); 1485 /* if we reach here we found a inp acceptable, now through each 1486 * one that has the association in the right state 1487 */ 1488 if (it->stcb == NULL) { 1489 it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list); 1490 } 1491 if (it->stcb->asoc.stcb_starting_point_for_iterator == it) { 1492 it->stcb->asoc.stcb_starting_point_for_iterator = NULL; 1493 } 1494 while (it->stcb) { 1495 SCTP_TCB_LOCK(it->stcb); 1496 if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) { 1497 SCTP_TCB_UNLOCK(it->stcb); 1498 it->stcb = LIST_NEXT(it->stcb, sctp_tcblist); 1499 continue; 1500 } 1501 cnt++; 1502 /* run function on this one */ 1503 SCTP_INP_RUNLOCK(it->inp); 1504 (*it->function_toapply)(it->inp, it->stcb, it->pointer, it->val); 1505 sctp_chunk_output(it->inp, it->stcb, 1); 1506 SCTP_TCB_UNLOCK(it->stcb); 1507 /* see if we have limited out */ 1508 if (cnt > SCTP_MAX_ITERATOR_AT_ONCE) { 1509 it->stcb->asoc.stcb_starting_point_for_iterator = it; 1510 start_timer_return: 1511 SCTP_ITERATOR_UNLOCK(); 1512 sctp_timer_start(SCTP_TIMER_TYPE_ITERATOR, (struct sctp_inpcb *)it, NULL, NULL); 1513 return; 1514 } 1515 SCTP_INP_RLOCK(it->inp); 1516 it->stcb = LIST_NEXT(it->stcb, sctp_tcblist); 1517 } 1518 /* if we reach here, we ran out of stcb's in the inp we are looking at */ 1519 SCTP_INP_RUNLOCK(it->inp); 1520 SCTP_INP_WLOCK(it->inp); 1521 it->inp->inp_starting_point_for_iterator = NULL; 1522 SCTP_INP_WUNLOCK(it->inp); 1523 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) { 1524 it->inp = NULL; 1525 } else { 1526 SCTP_INP_INFO_RLOCK(); 1527 it->inp = LIST_NEXT(it->inp, sctp_list); 1528 SCTP_INP_INFO_RUNLOCK(); 1529 } 1530 if (it->inp == NULL) { 1531 goto done_with_iterator; 1532 } 1533 goto select_a_new_ep; 1534 } 1535
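
/*
 * Note on sctp_iterator_timer() above: it walks the global endpoint list
 * and, within each endpoint that matches it->pcb_flags, every association
 * whose state matches it->asoc_state, applying it->function_toapply and
 * kicking sctp_chunk_output() for each.  So that a single timer expiry
 * only does a bounded amount of work, it stops after
 * SCTP_MAX_ITERATOR_AT_ONCE associations, remembers its position via the
 * *_starting_point_for_iterator pointers, and reschedules itself through
 * SCTP_TIMER_TYPE_ITERATOR; it->function_atend runs once the whole walk
 * has completed.
 */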