/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)tcp_timer.c	8.2 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_timer.c,v 1.34.2.14 2003/02/03 02:33:41 hsu Exp $
 * $DragonFly: src/sys/netinet/tcp_timer.c,v 1.3 2003/07/23 06:21:01 dillon Exp $
 */

#include "opt_compat.h"
#include "opt_inet6.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>

#include <machine/cpu.h>	/* before tcp_seq.h, for tcp_random18() */

#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_pcb.h>
#ifdef INET6
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif

static int
sysctl_msec_to_ticks(SYSCTL_HANDLER_ARGS)
{
	int error, s, tt;

	tt = *(int *)oidp->oid_arg1;
	s = (int)((int64_t)tt * 1000 / hz);

	error = sysctl_handle_int(oidp, &s, 0, req);
	if (error || !req->newptr)
		return (error);

	tt = (int)((int64_t)s * hz / 1000);
	if (tt < 1)
		return (EINVAL);

	*(int *)oidp->oid_arg1 = tt;
	return (0);
}

int	tcp_keepinit;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_keepinit, 0, sysctl_msec_to_ticks, "I", "");

int	tcp_keepidle;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_keepidle, 0, sysctl_msec_to_ticks, "I", "");

int	tcp_keepintvl;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_keepintvl, 0, sysctl_msec_to_ticks, "I", "");

int	tcp_delacktime;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_DELACKTIME, delacktime,
    CTLTYPE_INT|CTLFLAG_RW, &tcp_delacktime, 0, sysctl_msec_to_ticks, "I",
    "Time before a delayed ACK is sent");

int	tcp_msl;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_msl, 0, sysctl_msec_to_ticks, "I", "Maximum segment lifetime");

int	tcp_rexmit_min;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_min, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_rexmit_min, 0, sysctl_msec_to_ticks, "I", "Minimum Retransmission Timeout");

int	tcp_rexmit_slop;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_slop, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_rexmit_slop, 0, sysctl_msec_to_ticks, "I", "Retransmission Timer Slop");

static int	always_keepalive = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, always_keepalive, CTLFLAG_RW,
    &always_keepalive, 0, "Assume SO_KEEPALIVE on all TCP connections");

static int	tcp_keepcnt = TCPTV_KEEPCNT;	/* max idle probes */
int	tcp_maxpersistidle;			/* max idle time in persist */
int	tcp_maxidle;

/*
 * Tcp protocol timeout routine called every 500 ms.
 * Updates timestamps used for TCP and causes finite state machine
 * actions if timers expire.
 */
void
tcp_slowtimo()
{
	int s;

	s = splnet();

	tcp_maxidle = tcp_keepcnt * tcp_keepintvl;

	splx(s);
}
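
/*
 * Worked example of the keep-alive arithmetic, assuming the historical
 * defaults defined elsewhere (tcp_timer.h / tcp_subr.c), not in this
 * file: with tcp_keepcnt = TCPTV_KEEPCNT = 8 probes and tcp_keepintvl =
 * 75 seconds, tcp_maxidle works out to 600 seconds, so a connection
 * that answers none of its probes is dropped tcp_keepidle (two hours by
 * default) plus ten minutes after it last produced a segment.  The
 * sysctl handler above exports all of these intervals in milliseconds
 * while storing them in ticks; with hz = 100, 75000 ms corresponds to
 * 7500 ticks.
 */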

/*
 * Cancel all timers for TCP tp.
 */
void
tcp_canceltimers(tp)
	struct tcpcb *tp;
{
	callout_stop(tp->tt_2msl);
	callout_stop(tp->tt_persist);
	callout_stop(tp->tt_keep);
	callout_stop(tp->tt_rexmt);
}

int	tcp_syn_backoff[TCP_MAXRXTSHIFT + 1] =
    { 1, 1, 1, 1, 1, 2, 4, 8, 16, 32, 64, 64, 64 };

int	tcp_backoff[TCP_MAXRXTSHIFT + 1] =
    { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };

static int tcp_totbackoff = 511;	/* sum of tcp_backoff[] */

/*
 * TCP timer processing.
 */
void
tcp_timer_delack(xtp)
	void *xtp;
{
	struct tcpcb *tp = xtp;
	int s;

	s = splnet();
	if (callout_pending(tp->tt_delack) || !callout_active(tp->tt_delack)) {
		splx(s);
		return;
	}
	callout_deactivate(tp->tt_delack);

	tp->t_flags |= TF_ACKNOW;
	tcpstat.tcps_delack++;
	(void) tcp_output(tp);
	splx(s);
}

void
tcp_timer_2msl(xtp)
	void *xtp;
{
	struct tcpcb *tp = xtp;
	int s;
#ifdef TCPDEBUG
	int ostate;

	ostate = tp->t_state;
#endif
	s = splnet();
	if (callout_pending(tp->tt_2msl) || !callout_active(tp->tt_2msl)) {
		splx(s);
		return;
	}
	callout_deactivate(tp->tt_2msl);
	/*
	 * 2 MSL timeout in shutdown went off.  If we're closed but
	 * still waiting for peer to close and connection has been idle
	 * too long, or if 2MSL time is up from TIME_WAIT, delete connection
	 * control block.  Otherwise, check again in a bit.
	 */
	if (tp->t_state != TCPS_TIME_WAIT &&
	    (ticks - tp->t_rcvtime) <= tcp_maxidle)
		callout_reset(tp->tt_2msl, tcp_keepintvl,
			      tcp_timer_2msl, tp);
	else
		tp = tcp_close(tp);

#ifdef TCPDEBUG
	if (tp && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
			  PRU_SLOWTIMO);
#endif
	splx(s);
}

void
tcp_timer_keep(xtp)
	void *xtp;
{
	struct tcpcb *tp = xtp;
	struct tcptemp *t_template;
	int s;
#ifdef TCPDEBUG
	int ostate;

	ostate = tp->t_state;
#endif
	s = splnet();
	if (callout_pending(tp->tt_keep) || !callout_active(tp->tt_keep)) {
		splx(s);
		return;
	}
	callout_deactivate(tp->tt_keep);
	/*
	 * Keep-alive timer went off; send something
	 * or drop connection if idle for too long.
	 */
	tcpstat.tcps_keeptimeo++;
	if (tp->t_state < TCPS_ESTABLISHED)
		goto dropit;
	if ((always_keepalive ||
	     tp->t_inpcb->inp_socket->so_options & SO_KEEPALIVE) &&
	    tp->t_state <= TCPS_CLOSING) {
		if ((ticks - tp->t_rcvtime) >= tcp_keepidle + tcp_maxidle)
			goto dropit;
		/*
		 * Send a packet designed to force a response
		 * if the peer is up and reachable:
		 * either an ACK if the connection is still alive,
		 * or an RST if the peer has closed the connection
		 * due to timeout or reboot.
		 * Using sequence number tp->snd_una-1
		 * causes the transmitted zero-length segment
		 * to lie outside the receive window;
		 * by the protocol spec, this requires the
		 * correspondent TCP to respond.
		 */
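		/*
		 * Note (assuming the stock tcp_respond()/tcp_input()
		 * behavior): the probe built below is a bare, data-less
		 * ACK with seq = snd_una - 1 and ack = rcv_nxt.  Whatever
		 * the peer answers with passes through tcp_input(), which
		 * stamps t_rcvtime and thereby resets the idle clock
		 * tested above.
		 */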
		tcpstat.tcps_keepprobe++;
		t_template = tcp_maketemplate(tp);
		if (t_template) {
			tcp_respond(tp, t_template->tt_ipgen,
				    &t_template->tt_t, (struct mbuf *)NULL,
				    tp->rcv_nxt, tp->snd_una - 1, 0);
			(void) m_free(dtom(t_template));
		}
		callout_reset(tp->tt_keep, tcp_keepintvl, tcp_timer_keep, tp);
	} else
		callout_reset(tp->tt_keep, tcp_keepidle, tcp_timer_keep, tp);

#ifdef TCPDEBUG
	if (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)
		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
			  PRU_SLOWTIMO);
#endif
	splx(s);
	return;

dropit:
	tcpstat.tcps_keepdrops++;
	tp = tcp_drop(tp, ETIMEDOUT);

#ifdef TCPDEBUG
	if (tp && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
			  PRU_SLOWTIMO);
#endif
	splx(s);
}

void
tcp_timer_persist(xtp)
	void *xtp;
{
	struct tcpcb *tp = xtp;
	int s;
#ifdef TCPDEBUG
	int ostate;

	ostate = tp->t_state;
#endif
	s = splnet();
	if (callout_pending(tp->tt_persist) || !callout_active(tp->tt_persist)) {
		splx(s);
		return;
	}
	callout_deactivate(tp->tt_persist);
	/*
	 * Persistence timer into zero window.
	 * Force a byte to be output, if possible.
	 */
	tcpstat.tcps_persisttimeo++;
	/*
	 * Hack: if the peer is dead/unreachable, we do not
	 * time out if the window is closed.  After a full
	 * backoff, drop the connection if the idle time
	 * (no responses to probes) reaches the maximum
	 * backoff that we would use if retransmitting.
	 */
	if (tp->t_rxtshift == TCP_MAXRXTSHIFT &&
	    ((ticks - tp->t_rcvtime) >= tcp_maxpersistidle ||
	     (ticks - tp->t_rcvtime) >= TCP_REXMTVAL(tp) * tcp_totbackoff)) {
		tcpstat.tcps_persistdrop++;
		tp = tcp_drop(tp, ETIMEDOUT);
		goto out;
	}
	tcp_setpersist(tp);
	tp->t_force = 1;
	(void) tcp_output(tp);
	tp->t_force = 0;

out:
#ifdef TCPDEBUG
	if (tp && tp->t_inpcb->inp_socket->so_options & SO_DEBUG)
		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
			  PRU_SLOWTIMO);
#endif
	splx(s);
}

void
tcp_timer_rexmt(xtp)
	void *xtp;
{
	struct tcpcb *tp = xtp;
	int s;
	int rexmt;
#ifdef TCPDEBUG
	int ostate;

	ostate = tp->t_state;
#endif
	s = splnet();
	if (callout_pending(tp->tt_rexmt) || !callout_active(tp->tt_rexmt)) {
		splx(s);
		return;
	}
	callout_deactivate(tp->tt_rexmt);
	/*
	 * Retransmission timer went off.  Message has not
	 * been acked within retransmit interval.  Back off
	 * to a longer retransmit interval and retransmit one segment.
	 */
	if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) {
		tp->t_rxtshift = TCP_MAXRXTSHIFT;
		tcpstat.tcps_timeoutdrop++;
		tp = tcp_drop(tp, tp->t_softerror ?
			      tp->t_softerror : ETIMEDOUT);
		goto out;
	}
	if (tp->t_rxtshift == 1) {
		/*
		 * first retransmit; record ssthresh and cwnd so they can
		 * be recovered if this turns out to be a "bad" retransmit.
		 * A retransmit is considered "bad" if an ACK for this
		 * segment is received within RTT/2 interval; the assumption
		 * here is that the ACK was already in flight.  See
		 * "On Estimating End-to-End Network Path Properties" by
		 * Allman and Paxson for more details.
		 */
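		/*
		 * Note on the window below: t_srtt is kept scaled by
		 * 2^TCP_RTT_SHIFT, so shifting right by TCP_RTT_SHIFT + 1
		 * yields roughly srtt/2 in ticks.  If an ACK covering the
		 * retransmitted data arrives before t_badrxtwin expires,
		 * the receive path is expected to restore the
		 * snd_cwnd_prev/snd_ssthresh_prev values saved here.
		 */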
		tp->snd_cwnd_prev = tp->snd_cwnd;
		tp->snd_ssthresh_prev = tp->snd_ssthresh;
		tp->snd_recover_prev = tp->snd_recover;
		if (IN_FASTRECOVERY(tp))
			tp->t_flags |= TF_WASFRECOVERY;
		else
			tp->t_flags &= ~TF_WASFRECOVERY;
		tp->t_badrxtwin = ticks + (tp->t_srtt >> (TCP_RTT_SHIFT + 1));
	}
	tcpstat.tcps_rexmttimeo++;
	if (tp->t_state == TCPS_SYN_SENT)
		rexmt = TCP_REXMTVAL(tp) * tcp_syn_backoff[tp->t_rxtshift];
	else
		rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
	TCPT_RANGESET(tp->t_rxtcur, rexmt,
		      tp->t_rttmin, TCPTV_REXMTMAX);
	/*
	 * Disable rfc1323 and rfc1644 if we haven't got any response to
	 * our third SYN to work around some broken terminal servers
	 * (most of which have hopefully been retired) that have bad VJ
	 * header compression code which trashes TCP segments containing
	 * unknown-to-them TCP options.
	 */
	if ((tp->t_state == TCPS_SYN_SENT) && (tp->t_rxtshift == 3))
		tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_REQ_CC);
	/*
	 * If losing, let the lower level know and try for
	 * a better route.  Also, if we backed off this far,
	 * our srtt estimate is probably bogus.  Clobber it
	 * so we'll take the next rtt measurement as our srtt;
	 * move the current srtt into rttvar to keep the current
	 * retransmit times until then.
	 */
	if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
#ifdef INET6
		if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)
			in6_losing(tp->t_inpcb);
		else
#endif
		in_losing(tp->t_inpcb);
		tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
		tp->t_srtt = 0;
	}
	tp->snd_nxt = tp->snd_una;
	tp->snd_recover = tp->snd_max;
	/*
	 * Force a segment to be sent.
	 */
	tp->t_flags |= TF_ACKNOW;
	/*
	 * If timing a segment in this window, stop the timer.
	 */
	tp->t_rtttime = 0;
	/*
	 * Close the congestion window down to one segment
	 * (we'll open it by one segment for each ack we get).
	 * Since we probably have a window's worth of unacked
	 * data accumulated, this "slow start" keeps us from
	 * dumping all that data as back-to-back packets (which
	 * might overwhelm an intermediate gateway).
	 *
	 * There are two phases to the opening: Initially we
	 * open by one mss on each ack.  This makes the window
	 * size increase exponentially with time.  If the
	 * window is larger than the path can handle, this
	 * exponential growth results in dropped packet(s)
	 * almost immediately.  To get more time between
	 * drops but still "push" the network to take advantage
	 * of improving conditions, we switch from exponential
	 * to linear window opening at some threshold size.
	 * For a threshold, we use half the current window
	 * size, truncated to a multiple of the mss.
	 *
	 * (the minimum cwnd that will give us exponential
	 * growth is 2 mss.  We don't allow the threshold
	 * to go below this.)
	 */
	{
		u_int win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg;
		if (win < 2)
			win = 2;
		tp->snd_cwnd = tp->t_maxseg;
		tp->snd_ssthresh = win * tp->t_maxseg;
		tp->t_dupacks = 0;
	}
	EXIT_FASTRECOVERY(tp);
	(void) tcp_output(tp);

out:
#ifdef TCPDEBUG
	if (tp && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
			  PRU_SLOWTIMO);
#endif
	splx(s);
}
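
/*
 * Worked example of the congestion window collapse at the tail of
 * tcp_timer_rexmt() above, using assumed (typical) values rather than
 * anything defined in this file: with t_maxseg = 1460 bytes and
 * min(snd_wnd, snd_cwnd) = 65535, win = 65535 / 2 / 1460 = 22, so
 * snd_ssthresh becomes 22 * 1460 = 32120 bytes while snd_cwnd falls
 * back to a single segment; the sender then slow-starts up to ssthresh
 * and opens the window linearly beyond it.  The retransmit interval
 * itself grows as TCP_REXMTVAL(tp) * tcp_backoff[t_rxtshift], doubling
 * on each timeout until the table pegs at 64 (and TCPT_RANGESET clamps
 * the result to TCPTV_REXMTMAX); the table entries sum to 511, which is
 * where tcp_totbackoff comes from.
 */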