/*	$OpenBSD: tcp_timer.c,v 1.36 2004/12/13 12:01:49 espie Exp $	*/
/*	$NetBSD: tcp_timer.c,v 1.14 1996/02/13 23:44:09 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_timer.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/kernel.h>

#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/ip_icmp.h>

int	tcp_keepidle;
int	tcp_keepintvl;
int	tcp_maxpersistidle;	/* max idle time in persist */
int	tcp_maxidle;

/*
 * Time to delay the ACK.  This is initialized in tcp_init(), unless
 * it's patched.
 */
int	tcp_delack_ticks;

void	tcp_timer_rexmt(void *);
void	tcp_timer_persist(void *);
void	tcp_timer_keep(void *);
void	tcp_timer_2msl(void *);

const tcp_timer_func_t tcp_timer_funcs[TCPT_NTIMERS] = {
	tcp_timer_rexmt,
	tcp_timer_persist,
	tcp_timer_keep,
	tcp_timer_2msl,
};

/*
 * Timer state initialization, called from tcp_init().
 */
void
tcp_timer_init(void)
{

	if (tcp_keepidle == 0)
		tcp_keepidle = TCPTV_KEEP_IDLE;

	if (tcp_keepintvl == 0)
		tcp_keepintvl = TCPTV_KEEPINTVL;

	if (tcp_maxpersistidle == 0)
		tcp_maxpersistidle = TCPTV_KEEP_IDLE;

	if (tcp_delack_ticks == 0)
		tcp_delack_ticks = TCP_DELACK_TICKS;
}
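
/*
 * With the traditional tcp_timer.h definitions (PR_SLOWHZ == 2,
 * TCPTV_KEEP_IDLE == 2 hours, TCPTV_KEEPINTVL == 75 seconds) these
 * defaults give a two-hour keepalive idle period, 75-second keepalive
 * probes and a two-hour maximum persist idle time; the exact numbers
 * depend on how the TCPTV_* and TCP_DELACK_TICKS macros are defined
 * for the platform.
 */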

/*
 * Callout to process delayed ACKs for a TCPCB.
 */
void
tcp_delack(void *arg)
{
	struct tcpcb *tp = arg;
	int s;

	/*
	 * If tcp_output() wasn't able to transmit the ACK
	 * for whatever reason, it will restart the delayed
	 * ACK callout.
	 */

	s = splsoftnet();
	if (tp->t_flags & TF_DEAD) {
		splx(s);
		return;
	}
	tp->t_flags |= TF_ACKNOW;
	(void) tcp_output(tp);
	splx(s);
}

/*
 * TCP protocol timeout routine called every 500 ms.
 * Updates global timing state (tcp_maxidle, the compatibility ISS
 * and tcp_now); the per-connection timers themselves are driven by
 * timeout(9) callouts, not from here.
 */
void
tcp_slowtimo(void)
{
	int s;

	s = splsoftnet();
	tcp_maxidle = TCPTV_KEEPCNT * tcp_keepintvl;
#ifdef TCP_COMPAT_42
	tcp_iss += TCP_ISSINCR/PR_SLOWHZ;		/* increment iss */
	if ((int)tcp_iss < 0)
		tcp_iss = 0;				/* XXX */
#endif /* TCP_COMPAT_42 */
	tcp_now++;					/* for timestamps */
	splx(s);
}
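
/*
 * Since this runs at PR_SLOWHZ (nominally every 500 ms), tcp_now and
 * the timer values handed to TCP_TIMER_ARM() throughout this file are
 * expressed in these 500 ms "slow timeout" ticks.
 */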

/*
 * Cancel all timers for TCP tp.
 */
void
tcp_canceltimers(struct tcpcb *tp)
{
	int i;

	for (i = 0; i < TCPT_NTIMERS; i++)
		TCP_TIMER_DISARM(tp, i);
}

int	tcp_backoff[TCP_MAXRXTSHIFT + 1] =
    { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };

int tcp_totbackoff = 511;	/* sum of tcp_backoff[] */
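/* For reference: 1+2+4+8+16+32 + 7*64 = 63 + 448 = 511. */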

/*
 * TCP timer processing.
 */

#ifdef TCP_SACK
void	tcp_timer_freesack(struct tcpcb *);

void
tcp_timer_freesack(struct tcpcb *tp)
{
	struct sackhole *p, *q;
	/*
	 * Free SACK holes for 2MSL and REXMT timers.
	 */
	q = tp->snd_holes;
	while (q != NULL) {
		p = q;
		q = q->next;
		pool_put(&sackhl_pool, p);
	}
	tp->snd_holes = NULL;
#ifdef TCP_FACK
	tp->snd_fack = tp->snd_una;
	tp->retran_data = 0;
	tp->snd_awnd = 0;
#endif /* TCP_FACK */
}
#endif /* TCP_SACK */

void
tcp_timer_rexmt(void *arg)
{
	struct tcpcb *tp = arg;
	uint32_t rto;
	int s;

	s = splsoftnet();
	if (tp->t_flags & TF_DEAD) {
		splx(s);
		return;
	}

#ifdef TCP_SACK
	tcp_timer_freesack(tp);
#endif
	if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) {
		tp->t_rxtshift = TCP_MAXRXTSHIFT;
		tcpstat.tcps_timeoutdrop++;
		(void)tcp_drop(tp, tp->t_softerror ?
		    tp->t_softerror : ETIMEDOUT);
		goto out;
	}
	tcpstat.tcps_rexmttimeo++;
	rto = TCP_REXMTVAL(tp);
	if (rto < tp->t_rttmin)
		rto = tp->t_rttmin;
	TCPT_RANGESET(tp->t_rxtcur,
	    rto * tcp_backoff[tp->t_rxtshift],
	    tp->t_rttmin, TCPTV_REXMTMAX);
	TCP_TIMER_ARM(tp, TCPT_REXMT, tp->t_rxtcur);
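	/*
	 * Example: with a base RTO of 1 second, successive retransmit
	 * timeouts fire after roughly 1, 2, 4, 8, ... seconds per
	 * tcp_backoff[], clamped to TCPTV_REXMTMAX, until the
	 * TCP_MAXRXTSHIFT (12) check above drops the connection.
	 */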

	/*
	 * If we are losing and we are trying path MTU discovery,
	 * try turning it off.  This will avoid black holes in
	 * the network which suppress or fail to send "packet
	 * too big" ICMP messages.  We should ideally do
	 * lots more sophisticated searching to find the right
	 * value here...
	 */
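	/*
	 * (TCP_MAXRXTSHIFT / 6 is 2, so this triggers from the third
	 * consecutive retransmission timeout onward.)
	 */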
	if (ip_mtudisc && tp->t_inpcb &&
	    TCPS_HAVEESTABLISHED(tp->t_state) &&
	    tp->t_rxtshift > TCP_MAXRXTSHIFT / 6) {
		struct inpcb *inp = tp->t_inpcb;
		struct rtentry *rt = NULL;
		struct sockaddr_in sin;

		/* No data to send means path mtu is not a problem */
		if (!inp->inp_socket->so_snd.sb_cc)
			goto leave;

		rt = in_pcbrtentry(inp);
		/* Check if path MTU discovery is disabled already */
		if (rt && (rt->rt_flags & RTF_HOST) &&
		    (rt->rt_rmx.rmx_locks & RTV_MTU))
			goto leave;

		rt = NULL;
		switch (tp->pf) {
#ifdef INET6
		case PF_INET6:
			/*
			 * We can not turn off path MTU for IPv6.
			 * Do nothing for now, maybe lower to
			 * minimum MTU.
			 */
			break;
#endif
		case PF_INET:
			bzero(&sin, sizeof(struct sockaddr_in));
			sin.sin_family = AF_INET;
			sin.sin_len = sizeof(struct sockaddr_in);
			sin.sin_addr = inp->inp_faddr;
			rt = icmp_mtudisc_clone(sintosa(&sin));
			break;
		}
		if (rt != NULL) {
			/* Disable path MTU discovery */
			if ((rt->rt_rmx.rmx_locks & RTV_MTU) == 0) {
				rt->rt_rmx.rmx_locks |= RTV_MTU;
				in_rtchange(inp, 0);
			}

			rtfree(rt);
		}
	leave:
		;
	}

	/*
	 * If losing, let the lower level know and try for
	 * a better route.  Also, if we backed off this far,
	 * our srtt estimate is probably bogus.  Clobber it
	 * so we'll take the next rtt measurement as our srtt;
	 * move the current srtt into rttvar to keep the current
	 * retransmit times until then.
	 */
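	/*
	 * (TCP_MAXRXTSHIFT / 4 is 3, i.e. from the fourth consecutive
	 * timeout onward.  Folding srtt >> TCP_RTT_SHIFT into rttvar
	 * leaves TCP_REXMTVAL(), roughly smoothed RTT plus variance,
	 * about the same until a new measurement is taken.)
	 */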
	if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
		in_losing(tp->t_inpcb);
		tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
		tp->t_srtt = 0;
	}
	tp->snd_nxt = tp->snd_una;
#if defined(TCP_SACK)
	/*
	 * Note:  We overload snd_last to function also as the
	 * snd_last variable described in RFC 2582
	 */
	tp->snd_last = tp->snd_max;
#endif /* TCP_SACK */
	/*
	 * If timing a segment in this window, stop the timer.
	 */
	tp->t_rtttime = 0;
#ifdef TCP_ECN
	/*
	 * if ECN is enabled, there might be a broken firewall which
	 * blocks ecn packets.  fall back to non-ecn.
	 */
	if ((tp->t_state == TCPS_SYN_SENT || tp->t_state == TCPS_SYN_RECEIVED)
	    && tcp_do_ecn && !(tp->t_flags & TF_DISABLE_ECN))
		tp->t_flags |= TF_DISABLE_ECN;
#endif
	/*
	 * Close the congestion window down to one segment
	 * (we'll open it by one segment for each ack we get).
	 * Since we probably have a window's worth of unacked
	 * data accumulated, this "slow start" keeps us from
	 * dumping all that data as back-to-back packets (which
	 * might overwhelm an intermediate gateway).
	 *
	 * There are two phases to the opening: Initially we
	 * open by one mss on each ack.  This makes the window
	 * size increase exponentially with time.  If the
	 * window is larger than the path can handle, this
	 * exponential growth results in dropped packet(s)
	 * almost immediately.  To get more time between
	 * drops but still "push" the network to take advantage
	 * of improving conditions, we switch from exponential
	 * to linear window opening at some threshold size.
	 * For a threshold, we use half the current window
	 * size, truncated to a multiple of the mss.
	 *
	 * (the minimum cwnd that will give us exponential
	 * growth is 2 mss.  We don't allow the threshold
	 * to go below this.)
	 */
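	/*
	 * Worked example with illustrative values: snd_wnd = 32768,
	 * snd_cwnd = 65536 and t_maxseg = 1460 give win = 32768/2/1460
	 * = 11, so ssthresh becomes 11 * 1460 = 16060 bytes while cwnd
	 * restarts at a single 1460-byte segment.
	 */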
	{
		u_long win = ulmin(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg;
		if (win < 2)
			win = 2;
		tp->snd_cwnd = tp->t_maxseg;
		tp->snd_ssthresh = win * tp->t_maxseg;
		tp->t_dupacks = 0;
#ifdef TCP_ECN
		tp->snd_last = tp->snd_max;
		tp->t_flags |= TF_SEND_CWR;
#endif
#if 1 /* TCP_ECN */
		tcpstat.tcps_cwr_timeout++;
#endif
	}
	(void) tcp_output(tp);

 out:
	splx(s);
}

void
tcp_timer_persist(void *arg)
{
	struct tcpcb *tp = arg;
	uint32_t rto;
	int s;

	s = splsoftnet();
	if ((tp->t_flags & TF_DEAD) ||
	    TCP_TIMER_ISARMED(tp, TCPT_REXMT)) {
		splx(s);
		return;
	}
	tcpstat.tcps_persisttimeo++;
	/*
	 * Hack: if the peer is dead/unreachable, we do not
	 * time out if the window is closed.  After a full
	 * backoff, drop the connection if the idle time
	 * (no responses to probes) reaches the maximum
	 * backoff that we would use if retransmitting.
	 */
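	/*
	 * With the traditional defaults this means: once the backoff
	 * is exhausted, drop when the probes have gone unanswered for
	 * tcp_maxpersistidle (traditionally two hours) or for
	 * tcp_totbackoff (511) times the base RTO, whichever is
	 * reached first.
	 */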
	rto = TCP_REXMTVAL(tp);
	if (rto < tp->t_rttmin)
		rto = tp->t_rttmin;
	if (tp->t_rxtshift == TCP_MAXRXTSHIFT &&
	    ((tcp_now - tp->t_rcvtime) >= tcp_maxpersistidle ||
	    (tcp_now - tp->t_rcvtime) >= rto * tcp_totbackoff)) {
		tcpstat.tcps_persistdrop++;
		tp = tcp_drop(tp, ETIMEDOUT);
		goto out;
	}
	tcp_setpersist(tp);
	tp->t_force = 1;
	(void) tcp_output(tp);
	tp->t_force = 0;
 out:
	splx(s);
}

void
tcp_timer_keep(void *arg)
{
	struct tcpcb *tp = arg;
	int s;

	s = splsoftnet();
	if (tp->t_flags & TF_DEAD) {
		splx(s);
		return;
	}

	tcpstat.tcps_keeptimeo++;
	if (TCPS_HAVEESTABLISHED(tp->t_state) == 0)
		goto dropit;
	if (tp->t_inpcb->inp_socket->so_options & SO_KEEPALIVE &&
	    tp->t_state <= TCPS_CLOSING) {
		if ((tcp_maxidle > 0) &&
		    ((tcp_now - tp->t_rcvtime) >= tcp_keepidle + tcp_maxidle))
			goto dropit;
		/*
		 * Send a packet designed to force a response
		 * if the peer is up and reachable:
		 * either an ACK if the connection is still alive,
		 * or an RST if the peer has closed the connection
		 * due to timeout or reboot.
		 * Using sequence number tp->snd_una-1
		 * causes the transmitted zero-length segment
		 * to lie outside the receive window;
		 * by the protocol spec, this requires the
		 * correspondent TCP to respond.
		 */
		tcpstat.tcps_keepprobe++;
#ifdef TCP_COMPAT_42
		/*
		 * The keepalive packet must have nonzero length
		 * to get a 4.2 host to respond.
		 */
		tcp_respond(tp, mtod(tp->t_template, caddr_t),
		    (struct mbuf *)NULL, tp->rcv_nxt - 1, tp->snd_una - 1, 0);
#else
		tcp_respond(tp, mtod(tp->t_template, caddr_t),
		    (struct mbuf *)NULL, tp->rcv_nxt, tp->snd_una - 1, 0);
#endif
		TCP_TIMER_ARM(tp, TCPT_KEEP, tcp_keepintvl);
	} else
		TCP_TIMER_ARM(tp, TCPT_KEEP, tcp_keepidle);
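	/*
	 * Net effect with the traditional defaults: after tcp_keepidle
	 * (two hours) of silence, probes go out every tcp_keepintvl
	 * (75 seconds), and tcp_maxidle = TCPTV_KEEPCNT * tcp_keepintvl
	 * (8 probe intervals) without a response drops the connection
	 * via the check above.
	 */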

	splx(s);
	return;

 dropit:
	tcpstat.tcps_keepdrops++;
	tp = tcp_drop(tp, ETIMEDOUT);

	splx(s);
}

void
tcp_timer_2msl(void *arg)
{
	struct tcpcb *tp = arg;
	int s;

	s = splsoftnet();
	if (tp->t_flags & TF_DEAD) {
		splx(s);
		return;
	}

#ifdef TCP_SACK
	tcp_timer_freesack(tp);
#endif
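
	/*
	 * In TIME_WAIT this timer ends the 2MSL wait and closes the
	 * connection.  In the other states that use it (typically
	 * FIN_WAIT_2) it is re-armed every tcp_keepintvl until the
	 * connection has been idle longer than tcp_maxidle, and only
	 * then is the connection closed.
	 */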

	if (tp->t_state != TCPS_TIME_WAIT &&
	    ((tcp_maxidle == 0) || ((tcp_now - tp->t_rcvtime) <= tcp_maxidle)))
		TCP_TIMER_ARM(tp, TCPT_2MSL, tcp_keepintvl);
	else
		tp = tcp_close(tp);

	splx(s);
}