/*	$OpenBSD: tcp_timer.c,v 1.34 2003/12/10 07:22:43 itojun Exp $	*/
/*	$NetBSD: tcp_timer.c,v 1.14 1996/02/13 23:44:09 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_timer.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/kernel.h>

#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/ip_icmp.h>

int	tcp_keepidle;
int	tcp_keepintvl;
int	tcp_maxpersistidle;	/* max idle time in persist */
int	tcp_maxidle;

/*
 * Time to delay the ACK.  This is initialized in tcp_init(), unless
 * it has already been patched to a non-zero value.
 */
int	tcp_delack_ticks;

void	tcp_timer_rexmt(void *);
void	tcp_timer_persist(void *);
void	tcp_timer_keep(void *);
void	tcp_timer_2msl(void *);

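/*
 * tcp_timer_funcs[] below is indexed by the TCPT_* timer constants;
 * the order of its entries must therefore match the numeric values
 * defined in tcp_timer.h.
 */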
const tcp_timer_func_t tcp_timer_funcs[TCPT_NTIMERS] = {
	tcp_timer_rexmt,
	tcp_timer_persist,
	tcp_timer_keep,
	tcp_timer_2msl,
};

/*
 * Timer state initialization, called from tcp_init().
 */
void
tcp_timer_init(void)
{

	if (tcp_keepidle == 0)
		tcp_keepidle = TCPTV_KEEP_IDLE;

	if (tcp_keepintvl == 0)
		tcp_keepintvl = TCPTV_KEEPINTVL;

	if (tcp_maxpersistidle == 0)
		tcp_maxpersistidle = TCPTV_KEEP_IDLE;

	if (tcp_delack_ticks == 0)
		tcp_delack_ticks = TCP_DELACK_TICKS;
}
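
/*
 * Note (informational): with the stock tcp_timer.h definitions
 * (PR_SLOWHZ == 2, i.e. 500 ms ticks), the defaults above work out to
 * roughly 2 hours of idle time before the first keepalive probe
 * (TCPTV_KEEP_IDLE) and 75 seconds between probes (TCPTV_KEEPINTVL).
 */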

/*
 * Callout to process delayed ACKs for a TCPCB.
 */
void
tcp_delack(void *arg)
{
	struct tcpcb *tp = arg;
	int s;

	/*
	 * If tcp_output() wasn't able to transmit the ACK
	 * for whatever reason, it will restart the delayed
	 * ACK callout.
	 */

	s = splsoftnet();
	tp->t_flags |= TF_ACKNOW;
	(void) tcp_output(tp);
	splx(s);
}

/*
 * TCP protocol timeout routine called every 500 ms.
 * Advances tcp_now (used for timestamps and idle-time checks) and
 * recomputes tcp_maxidle; the per-connection timers run from their
 * own timeout callouts above.
 */
void
tcp_slowtimo(void)
{
	int s;

	s = splsoftnet();
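	/*
	 * tcp_maxidle bounds how long keepalive and 2MSL processing keep
	 * probing an unresponsive peer: TCPTV_KEEPCNT intervals of
	 * tcp_keepintvl ticks each (with the stock values, 8 probes of
	 * 75 seconds, i.e. roughly 10 minutes).
	 */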
	tcp_maxidle = TCPTV_KEEPCNT * tcp_keepintvl;
#ifdef TCP_COMPAT_42
	tcp_iss += TCP_ISSINCR/PR_SLOWHZ;		/* increment iss */
	if ((int)tcp_iss < 0)
		tcp_iss = 0;				/* XXX */
#endif /* TCP_COMPAT_42 */
	tcp_now++;					/* for timestamps */
	splx(s);
}

/*
 * Cancel all timers for TCP tp.
 */
void
tcp_canceltimers(struct tcpcb *tp)
{
	int i;

	for (i = 0; i < TCPT_NTIMERS; i++)
		TCP_TIMER_DISARM(tp, i);
}

int	tcp_backoff[TCP_MAXRXTSHIFT + 1] =
    { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };

int tcp_totbackoff = 511;	/* sum of tcp_backoff[] */
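
/*
 * The table above sums to 1 + 2 + 4 + 8 + 16 + 32 + 7 * 64 == 511,
 * which is the tcp_totbackoff value tcp_timer_persist() uses as its
 * total backoff budget before giving up on a probed connection.
 */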

/*
 * TCP timer processing.
 */

#ifdef TCP_SACK
void	tcp_timer_freesack(struct tcpcb *);

void
tcp_timer_freesack(struct tcpcb *tp)
{
	struct sackhole *p, *q;
	/*
	 * Free SACK holes for 2MSL and REXMT timers.
	 */
	q = tp->snd_holes;
	while (q != NULL) {
		p = q;
		q = q->next;
		pool_put(&sackhl_pool, p);
	}
	tp->snd_holes = 0;
#ifdef TCP_FACK
	tp->snd_fack = tp->snd_una;
	tp->retran_data = 0;
	tp->snd_awnd = 0;
#endif /* TCP_FACK */
}
#endif /* TCP_SACK */

void
tcp_timer_rexmt(void *arg)
{
	struct tcpcb *tp = arg;
	uint32_t rto;
	int s;

	s = splsoftnet();

#ifdef TCP_SACK
	tcp_timer_freesack(tp);
#endif
	if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) {
		tp->t_rxtshift = TCP_MAXRXTSHIFT;
		tcpstat.tcps_timeoutdrop++;
		(void)tcp_drop(tp, tp->t_softerror ?
		    tp->t_softerror : ETIMEDOUT);
		goto out;
	}
	tcpstat.tcps_rexmttimeo++;
	rto = TCP_REXMTVAL(tp);
	if (rto < tp->t_rttmin)
		rto = tp->t_rttmin;
	TCPT_RANGESET((long) tp->t_rxtcur,
	    rto * tcp_backoff[tp->t_rxtshift],
	    tp->t_rttmin, TCPTV_REXMTMAX);
	TCP_TIMER_ARM(tp, TCPT_REXMT, tp->t_rxtcur);
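	/*
	 * Example (illustrative numbers, assuming the stock PR_SLOWHZ of 2,
	 * i.e. 500 ms ticks): with rto == 3 ticks and t_rxtshift == 4, the
	 * timer is re-armed for 3 * tcp_backoff[4] == 48 ticks (24 seconds),
	 * clamped between t_rttmin and TCPTV_REXMTMAX.
	 */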

	/*
	 * If we are losing and we are trying path MTU discovery,
	 * try turning it off.  This will avoid black holes in
	 * the network which suppress or fail to send "packet
	 * too big" ICMP messages.  We should ideally do
	 * lots more sophisticated searching to find the right
	 * value here...
	 */
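	/*
	 * TCP_MAXRXTSHIFT is 12 (the backoff table above has 13 entries),
	 * so the shift test below fires from the third consecutive
	 * retransmit timeout onwards.
	 */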
	if (ip_mtudisc && tp->t_inpcb &&
	    TCPS_HAVEESTABLISHED(tp->t_state) &&
	    tp->t_rxtshift > TCP_MAXRXTSHIFT / 6) {
		struct inpcb *inp = tp->t_inpcb;
		struct rtentry *rt = NULL;
		struct sockaddr_in sin;

		/* No data to send means path MTU is not a problem */
		if (!inp->inp_socket->so_snd.sb_cc)
			goto leave;

		rt = in_pcbrtentry(inp);
		/* Check if path MTU discovery is disabled already */
		if (rt && (rt->rt_flags & RTF_HOST) &&
		    (rt->rt_rmx.rmx_locks & RTV_MTU))
			goto leave;

		rt = NULL;
		switch (tp->pf) {
#ifdef INET6
		case PF_INET6:
			/*
			 * We cannot turn off path MTU discovery for IPv6.
			 * Do nothing for now; maybe lower to the
			 * minimum MTU instead.
			 */
			break;
#endif
		case PF_INET:
			bzero(&sin, sizeof(struct sockaddr_in));
			sin.sin_family = AF_INET;
			sin.sin_len = sizeof(struct sockaddr_in);
			sin.sin_addr = inp->inp_faddr;
			rt = icmp_mtudisc_clone(sintosa(&sin));
			break;
		}
		if (rt != NULL) {
			/* Disable path MTU discovery */
			if ((rt->rt_rmx.rmx_locks & RTV_MTU) == 0) {
				rt->rt_rmx.rmx_locks |= RTV_MTU;
				in_rtchange(inp, 0);
			}

			rtfree(rt);
		}
	leave:
		;
	}

	/*
	 * If losing, let the lower level know and try for
	 * a better route.  Also, if we backed off this far,
	 * our srtt estimate is probably bogus.  Clobber it
	 * so we'll take the next rtt measurement as our srtt;
	 * move the current srtt into rttvar to keep the current
	 * retransmit times until then.
	 */
	if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
		in_losing(tp->t_inpcb);
		tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
		tp->t_srtt = 0;
	}
	tp->snd_nxt = tp->snd_una;
#if defined(TCP_SACK)
	/*
	 * Note:  We overload snd_last to function also as the
	 * snd_last variable described in RFC 2582
	 */
	tp->snd_last = tp->snd_max;
#endif /* TCP_SACK */
	/*
	 * If timing a segment in this window, stop the timer.
	 */
	tp->t_rtttime = 0;
#ifdef TCP_ECN
	/*
	 * If ECN is enabled, there might be a broken firewall which
	 * blocks ECN packets.  Fall back to non-ECN.
	 */
	if ((tp->t_state == TCPS_SYN_SENT || tp->t_state == TCPS_SYN_RECEIVED)
	    && tcp_do_ecn && !(tp->t_flags & TF_DISABLE_ECN))
		tp->t_flags |= TF_DISABLE_ECN;
#endif
	/*
	 * Close the congestion window down to one segment
	 * (we'll open it by one segment for each ack we get).
	 * Since we probably have a window's worth of unacked
	 * data accumulated, this "slow start" keeps us from
	 * dumping all that data as back-to-back packets (which
	 * might overwhelm an intermediate gateway).
	 *
	 * There are two phases to the opening: Initially we
	 * open by one mss on each ack.  This makes the window
	 * size increase exponentially with time.  If the
	 * window is larger than the path can handle, this
	 * exponential growth results in dropped packet(s)
	 * almost immediately.  To get more time between
	 * drops but still "push" the network to take advantage
	 * of improving conditions, we switch from exponential
	 * to linear window opening at some threshold size.
	 * For a threshold, we use half the current window
	 * size, truncated to a multiple of the mss.
	 *
	 * (the minimum cwnd that will give us exponential
	 * growth is 2 mss.  We don't allow the threshold
	 * to go below this.)
	 */
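	/*
	 * Example with illustrative numbers: snd_cwnd == 16384,
	 * snd_wnd == 32768 and t_maxseg == 1460 give win == 16384 / 2 /
	 * 1460 == 5, so ssthresh restarts at 5 * 1460 == 7300 bytes while
	 * cwnd drops back to a single segment for slow start.
	 */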
	{
		u_long win = ulmin(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg;
		if (win < 2)
			win = 2;
		tp->snd_cwnd = tp->t_maxseg;
		tp->snd_ssthresh = win * tp->t_maxseg;
		tp->t_dupacks = 0;
#ifdef TCP_ECN
		tp->snd_last = tp->snd_max;
		tp->t_flags |= TF_SEND_CWR;
#endif
#if 1 /* TCP_ECN */
		tcpstat.tcps_cwr_timeout++;
#endif
	}
	(void) tcp_output(tp);

 out:
	splx(s);
}

void
tcp_timer_persist(void *arg)
{
	struct tcpcb *tp = arg;
	uint32_t rto;
	int s;

	s = splsoftnet();
	tcpstat.tcps_persisttimeo++;
	/*
	 * Hack: if the peer is dead/unreachable, we do not
	 * time out if the window is closed.  After a full
	 * backoff, drop the connection if the idle time
	 * (no responses to probes) reaches the maximum
	 * backoff that we would use if retransmitting.
	 */
	rto = TCP_REXMTVAL(tp);
	if (rto < tp->t_rttmin)
		rto = tp->t_rttmin;
	if (tp->t_rxtshift == TCP_MAXRXTSHIFT &&
	    ((tcp_now - tp->t_rcvtime) >= tcp_maxpersistidle ||
	    (tcp_now - tp->t_rcvtime) >= rto * tcp_totbackoff)) {
		tcpstat.tcps_persistdrop++;
		tp = tcp_drop(tp, ETIMEDOUT);
		goto out;
	}
	tcp_setpersist(tp);
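	/*
	 * t_force makes tcp_output() send a window probe (a single byte
	 * of data, if any is queued) even though the peer's advertised
	 * window is zero; it is cleared again right after the call below.
	 */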
	tp->t_force = 1;
	(void) tcp_output(tp);
	tp->t_force = 0;
 out:
	splx(s);
}

void
tcp_timer_keep(void *arg)
{
	struct tcpcb *tp = arg;
	int s;

	s = splsoftnet();

	tcpstat.tcps_keeptimeo++;
	if (TCPS_HAVEESTABLISHED(tp->t_state) == 0)
		goto dropit;
	if (tp->t_inpcb->inp_socket->so_options & SO_KEEPALIVE &&
	    tp->t_state <= TCPS_CLOSING) {
		if ((tcp_maxidle > 0) &&
		    ((tcp_now - tp->t_rcvtime) >= tcp_keepidle + tcp_maxidle))
			goto dropit;
		/*
		 * Send a packet designed to force a response
		 * if the peer is up and reachable:
		 * either an ACK if the connection is still alive,
		 * or an RST if the peer has closed the connection
		 * due to timeout or reboot.
		 * Using sequence number tp->snd_una-1
		 * causes the transmitted zero-length segment
		 * to lie outside the receive window;
		 * by the protocol spec, this requires the
		 * correspondent TCP to respond.
		 */
		tcpstat.tcps_keepprobe++;
#ifdef TCP_COMPAT_42
		/*
		 * The keepalive packet must have nonzero length
		 * to get a 4.2 host to respond.
		 */
		tcp_respond(tp, mtod(tp->t_template, caddr_t),
		    (struct mbuf *)NULL, tp->rcv_nxt - 1, tp->snd_una - 1, 0);
#else
		tcp_respond(tp, mtod(tp->t_template, caddr_t),
		    (struct mbuf *)NULL, tp->rcv_nxt, tp->snd_una - 1, 0);
#endif
		TCP_TIMER_ARM(tp, TCPT_KEEP, tcp_keepintvl);
	} else
		TCP_TIMER_ARM(tp, TCPT_KEEP, tcp_keepidle);
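
	/*
	 * Net effect with the stock settings: with SO_KEEPALIVE set, the
	 * first probe goes out after tcp_keepidle (2 hours) of silence,
	 * further probes follow every tcp_keepintvl (75 seconds), and once
	 * the connection has been silent for tcp_keepidle + tcp_maxidle
	 * the dropit check above tears it down.
	 */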

	splx(s);
	return;

 dropit:
	tcpstat.tcps_keepdrops++;
	tp = tcp_drop(tp, ETIMEDOUT);

	splx(s);
}

void
tcp_timer_2msl(void *arg)
{
	struct tcpcb *tp = arg;
	int s;

	s = splsoftnet();

#ifdef TCP_SACK
	tcp_timer_freesack(tp);
#endif

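	/*
	 * This timer is shared: in TIME_WAIT it ends the 2*MSL wait and
	 * closes the connection; in the other states that use it
	 * (typically FIN_WAIT_2) the timer is re-armed with tcp_keepintvl
	 * until the connection has been idle for longer than tcp_maxidle,
	 * at which point it is closed.
	 */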
	if (tp->t_state != TCPS_TIME_WAIT &&
	    ((tcp_maxidle == 0) || ((tcp_now - tp->t_rcvtime) <= tcp_maxidle)))
		TCP_TIMER_ARM(tp, TCPT_2MSL, tcp_keepintvl);
	else
		tp = tcp_close(tp);

	splx(s);
}