/*	$OpenBSD: tcp_timer.c,v 1.31 2002/11/06 01:52:08 kjc Exp $	*/
/*	$NetBSD: tcp_timer.c,v 1.14 1996/02/13 23:44:09 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_timer.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/kernel.h>

#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/ip_icmp.h>

int	tcp_keepidle;
int	tcp_keepintvl;
int	tcp_maxpersistidle;	/* max idle time in persist */
int	tcp_maxidle;

/*
 * Time to delay the ACK.  This is initialized in tcp_init(), unless
 * it's patched.
 */
int	tcp_delack_ticks;

void	tcp_timer_rexmt(void *);
void	tcp_timer_persist(void *);
void	tcp_timer_keep(void *);
void	tcp_timer_2msl(void *);

const tcp_timer_func_t tcp_timer_funcs[TCPT_NTIMERS] = {
	tcp_timer_rexmt,
	tcp_timer_persist,
	tcp_timer_keep,
	tcp_timer_2msl,
};
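
/*
 * Note: the order of the entries above is assumed to match the TCPT_*
 * timer indices (TCPT_REXMT, TCPT_PERSIST, TCPT_KEEP, TCPT_2MSL) used
 * by TCP_TIMER_ARM()/TCP_TIMER_DISARM() in tcp_timer.h.
 */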

/*
 * Timer state initialization, called from tcp_init().
 */
void
tcp_timer_init(void)
{

	if (tcp_keepidle == 0)
		tcp_keepidle = TCPTV_KEEP_IDLE;

	if (tcp_keepintvl == 0)
		tcp_keepintvl = TCPTV_KEEPINTVL;

	if (tcp_maxpersistidle == 0)
		tcp_maxpersistidle = TCPTV_KEEP_IDLE;

	if (tcp_delack_ticks == 0)
		tcp_delack_ticks = TCP_DELACK_TICKS;
}
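
/*
 * With the stock TCPTV_* values from tcp_timer.h (recalled here as an
 * assumption, not taken from this file) the defaults work out to roughly
 * a two hour keepalive idle time and a 75 second interval between
 * keepalive probes; tcp_delack_ticks is likewise a small fraction of hz.
 */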

/*
 * Callout to process delayed ACKs for a TCPCB.
 */
void
tcp_delack(void *arg)
{
	struct tcpcb *tp = arg;
	int s;

	/*
	 * If tcp_output() wasn't able to transmit the ACK
	 * for whatever reason, it will restart the delayed
	 * ACK callout.
	 */

	s = splsoftnet();
	tp->t_flags |= TF_ACKNOW;
	(void) tcp_output(tp);
	splx(s);
}
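
/*
 * Note that this callout only fires while a delayed ACK is pending; the
 * input path is responsible for (re)arming it, so all that is needed here
 * is to mark the connection TF_ACKNOW and push the ACK out.
 */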

/*
 * TCP protocol timeout routine called every 500 ms.
 * Advances tcp_now, the half-second clock used for timestamps and
 * idle-time bookkeeping, and recomputes tcp_maxidle; the individual
 * connection timers run as separate timeouts handled by the
 * callbacks below.
 */
void
tcp_slowtimo(void)
{
	int s;

	s = splsoftnet();
	tcp_maxidle = TCPTV_KEEPCNT * tcp_keepintvl;
#ifdef TCP_COMPAT_42
	tcp_iss += TCP_ISSINCR/PR_SLOWHZ;		/* increment iss */
	if ((int)tcp_iss < 0)
		tcp_iss = 0;				/* XXX */
#endif /* TCP_COMPAT_42 */
	tcp_now++;					/* for timestamps */
	splx(s);
}
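
/*
 * tcp_now advances here PR_SLOWHZ (2) times a second, so the timestamp
 * clock and the TCPTV_* timer values used in this file are measured in
 * half-second "slow ticks".
 */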

/*
 * Cancel all timers for TCP tp.
 */
void
tcp_canceltimers(struct tcpcb *tp)
{
	int i;

	for (i = 0; i < TCPT_NTIMERS; i++)
		TCP_TIMER_DISARM(tp, i);
}

int	tcp_backoff[TCP_MAXRXTSHIFT + 1] =
    { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };

int tcp_totbackoff = 511;	/* sum of tcp_backoff[] */
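
/*
 * Illustrative sketch of the resulting retransmit backoff (assuming, for
 * the example only, an initial RTO of one second): successive timeouts
 * wait roughly 1, 2, 4, ... seconds, following tcp_backoff[], with each
 * value clamped by TCPT_RANGESET() to at most TCPTV_REXMTMAX before the
 * connection is finally dropped after TCP_MAXRXTSHIFT retransmissions.
 */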

/*
 * TCP timer processing.
 */

#ifdef TCP_SACK
void	tcp_timer_freesack(struct tcpcb *);

void
tcp_timer_freesack(struct tcpcb *tp)
{
	struct sackhole *p, *q;
	/*
	 * Free SACK holes for 2MSL and REXMT timers.
	 */
	q = tp->snd_holes;
	while (q != NULL) {
		p = q;
		q = q->next;
		pool_put(&sackhl_pool, p);
	}
	tp->snd_holes = NULL;
#ifdef TCP_FACK
	tp->snd_fack = tp->snd_una;
	tp->retran_data = 0;
	tp->snd_awnd = 0;
#endif /* TCP_FACK */
}
#endif /* TCP_SACK */

void
tcp_timer_rexmt(void *arg)
{
	struct tcpcb *tp = arg;
	uint32_t rto;
	int s;

	s = splsoftnet();

#ifdef TCP_SACK
	tcp_timer_freesack(tp);
#endif
	if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) {
		tp->t_rxtshift = TCP_MAXRXTSHIFT;
		tcpstat.tcps_timeoutdrop++;
		tp = tcp_drop(tp, tp->t_softerror ?
		    tp->t_softerror : ETIMEDOUT);
		goto out;
	}
	tcpstat.tcps_rexmttimeo++;
	rto = TCP_REXMTVAL(tp);
	if (rto < tp->t_rttmin)
		rto = tp->t_rttmin;
	TCPT_RANGESET((long) tp->t_rxtcur,
	    rto * tcp_backoff[tp->t_rxtshift],
	    tp->t_rttmin, TCPTV_REXMTMAX);
	TCP_TIMER_ARM(tp, TCPT_REXMT, tp->t_rxtcur);

	/*
	 * If we are losing and we are trying path MTU discovery,
	 * try turning it off.  This will avoid black holes in
	 * the network which suppress or fail to send "packet
	 * too big" ICMP messages.  We should ideally do
	 * lots more sophisticated searching to find the right
	 * value here...
	 */
	if (ip_mtudisc && tp->t_inpcb &&
	    TCPS_HAVEESTABLISHED(tp->t_state) &&
	    tp->t_rxtshift > TCP_MAXRXTSHIFT / 6) {
		struct inpcb *inp = tp->t_inpcb;
		struct rtentry *rt = NULL;
		struct sockaddr_in sin;

		/* No data to send means path MTU is not a problem. */
		if (!inp->inp_socket->so_snd.sb_cc)
			goto leave;

		rt = in_pcbrtentry(inp);
		/* Check if path MTU discovery is disabled already. */
		if (rt && (rt->rt_flags & RTF_HOST) &&
		    (rt->rt_rmx.rmx_locks & RTV_MTU))
			goto leave;

		rt = NULL;
		switch (tp->pf) {
#ifdef INET6
		case PF_INET6:
			/*
			 * We cannot turn off path MTU discovery for IPv6.
			 * Do nothing for now, maybe lower to
			 * minimum MTU.
			 */
			break;
#endif
		case PF_INET:
			bzero(&sin, sizeof(struct sockaddr_in));
			sin.sin_family = AF_INET;
			sin.sin_len = sizeof(struct sockaddr_in);
			sin.sin_addr = inp->inp_faddr;
			rt = icmp_mtudisc_clone(sintosa(&sin));
			break;
		}
		if (rt != NULL) {
			/* Disable path MTU discovery */
			if ((rt->rt_rmx.rmx_locks & RTV_MTU) == 0) {
				rt->rt_rmx.rmx_locks |= RTV_MTU;
				in_rtchange(inp, 0);
			}

			rtfree(rt);
		}
	leave:
		;
	}

	/*
	 * If losing, let the lower level know and try for
	 * a better route.  Also, if we backed off this far,
	 * our srtt estimate is probably bogus.  Clobber it
	 * so we'll take the next rtt measurement as our srtt;
	 * move the current srtt into rttvar to keep the current
	 * retransmit times until then.
	 */
	if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
		in_losing(tp->t_inpcb);
		tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
		tp->t_srtt = 0;
	}
	tp->snd_nxt = tp->snd_una;
#if defined(TCP_SACK)
	/*
	 * Note:  We overload snd_last to function also as the
	 * snd_last variable described in RFC 2582
	 */
	tp->snd_last = tp->snd_max;
#endif /* TCP_SACK */
	/*
	 * If timing a segment in this window, stop the timer.
	 */
	tp->t_rtttime = 0;
#ifdef TCP_ECN
	/*
	 * If ECN is enabled, there might be a broken firewall which
	 * blocks ECN packets.  Fall back to non-ECN.
	 */
	if ((tp->t_state == TCPS_SYN_SENT || tp->t_state == TCPS_SYN_RECEIVED)
	    && tcp_do_ecn && !(tp->t_flags & TF_DISABLE_ECN))
		tp->t_flags |= TF_DISABLE_ECN;
#endif
	/*
	 * Close the congestion window down to one segment
	 * (we'll open it by one segment for each ack we get).
	 * Since we probably have a window's worth of unacked
	 * data accumulated, this "slow start" keeps us from
	 * dumping all that data as back-to-back packets (which
	 * might overwhelm an intermediate gateway).
	 *
	 * There are two phases to the opening: Initially we
	 * open by one mss on each ack.  This makes the window
	 * size increase exponentially with time.  If the
	 * window is larger than the path can handle, this
	 * exponential growth results in dropped packet(s)
	 * almost immediately.  To get more time between
	 * drops but still "push" the network to take advantage
	 * of improving conditions, we switch from exponential
	 * to linear window opening at some threshold size.
	 * For a threshold, we use half the current window
	 * size, truncated to a multiple of the mss.
	 *
	 * (the minimum cwnd that will give us exponential
	 * growth is 2 mss.  We don't allow the threshold
	 * to go below this.)
	 */
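	/*
	 * Illustrative example (an assumption for clarity, not taken from
	 * this code): with snd_wnd = snd_cwnd = 16384 bytes and a 1460-byte
	 * t_maxseg, win = 16384 / 2 / 1460 = 5, so snd_ssthresh becomes
	 * 5 * 1460 = 7300 bytes while snd_cwnd restarts at one segment.
	 */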
	{
		u_long win = ulmin(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg;
		if (win < 2)
			win = 2;
		tp->snd_cwnd = tp->t_maxseg;
		tp->snd_ssthresh = win * tp->t_maxseg;
		tp->t_dupacks = 0;
#ifdef TCP_ECN
		tp->snd_last = tp->snd_max;
		tp->t_flags |= TF_SEND_CWR;
#endif
#if 1 /* TCP_ECN */
		tcpstat.tcps_cwr_timeout++;
#endif
	}
	(void) tcp_output(tp);

 out:
	splx(s);
}

void
tcp_timer_persist(void *arg)
{
	struct tcpcb *tp = arg;
	uint32_t rto;
	int s;

	s = splsoftnet();
	tcpstat.tcps_persisttimeo++;
	/*
	 * Hack: if the peer is dead/unreachable, we do not
	 * time out if the window is closed.  After a full
	 * backoff, drop the connection if the idle time
	 * (no responses to probes) reaches the maximum
	 * backoff that we would use if retransmitting.
	 */
	rto = TCP_REXMTVAL(tp);
	if (rto < tp->t_rttmin)
		rto = tp->t_rttmin;
	if (tp->t_rxtshift == TCP_MAXRXTSHIFT &&
	    ((tcp_now - tp->t_rcvtime) >= tcp_maxpersistidle ||
	    (tcp_now - tp->t_rcvtime) >= rto * tcp_totbackoff)) {
		tcpstat.tcps_persistdrop++;
		tp = tcp_drop(tp, ETIMEDOUT);
		goto out;
	}
	tcp_setpersist(tp);
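	/*
	 * Force tcp_output() to send a window probe (at most one byte of
	 * data) even though the advertised window is closed.
	 */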
	tp->t_force = 1;
	(void) tcp_output(tp);
	tp->t_force = 0;
 out:
	splx(s);
}

void
tcp_timer_keep(void *arg)
{
	struct tcpcb *tp = arg;
	int s;

	s = splsoftnet();

	tcpstat.tcps_keeptimeo++;
	if (TCPS_HAVEESTABLISHED(tp->t_state) == 0)
		goto dropit;
	if (tp->t_inpcb->inp_socket->so_options & SO_KEEPALIVE &&
	    tp->t_state <= TCPS_CLOSING) {
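		/*
		 * Drop the connection once it has been idle for
		 * tcp_keepidle plus tcp_maxidle.  With the usual defaults
		 * (a rough sketch from the standard TCPTV_* values, not
		 * computed here) that is about two hours of idle time
		 * followed by roughly ten minutes of unanswered probes.
		 */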
		if ((tcp_maxidle > 0) &&
		    ((tcp_now - tp->t_rcvtime) >= tcp_keepidle + tcp_maxidle))
			goto dropit;
		/*
		 * Send a packet designed to force a response
		 * if the peer is up and reachable:
		 * either an ACK if the connection is still alive,
		 * or an RST if the peer has closed the connection
		 * due to timeout or reboot.
		 * Using sequence number tp->snd_una-1
		 * causes the transmitted zero-length segment
		 * to lie outside the receive window;
		 * by the protocol spec, this requires the
		 * correspondent TCP to respond.
		 */
		tcpstat.tcps_keepprobe++;
#ifdef TCP_COMPAT_42
		/*
		 * The keepalive packet must have nonzero length
		 * to get a 4.2 host to respond.
		 */
		tcp_respond(tp, mtod(tp->t_template, caddr_t),
		    (struct mbuf *)NULL, tp->rcv_nxt - 1, tp->snd_una - 1, 0);
#else
		tcp_respond(tp, mtod(tp->t_template, caddr_t),
		    (struct mbuf *)NULL, tp->rcv_nxt, tp->snd_una - 1, 0);
#endif
		TCP_TIMER_ARM(tp, TCPT_KEEP, tcp_keepintvl);
	} else
		TCP_TIMER_ARM(tp, TCPT_KEEP, tcp_keepidle);

	splx(s);
	return;

 dropit:
	tcpstat.tcps_keepdrops++;
	tp = tcp_drop(tp, ETIMEDOUT);

	splx(s);
}

void
tcp_timer_2msl(void *arg)
{
	struct tcpcb *tp = arg;
	int s;

	s = splsoftnet();

#ifdef TCP_SACK
	tcp_timer_freesack(tp);
#endif

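	/*
	 * In FIN_WAIT_2 keep waiting, rearming the timer, as long as the
	 * connection has not been idle longer than tcp_maxidle; once it
	 * has, or once the 2MSL wait in TIME_WAIT expires, release the TCB.
	 */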
	if (tp->t_state != TCPS_TIME_WAIT &&
	    ((tcp_maxidle == 0) || ((tcp_now - tp->t_rcvtime) <= tcp_maxidle)))
		TCP_TIMER_ARM(tp, TCPT_2MSL, tcp_keepintvl);
	else
		tp = tcp_close(tp);

	splx(s);
}
464