/*	$OpenBSD: tcp_subr.c,v 1.7 1996/07/29 22:01:50 niklas Exp $	*/
/*	$NetBSD: tcp_subr.c,v 1.22 1996/02/13 23:44:00 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/errno.h>

#include <net/route.h>
#include <net/if.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>

/* patchable/settable parameters for tcp */
int 	tcp_mssdflt = TCP_MSS;
int 	tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;

/*
 * Configure kernel with options "TCP_DO_RFC1323=0" to disable RFC1323 stuff.
 * This is a good idea over slow SLIP/PPP links, because the timestamp
 * pretty well destroys the VJ compression (any packet with a timestamp
 * different from the previous one can't be compressed), as well as adding
 * more overhead.
 * XXX And it should be a settable per route characteristic (with this just
 * used as the default).
 */
#ifndef TCP_DO_RFC1323
#define TCP_DO_RFC1323 1
#endif
int    tcp_do_rfc1323 = TCP_DO_RFC1323;
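/*
 * tcp_do_rfc1323 is consulted in tcp_newtcpcb() below; when non-zero,
 * new connections request window scaling and timestamps by setting
 * TF_REQ_SCALE and TF_REQ_TSTMP.
 */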

#ifndef TCBHASHSIZE
#define	TCBHASHSIZE	128
#endif
int	tcbhashsize = TCBHASHSIZE;

/*
 * Tcp initialization
 */
void
tcp_init()
{
#ifdef TCP_COMPAT_42
	tcp_iss = 1;		/* wrong */
#else /* TCP_COMPAT_42 */
	tcp_iss = random() + 1;
#endif /* !TCP_COMPAT_42 */
	in_pcbinit(&tcbtable, tcbhashsize);
	if (max_protohdr < sizeof(struct tcpiphdr))
		max_protohdr = sizeof(struct tcpiphdr);
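	/*
	 * TCP builds outgoing headers by reserving max_linkhdr bytes and
	 * then placing a struct tcpiphdr in a single header mbuf (see
	 * tcp_respond() below), so the combination must fit in MHLEN.
	 */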
	if (max_linkhdr + sizeof(struct tcpiphdr) > MHLEN)
		panic("tcp_init");
}

/*
 * Create template to be used to send tcp packets on a connection.
 * Call after host entry created, allocates an mbuf and fills
 * in a skeletal tcp/ip header, minimizing the amount of work
 * necessary when the connection is used.
 */
struct tcpiphdr *
tcp_template(tp)
	struct tcpcb *tp;
{
	register struct inpcb *inp = tp->t_inpcb;
	register struct mbuf *m;
	register struct tcpiphdr *n;

	if ((n = tp->t_template) == 0) {
		m = m_get(M_DONTWAIT, MT_HEADER);
		if (m == NULL)
			return (0);
		m->m_len = sizeof (struct tcpiphdr);
		n = mtod(m, struct tcpiphdr *);
	}
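	/*
	 * Fill in the invariant fields: the overlaid pseudo-header used
	 * for TCP checksumming (ti_x1, ti_pr, ti_len) and the connection's
	 * addresses and ports.  Per-segment fields are left zeroed for
	 * the senders to fill in.
	 */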
	bzero(n->ti_x1, sizeof n->ti_x1);
	n->ti_pr = IPPROTO_TCP;
	n->ti_len = htons(sizeof (struct tcpiphdr) - sizeof (struct ip));
	n->ti_src = inp->inp_laddr;
	n->ti_dst = inp->inp_faddr;
	n->ti_sport = inp->inp_lport;
	n->ti_dport = inp->inp_fport;
	n->ti_seq = 0;
	n->ti_ack = 0;
	n->ti_x2 = 0;
	n->ti_off = 5;
	n->ti_flags = 0;
	n->ti_win = 0;
	n->ti_sum = 0;
	n->ti_urp = 0;
	return (n);
}

/*
 * Send a single message to the TCP at address specified by
 * the given TCP/IP header.  If m == 0, then we make a copy
 * of the tcpiphdr at ti and send directly to the addressed host.
 * This is used to force keep alive messages out using the TCP
 * template for a connection tp->t_template.  If flags are given
 * then we send a message back to the TCP which originated the
 * segment ti, and discard the mbuf containing it and any other
 * attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 */
void
tcp_respond(tp, ti, m, ack, seq, flags)
	struct tcpcb *tp;
	register struct tcpiphdr *ti;
	register struct mbuf *m;
	tcp_seq ack, seq;
	int flags;
{
	register int tlen;
	int win = 0;
	struct route *ro = 0;

	if (tp) {
		win = sbspace(&tp->t_inpcb->inp_socket->so_rcv);
		ro = &tp->t_inpcb->inp_route;
	}
	if (m == 0) {
		m = m_gethdr(M_DONTWAIT, MT_HEADER);
		if (m == NULL)
			return;
#ifdef TCP_COMPAT_42
		tlen = 1;
#else
		tlen = 0;
#endif
		m->m_data += max_linkhdr;
		*mtod(m, struct tcpiphdr *) = *ti;
		ti = mtod(m, struct tcpiphdr *);
		flags = TH_ACK;
	} else {
		m_freem(m->m_next);
		m->m_next = 0;
		m->m_data = (caddr_t)ti;
		m->m_len = sizeof (struct tcpiphdr);
		tlen = 0;
#define xchg(a,b,type) { type t; t=a; a=b; b=t; }
		xchg(ti->ti_dst.s_addr, ti->ti_src.s_addr, u_int32_t);
		xchg(ti->ti_dport, ti->ti_sport, u_int16_t);
#undef xchg
	}
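	/*
	 * ti_len carries the TCP segment length (header plus data) in
	 * network byte order for the checksum pseudo-header; tlen then
	 * grows to the full IP+TCP length used for the mbuf and ip_len.
	 */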
	ti->ti_len = htons((u_int16_t)(sizeof (struct tcphdr) + tlen));
	tlen += sizeof (struct tcpiphdr);
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = (struct ifnet *) 0;
	bzero(ti->ti_x1, sizeof ti->ti_x1);
	ti->ti_seq = htonl(seq);
	ti->ti_ack = htonl(ack);
	ti->ti_x2 = 0;
	ti->ti_off = sizeof (struct tcphdr) >> 2;
	ti->ti_flags = flags;
	if (tp)
		ti->ti_win = htons((u_int16_t) (win >> tp->rcv_scale));
	else
		ti->ti_win = htons((u_int16_t)win);
	ti->ti_urp = 0;
	ti->ti_sum = 0;
	ti->ti_sum = in_cksum(m, tlen);
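	/*
	 * The tcpiphdr overlays the IP header; give ip_output() the total
	 * packet length (in host byte order, as it expects) and a TTL,
	 * which it does not fill in for us.
	 */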
	((struct ip *)ti)->ip_len = tlen;
	((struct ip *)ti)->ip_ttl = ip_defttl;
	(void) ip_output(m, NULL, ro, 0, NULL);
}

/*
 * Create a new TCP control block, making an
 * empty reassembly queue and hooking it to the argument
 * protocol control block.
 */
struct tcpcb *
tcp_newtcpcb(inp)
	struct inpcb *inp;
{
	register struct tcpcb *tp;

	tp = malloc(sizeof(*tp), M_PCB, M_NOWAIT);
	if (tp == NULL)
		return ((struct tcpcb *)0);
	bzero((char *) tp, sizeof(struct tcpcb));
	LIST_INIT(&tp->segq);
	tp->t_maxseg = tcp_mssdflt;

	tp->t_flags = tcp_do_rfc1323 ? (TF_REQ_SCALE|TF_REQ_TSTMP) : 0;
	tp->t_inpcb = inp;
	/*
	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
	 * rtt estimate.  Set rttvar so that srtt + 2 * rttvar gives
	 * reasonable initial retransmit time.
	 */
	tp->t_srtt = TCPTV_SRTTBASE;
	tp->t_rttvar = tcp_rttdflt * PR_SLOWHZ << (TCP_RTTVAR_SHIFT + 2 - 1);
	tp->t_rttmin = TCPTV_MIN;
	TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
	    TCPTV_MIN, TCPTV_REXMTMAX);
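	/*
	 * Start the congestion window and slow-start threshold at the
	 * largest possible window; they are scaled down later once the
	 * connection's maximum segment size is determined (tcp_mss()).
	 */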
	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	inp->inp_ip.ip_ttl = ip_defttl;
	inp->inp_ppcb = (caddr_t)tp;
	return (tp);
}

/*
 * Drop a TCP connection, reporting
 * the specified error.  If connection is synchronized,
 * then send a RST to peer.
 */
struct tcpcb *
tcp_drop(tp, errno)
	register struct tcpcb *tp;
	int errno;
{
	struct socket *so = tp->t_inpcb->inp_socket;

	if (TCPS_HAVERCVDSYN(tp->t_state)) {
		tp->t_state = TCPS_CLOSED;
		(void) tcp_output(tp);
		tcpstat.tcps_drops++;
	} else
		tcpstat.tcps_conndrops++;
	if (errno == ETIMEDOUT && tp->t_softerror)
		errno = tp->t_softerror;
	so->so_error = errno;
	return (tcp_close(tp));
}

/*
 * Close a TCP control block:
 *	discard all space held by the tcp
 *	discard internet protocol block
 *	wake up any sleepers
 */
struct tcpcb *
tcp_close(tp)
	register struct tcpcb *tp;
{
	register struct ipqent *qe;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
#ifdef RTV_RTT
	register struct rtentry *rt;

	/*
	 * If we sent enough data to get some meaningful characteristics,
	 * save them in the routing entry.  'Enough' is arbitrarily
	 * defined as the sendpipesize (default 4K) * 16.  This would
	 * give us 16 rtt samples assuming we only get one sample per
	 * window (the usual case on a long haul net).  16 samples is
	 * enough for the srtt filter to converge to within 5% of the correct
	 * value; fewer samples and we could save a very bogus rtt.
	 *
	 * Don't update the default route's characteristics and don't
	 * update anything that the user "locked".
	 */
	if (SEQ_LT(tp->iss + so->so_snd.sb_hiwat * 16, tp->snd_max) &&
	    (rt = inp->inp_route.ro_rt) &&
	    satosin(rt_key(rt))->sin_addr.s_addr != INADDR_ANY) {
		register u_long i = 0;

		if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) {
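			/*
			 * Convert srtt from PR_SLOWHZ ticks scaled by
			 * TCP_RTT_SCALE to the RTM_RTTUNIT units used by
			 * the route metrics, then filter into rmx_rtt.
			 */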
			i = tp->t_srtt *
			    (RTM_RTTUNIT / (PR_SLOWHZ * TCP_RTT_SCALE));
			if (rt->rt_rmx.rmx_rtt && i)
				/*
				 * filter this update to half the old & half
				 * the new values, converting scale.
				 * See route.h and tcp_var.h for a
				 * description of the scaling constants.
				 */
				rt->rt_rmx.rmx_rtt =
				    (rt->rt_rmx.rmx_rtt + i) / 2;
			else
				rt->rt_rmx.rmx_rtt = i;
		}
		if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) {
			i = tp->t_rttvar *
			    (RTM_RTTUNIT / (PR_SLOWHZ * TCP_RTTVAR_SCALE));
			if (rt->rt_rmx.rmx_rttvar && i)
				rt->rt_rmx.rmx_rttvar =
				    (rt->rt_rmx.rmx_rttvar + i) / 2;
			else
				rt->rt_rmx.rmx_rttvar = i;
		}
		/*
		 * update the pipelimit (ssthresh) if it has been updated
		 * already or if a pipesize was specified & the threshold
		 * got below half the pipesize.  I.e., wait for bad news
		 * before we start updating, then update on both good
		 * and bad news.
		 */
		if (((rt->rt_rmx.rmx_locks & RTV_SSTHRESH) == 0 &&
		    (i = tp->snd_ssthresh) && rt->rt_rmx.rmx_ssthresh) ||
		    i < (rt->rt_rmx.rmx_sendpipe / 2)) {
			/*
			 * convert the limit from user data bytes to
			 * packets then to packet data bytes.
			 */
			i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
			if (i < 2)
				i = 2;
			i *= (u_long)(tp->t_maxseg + sizeof (struct tcpiphdr));
			if (rt->rt_rmx.rmx_ssthresh)
				rt->rt_rmx.rmx_ssthresh =
				    (rt->rt_rmx.rmx_ssthresh + i) / 2;
			else
				rt->rt_rmx.rmx_ssthresh = i;
		}
	}
#endif /* RTV_RTT */
	/* free the reassembly queue, if any */
	while ((qe = tp->segq.lh_first) != NULL) {
		LIST_REMOVE(qe, ipqe_q);
		m_freem(qe->ipqe_m);
		FREE(qe, M_IPQ);
	}
	if (tp->t_template)
		(void) m_free(dtom(tp->t_template));
	free(tp, M_PCB);
	inp->inp_ppcb = 0;
	soisdisconnected(so);
	in_pcbdetach(inp);
	tcpstat.tcps_closed++;
	return ((struct tcpcb *)0);
}

void
tcp_drain()
{

}

/*
 * Notify a tcp user of an asynchronous error;
 * store error as soft error, but wake up user
 * (for now, won't do anything until can select for soft error).
 */
void
tcp_notify(inp, error)
	struct inpcb *inp;
	int error;
{
	register struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb;
	register struct socket *so = inp->inp_socket;

	/*
	 * Ignore some errors if we are hooked up.
	 * If connection hasn't completed, has retransmitted several times,
	 * and receives a second error, give up now.  This is better
	 * than waiting a long time to establish a connection that
	 * can never complete.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	     (error == EHOSTUNREACH || error == ENETUNREACH ||
	      error == EHOSTDOWN)) {
		return;
	} else if (TCPS_HAVEESTABLISHED(tp->t_state) == 0 &&
	    tp->t_rxtshift > 3 && tp->t_softerror)
		so->so_error = error;
	else
		tp->t_softerror = error;
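	/*
	 * Wake anything sleeping on the socket (e.g. a pending connect)
	 * and post read/write events so the error is noticed promptly.
	 */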
	wakeup((caddr_t) &so->so_timeo);
	sorwakeup(so);
	sowwakeup(so);
}

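/*
 * Handle a control input (typically ICMP-derived) for TCP: map the
 * PRC_ code to an errno, pick the notify routine (source quench and
 * redirects get special handling), and notify the matching PCB(s).
 */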
void *
tcp_ctlinput(cmd, sa, v)
	int cmd;
	struct sockaddr *sa;
	register void *v;
{
	register struct ip *ip = v;
	register struct tcphdr *th;
	extern int inetctlerrmap[];
	void (*notify) __P((struct inpcb *, int)) = tcp_notify;
	int errno;

	if ((unsigned)cmd >= PRC_NCMDS)
		return NULL;
	errno = inetctlerrmap[cmd];
	if (cmd == PRC_QUENCH)
		notify = tcp_quench;
	else if (PRC_IS_REDIRECT(cmd))
		notify = in_rtchange, ip = 0;
	else if (cmd == PRC_HOSTDEAD)
		ip = 0;
	else if (errno == 0)
		return NULL;
	if (ip) {
		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
		in_pcbnotify(&tcbtable, sa, th->th_dport, ip->ip_src,
		    th->th_sport, errno, notify);
	} else
		in_pcbnotifyall(&tcbtable, sa, errno, notify);
	return NULL;
}

/*
 * When a source quench is received, close congestion window
 * to one segment.  We will gradually open it again as we proceed.
 */
void
tcp_quench(inp, errno)
	struct inpcb *inp;
	int errno;
{
	struct tcpcb *tp = intotcpcb(inp);

	if (tp)
		tp->snd_cwnd = tp->t_maxseg;
}
467