xref: /openbsd-src/sys/netinet/tcp_subr.c (revision daf88648c0e349d5c02e1504293082072c981640)
1 /*	$OpenBSD: tcp_subr.c,v 1.93 2006/03/04 22:40:16 brad Exp $	*/
2 /*	$NetBSD: tcp_subr.c,v 1.22 1996/02/13 23:44:00 christos Exp $	*/
3 
4 /*
5  * Copyright (c) 1982, 1986, 1988, 1990, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *	@(#)COPYRIGHT	1.1 (NRL) 17 January 1995
33  *
34  * NRL grants permission for redistribution and use in source and binary
35  * forms, with or without modification, of the software and documentation
36  * created at NRL provided that the following conditions are met:
37  *
38  * 1. Redistributions of source code must retain the above copyright
39  *    notice, this list of conditions and the following disclaimer.
40  * 2. Redistributions in binary form must reproduce the above copyright
41  *    notice, this list of conditions and the following disclaimer in the
42  *    documentation and/or other materials provided with the distribution.
43  * 3. All advertising materials mentioning features or use of this software
44  *    must display the following acknowledgements:
45  * 	This product includes software developed by the University of
46  * 	California, Berkeley and its contributors.
47  * 	This product includes software developed at the Information
48  * 	Technology Division, US Naval Research Laboratory.
49  * 4. Neither the name of the NRL nor the names of its contributors
50  *    may be used to endorse or promote products derived from this software
51  *    without specific prior written permission.
52  *
53  * THE SOFTWARE PROVIDED BY NRL IS PROVIDED BY NRL AND CONTRIBUTORS ``AS
54  * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
56  * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL NRL OR
57  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
58  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
59  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
60  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
61  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
62  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
63  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64  *
65  * The views and conclusions contained in the software and documentation
66  * are those of the authors and should not be interpreted as representing
67  * official policies, either expressed or implied, of the US Naval
68  * Research Laboratory (NRL).
69  */
70 
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/proc.h>
74 #include <sys/mbuf.h>
75 #include <sys/socket.h>
76 #include <sys/socketvar.h>
77 #include <sys/protosw.h>
78 #include <sys/kernel.h>
79 
80 #include <net/route.h>
81 #include <net/if.h>
82 
83 #include <netinet/in.h>
84 #include <netinet/in_systm.h>
85 #include <netinet/ip.h>
86 #include <netinet/in_pcb.h>
87 #include <netinet/ip_var.h>
88 #include <netinet/ip_icmp.h>
89 #include <netinet/tcp.h>
90 #include <netinet/tcp_fsm.h>
91 #include <netinet/tcp_seq.h>
92 #include <netinet/tcp_timer.h>
93 #include <netinet/tcp_var.h>
94 #include <netinet/tcpip.h>
95 #include <dev/rndvar.h>
96 
97 #ifdef INET6
98 #include <netinet6/in6_var.h>
99 #include <netinet6/ip6protosw.h>
100 #endif /* INET6 */
101 
102 #ifdef TCP_SIGNATURE
103 #include <crypto/md5.h>
104 #endif /* TCP_SIGNATURE */
105 
106 /* patchable/settable parameters for tcp */
107 int	tcp_mssdflt = TCP_MSS;
108 int	tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
109 
110 /* values controllable via sysctl */
111 int	tcp_do_rfc1323 = 1;
112 int	tcp_do_sack = 1;	/* RFC 2018 selective ACKs */
113 int	tcp_ack_on_push = 0;	/* set to enable immediate ACK-on-PUSH */
114 int	tcp_do_ecn = 0;		/* RFC3168 ECN enabled/disabled? */
115 int	tcp_do_rfc3390 = 1;	/* RFC3390 Increasing TCP's Initial Window */
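/*
 * The values above can be changed at run time with sysctl(8) under the
 * net.inet.tcp branch (see the TCPCTL_* definitions in tcp_var.h for
 * the authoritative list of names).  Purely as an illustration,
 * disabling SACK from userland looks roughly like:
 *
 *	# sysctl net.inet.tcp.sack=0
 */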
116 
117 u_int32_t	tcp_now = 1;
118 
119 #ifndef TCBHASHSIZE
120 #define	TCBHASHSIZE	128
121 #endif
122 int	tcbhashsize = TCBHASHSIZE;
123 
124 /* syn hash parameters */
125 #define	TCP_SYN_HASH_SIZE	293
126 #define	TCP_SYN_BUCKET_SIZE	35
127 int	tcp_syn_cache_size = TCP_SYN_HASH_SIZE;
128 int	tcp_syn_cache_limit = TCP_SYN_HASH_SIZE*TCP_SYN_BUCKET_SIZE;
129 int	tcp_syn_bucket_limit = 3*TCP_SYN_BUCKET_SIZE;
130 struct	syn_cache_head tcp_syn_cache[TCP_SYN_HASH_SIZE];
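/*
 * With the defaults above, the derived limits work out to
 * 293 * 35 = 10255 cached SYNs in total and 3 * 35 = 105 entries per
 * hash bucket; once either limit is hit the syn cache reclaims its
 * oldest entries rather than refusing new connections.
 */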
131 
132 int tcp_reass_limit = NMBCLUSTERS / 2; /* hardlimit for tcpqe_pool */
133 #ifdef TCP_SACK
134 int tcp_sackhole_limit = 32*1024; /* hardlimit for sackhl_pool */
135 #endif
136 
137 #ifdef INET6
138 extern int ip6_defhlim;
139 #endif /* INET6 */
140 
141 struct pool tcpcb_pool;
142 struct pool tcpqe_pool;
143 #ifdef TCP_SACK
144 struct pool sackhl_pool;
145 #endif
146 
147 struct tcpstat tcpstat;		/* tcp statistics */
148 tcp_seq  tcp_iss;
149 
150 /*
151  * Tcp initialization
152  */
153 void
154 tcp_init()
155 {
156 #ifdef TCP_COMPAT_42
157 	tcp_iss = 1;		/* wrong */
158 #endif /* TCP_COMPAT_42 */
159 	pool_init(&tcpcb_pool, sizeof(struct tcpcb), 0, 0, 0, "tcpcbpl",
160 	    NULL);
161 	pool_init(&tcpqe_pool, sizeof(struct tcpqent), 0, 0, 0, "tcpqepl",
162 	    NULL);
163 	pool_sethardlimit(&tcpqe_pool, tcp_reass_limit, NULL, 0);
164 #ifdef TCP_SACK
165 	pool_init(&sackhl_pool, sizeof(struct sackhole), 0, 0, 0, "sackhlpl",
166 	    NULL);
167 	pool_sethardlimit(&sackhl_pool, tcp_sackhole_limit, NULL, 0);
168 #endif /* TCP_SACK */
169 	in_pcbinit(&tcbtable, tcbhashsize);
170 
171 #ifdef INET6
172 	/*
173 	 * Since sizeof(struct ip6_hdr) > sizeof(struct ip), we
174 	 * do max length checks/computations only on the former.
175 	 */
176 	if (max_protohdr < (sizeof(struct ip6_hdr) + sizeof(struct tcphdr)))
177 		max_protohdr = (sizeof(struct ip6_hdr) + sizeof(struct tcphdr));
178 	if ((max_linkhdr + sizeof(struct ip6_hdr) + sizeof(struct tcphdr)) >
179 	    MHLEN)
180 		panic("tcp_init");
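	/*
	 * For scale: an IPv6 header is 40 bytes and a TCP header without
	 * options is 20 bytes, so the check above demands that max_linkhdr
	 * plus 60 bytes fit in an ordinary packet header mbuf (MHLEN); if
	 * that does not hold, TCP/IPv6 headers can never be built in a
	 * single mbuf and panicking at boot is the only sensible reaction.
	 */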
181 
182 	icmp6_mtudisc_callback_register(tcp6_mtudisc_callback);
183 #endif /* INET6 */
184 
185 	/* Initialize the compressed state engine. */
186 	syn_cache_init();
187 
188 	/* Initialize timer state. */
189 	tcp_timer_init();
190 }
191 
192 /*
193  * Create template to be used to send tcp packets on a connection.
194  * Call after host entry created, allocates an mbuf and fills
195  * in a skeletal tcp/ip header, minimizing the amount of work
196  * necessary when the connection is used.
197  *
198  * To support IPv6 in addition to IPv4 and considering that the sizes of
199  * the IPv4 and IPv6 headers are not the same, we now use a separate pointer
200  * for the TCP header.  Also, we made the former tcpiphdr header pointer
201  * into just an IP overlay pointer, with casting as appropriate for v6. rja
202  */
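/*
 * Illustrative only (the real callers sit in the connection setup
 * code): a consumer allocates the template once per connection and
 * keeps it in tp->t_template, roughly
 *
 *	tp->t_template = tcp_template(tp);
 *	if (tp->t_template == NULL)
 *		return (ENOBUFS);	(hypothetical error handling)
 *
 * after which each transmission copies the prebuilt IP/TCP header out
 * of the template and fills in only the per-segment fields.
 */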
203 struct mbuf *
204 tcp_template(tp)
205 	struct tcpcb *tp;
206 {
207 	struct inpcb *inp = tp->t_inpcb;
208 	struct mbuf *m;
209 	struct tcphdr *th;
210 
211 	if ((m = tp->t_template) == 0) {
212 		m = m_get(M_DONTWAIT, MT_HEADER);
213 		if (m == NULL)
214 			return (0);
215 
216 		switch (tp->pf) {
217 		case 0:	/*default to PF_INET*/
218 #ifdef INET
219 		case AF_INET:
220 			m->m_len = sizeof(struct ip);
221 			break;
222 #endif /* INET */
223 #ifdef INET6
224 		case AF_INET6:
225 			m->m_len = sizeof(struct ip6_hdr);
226 			break;
227 #endif /* INET6 */
228 		}
229 		m->m_len += sizeof (struct tcphdr);
230 
231 		/*
232 		 * The link header, network header, TCP header, and TCP options
233 		 * all must fit in this mbuf. For now, assume the worst case of
234 		 * TCP options size. Eventually, compute this from tp flags.
235 		 */
236 		if (m->m_len + MAX_TCPOPTLEN + max_linkhdr >= MHLEN) {
237 			MCLGET(m, M_DONTWAIT);
238 			if ((m->m_flags & M_EXT) == 0) {
239 				m_free(m);
240 				return (0);
241 			}
242 		}
243 	}
244 
245 	switch (tp->pf) {
246 #ifdef INET
247 	case AF_INET:
248 		{
249 			struct ipovly *ipovly;
250 
251 			ipovly = mtod(m, struct ipovly *);
252 
253 			bzero(ipovly->ih_x1, sizeof ipovly->ih_x1);
254 			ipovly->ih_pr = IPPROTO_TCP;
255 			ipovly->ih_len = htons(sizeof (struct tcphdr));
256 			ipovly->ih_src = inp->inp_laddr;
257 			ipovly->ih_dst = inp->inp_faddr;
258 
259 			th = (struct tcphdr *)(mtod(m, caddr_t) +
260 				sizeof(struct ip));
261 			th->th_sum = in_cksum_phdr(ipovly->ih_src.s_addr,
262 			    ipovly->ih_dst.s_addr,
263 			    htons(sizeof (struct tcphdr) + IPPROTO_TCP));
264 		}
265 		break;
266 #endif /* INET */
267 #ifdef INET6
268 	case AF_INET6:
269 		{
270 			struct ip6_hdr *ip6;
271 
272 			ip6 = mtod(m, struct ip6_hdr *);
273 
274 			ip6->ip6_src = inp->inp_laddr6;
275 			ip6->ip6_dst = inp->inp_faddr6;
276 			ip6->ip6_flow = htonl(0x60000000) |
277 			    (inp->inp_flowinfo & IPV6_FLOWLABEL_MASK);
278 
279 			ip6->ip6_nxt = IPPROTO_TCP;
280 			ip6->ip6_plen = htons(sizeof(struct tcphdr)); /*XXX*/
281 			ip6->ip6_hlim = in6_selecthlim(inp, NULL);	/*XXX*/
282 
283 			th = (struct tcphdr *)(mtod(m, caddr_t) +
284 				sizeof(struct ip6_hdr));
285 			th->th_sum = 0;
286 		}
287 		break;
288 #endif /* INET6 */
289 	}
290 
291 	th->th_sport = inp->inp_lport;
292 	th->th_dport = inp->inp_fport;
293 	th->th_seq = 0;
294 	th->th_ack = 0;
295 	th->th_x2  = 0;
296 	th->th_off = 5;
297 	th->th_flags = 0;
298 	th->th_win = 0;
299 	th->th_urp = 0;
300 	return (m);
301 }
302 
303 /*
304  * Send a single message to the TCP at address specified by
305  * the given TCP/IP header.  If m == 0, then we make a copy
306  * of the tcpiphdr at ti and send directly to the addressed host.
307  * This is used to force keep alive messages out using the TCP
308  * template for a connection tp->t_template.  If flags are given
309  * then we send a message back to the TCP which originated the
310  * segment ti, and discard the mbuf containing it and any other
311  * attached mbufs.
312  *
313  * In any case the ack and sequence number of the transmitted
314  * segment are as specified by the parameters.
315  */
316 #ifdef INET6
317 /* This function looks hairy, because it was so IPv4-dependent. */
318 #endif /* INET6 */
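/*
 * A sketch of the template-based use described above: the keepalive
 * timer sends a probe carrying an old sequence number with something
 * along the lines of
 *
 *	tcp_respond(tp, mtod(tp->t_template, caddr_t), NULL,
 *	    tp->rcv_nxt, tp->snd_una - 1, 0);
 *
 * (m == NULL, so the segment is built from the template and forced to
 * be a bare ACK), while the input path passes the offending mbuf and
 * TH_RST to answer segments for which no connection exists.
 */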
319 void
320 tcp_respond(tp, template, m, ack, seq, flags)
321 	struct tcpcb *tp;
322 	caddr_t template;
323 	struct mbuf *m;
324 	tcp_seq ack, seq;
325 	int flags;
326 {
327 	int tlen;
328 	int win = 0;
329 	struct route *ro = 0;
330 	struct tcphdr *th;
331 	struct tcpiphdr *ti = (struct tcpiphdr *)template;
332 	int af;		/* af on wire */
333 
334 	if (tp) {
335 		win = sbspace(&tp->t_inpcb->inp_socket->so_rcv);
336 		/*
337 		 * If this is called with an unconnected
338 		 * socket/tp/pcb (tp->pf is 0), we lose.
339 		 */
340 		af = tp->pf;
341 
342 		/*
343 		 * The route/route6 distinction is meaningless
344 		 * unless you're allocating space or passing parameters.
345 		 */
346 		ro = &tp->t_inpcb->inp_route;
347 	} else
348 		af = (((struct ip *)ti)->ip_v == 6) ? AF_INET6 : AF_INET;
349 	if (m == 0) {
350 		m = m_gethdr(M_DONTWAIT, MT_HEADER);
351 		if (m == NULL)
352 			return;
353 #ifdef TCP_COMPAT_42
354 		tlen = 1;
355 #else
356 		tlen = 0;
357 #endif
358 		m->m_data += max_linkhdr;
359 		switch (af) {
360 #ifdef INET6
361 		case AF_INET6:
362 			bcopy(ti, mtod(m, caddr_t), sizeof(struct tcphdr) +
363 			    sizeof(struct ip6_hdr));
364 			break;
365 #endif /* INET6 */
366 		case AF_INET:
367 			bcopy(ti, mtod(m, caddr_t), sizeof(struct tcphdr) +
368 			    sizeof(struct ip));
369 			break;
370 		}
371 
372 		ti = mtod(m, struct tcpiphdr *);
373 		flags = TH_ACK;
374 	} else {
375 		m_freem(m->m_next);
376 		m->m_next = 0;
377 		m->m_data = (caddr_t)ti;
378 		tlen = 0;
379 #define xchg(a,b,type) do { type t; t=a; a=b; b=t; } while (0)
380 		switch (af) {
381 #ifdef INET6
382 		case AF_INET6:
383 			m->m_len = sizeof(struct tcphdr) + sizeof(struct ip6_hdr);
384 			xchg(((struct ip6_hdr *)ti)->ip6_dst,
385 			    ((struct ip6_hdr *)ti)->ip6_src, struct in6_addr);
386 			th = (void *)((caddr_t)ti + sizeof(struct ip6_hdr));
387 			break;
388 #endif /* INET6 */
389 		case AF_INET:
390 			m->m_len = sizeof (struct tcpiphdr);
391 			xchg(ti->ti_dst.s_addr, ti->ti_src.s_addr, u_int32_t);
392 			th = (void *)((caddr_t)ti + sizeof(struct ip));
393 			break;
394 		}
395 		xchg(th->th_dport, th->th_sport, u_int16_t);
396 #undef xchg
397 	}
398 	switch (af) {
399 #ifdef INET6
400 	case AF_INET6:
401 		tlen += sizeof(struct tcphdr) + sizeof(struct ip6_hdr);
402 		th = (struct tcphdr *)((caddr_t)ti + sizeof(struct ip6_hdr));
403 		break;
404 #endif /* INET6 */
405 	case AF_INET:
406 		ti->ti_len = htons((u_int16_t)(sizeof (struct tcphdr) + tlen));
407 		tlen += sizeof (struct tcpiphdr);
408 		th = (struct tcphdr *)((caddr_t)ti + sizeof(struct ip));
409 		break;
410 	}
411 
412 	m->m_len = tlen;
413 	m->m_pkthdr.len = tlen;
414 	m->m_pkthdr.rcvif = (struct ifnet *) 0;
415 	th->th_seq = htonl(seq);
416 	th->th_ack = htonl(ack);
417 	th->th_x2 = 0;
418 	th->th_off = sizeof (struct tcphdr) >> 2;
419 	th->th_flags = flags;
420 	if (tp)
421 		win >>= tp->rcv_scale;
422 	if (win > TCP_MAXWIN)
423 		win = TCP_MAXWIN;
424 	th->th_win = htons((u_int16_t)win);
425 	th->th_urp = 0;
426 
427 	switch (af) {
428 #ifdef INET6
429 	case AF_INET6:
430 		((struct ip6_hdr *)ti)->ip6_flow   = htonl(0x60000000);
431 		((struct ip6_hdr *)ti)->ip6_nxt  = IPPROTO_TCP;
432 		((struct ip6_hdr *)ti)->ip6_hlim =
433 			in6_selecthlim(tp ? tp->t_inpcb : NULL, NULL);	/*XXX*/
434 		((struct ip6_hdr *)ti)->ip6_plen = tlen - sizeof(struct ip6_hdr);
435 		th->th_sum = 0;
436 		th->th_sum = in6_cksum(m, IPPROTO_TCP,
437 		   sizeof(struct ip6_hdr), ((struct ip6_hdr *)ti)->ip6_plen);
438 		HTONS(((struct ip6_hdr *)ti)->ip6_plen);
439 		ip6_output(m, tp ? tp->t_inpcb->inp_outputopts6 : NULL,
440 		    (struct route_in6 *)ro, 0, NULL, NULL);
441 		break;
442 #endif /* INET6 */
443 	case AF_INET:
444 		bzero(ti->ti_x1, sizeof ti->ti_x1);
445 		ti->ti_len = htons((u_short)tlen - sizeof(struct ip));
446 
447 		/*
448 		 * There's no point deferring to hardware checksum processing
449 		 * here, as we only send a minimal TCP packet whose checksum
450 		 * we need to compute in any case.
451 		 */
452 		th->th_sum = 0;
453 		th->th_sum = in_cksum(m, tlen);
454 		((struct ip *)ti)->ip_len = htons(tlen);
455 		((struct ip *)ti)->ip_ttl = ip_defttl;
456 		ip_output(m, (void *)NULL, ro, ip_mtudisc ? IP_MTUDISC : 0,
457 			(void *)NULL, tp ? tp->t_inpcb : (void *)NULL);
458 	}
459 }
460 
461 /*
462  * Create a new TCP control block, making an
463  * empty reassembly queue and hooking it to the argument
464  * protocol control block.
465  */
466 struct tcpcb *
467 tcp_newtcpcb(struct inpcb *inp)
468 {
469 	struct tcpcb *tp;
470 	int i;
471 
472 	tp = pool_get(&tcpcb_pool, PR_NOWAIT);
473 	if (tp == NULL)
474 		return ((struct tcpcb *)0);
475 	bzero((char *) tp, sizeof(struct tcpcb));
476 	TAILQ_INIT(&tp->t_segq);
477 	tp->t_maxseg = tcp_mssdflt;
478 	tp->t_maxopd = 0;
479 
480 	TCP_INIT_DELACK(tp);
481 	for (i = 0; i < TCPT_NTIMERS; i++)
482 		TCP_TIMER_INIT(tp, i);
483 	timeout_set(&tp->t_reap_to, tcp_reaper, tp);
484 
485 #ifdef TCP_SACK
486 	tp->sack_enable = tcp_do_sack;
487 #endif
488 	tp->t_flags = tcp_do_rfc1323 ? (TF_REQ_SCALE|TF_REQ_TSTMP) : 0;
489 	tp->t_inpcb = inp;
490 	/*
491 	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
492 	 * rtt estimate.  Set rttvar so that srtt + 2 * rttvar gives
493 	 * reasonable initial retransmit time.
494 	 */
495 	tp->t_srtt = TCPTV_SRTTBASE;
496 	tp->t_rttvar = tcp_rttdflt * PR_SLOWHZ <<
497 	    (TCP_RTTVAR_SHIFT + TCP_RTT_BASE_SHIFT - 1);
498 	tp->t_rttmin = TCPTV_MIN;
499 	TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
500 	    TCPTV_MIN, TCPTV_REXMTMAX);
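	/*
	 * At this point t_srtt is still TCPTV_SRTTBASE (0), so the
	 * TCP_REXMTVAL() above is derived entirely from the default
	 * rttvar set just before it: the connection starts out with the
	 * traditional conservative retransmit timeout, clamped to the
	 * [TCPTV_MIN, TCPTV_REXMTMAX] range, until real RTT samples
	 * arrive.
	 */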
501 	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
502 	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
503 
504 	tp->t_pmtud_mtu_sent = 0;
505 	tp->t_pmtud_mss_acked = 0;
506 
507 #ifdef INET6
508 	/* we disallow IPv4 mapped address completely. */
509 	if ((inp->inp_flags & INP_IPV6) == 0)
510 		tp->pf = PF_INET;
511 	else
512 		tp->pf = PF_INET6;
513 #else
514 	tp->pf = PF_INET;
515 #endif
516 
517 #ifdef INET6
518 	if (inp->inp_flags & INP_IPV6)
519 		inp->inp_ipv6.ip6_hlim = ip6_defhlim;
520 	else
521 #endif /* INET6 */
522 		inp->inp_ip.ip_ttl = ip_defttl;
523 
524 	inp->inp_ppcb = (caddr_t)tp;
525 	return (tp);
526 }
527 
528 /*
529  * Drop a TCP connection, reporting
530  * the specified error.  If connection is synchronized,
531  * then send a RST to peer.
532  */
533 struct tcpcb *
534 tcp_drop(tp, errno)
535 	struct tcpcb *tp;
536 	int errno;
537 {
538 	struct socket *so = tp->t_inpcb->inp_socket;
539 
540 	if (TCPS_HAVERCVDSYN(tp->t_state)) {
541 		tp->t_state = TCPS_CLOSED;
542 		(void) tcp_output(tp);
543 		tcpstat.tcps_drops++;
544 	} else
545 		tcpstat.tcps_conndrops++;
546 	if (errno == ETIMEDOUT && tp->t_softerror)
547 		errno = tp->t_softerror;
548 	so->so_error = errno;
549 	return (tcp_close(tp));
550 }
551 
552 /*
553  * Close a TCP control block:
554  *	discard all space held by the tcp
555  *	discard internet protocol block
556  *	wake up any sleepers
557  */
558 struct tcpcb *
559 tcp_close(struct tcpcb *tp)
560 {
561 	struct inpcb *inp = tp->t_inpcb;
562 	struct socket *so = inp->inp_socket;
563 #ifdef TCP_SACK
564 	struct sackhole *p, *q;
565 #endif
566 
567 	/* free the reassembly queue, if any */
568 	tcp_reass_lock(tp);
569 	tcp_freeq(tp);
570 	tcp_reass_unlock(tp);
571 
572 	tcp_canceltimers(tp);
573 	TCP_CLEAR_DELACK(tp);
574 	syn_cache_cleanup(tp);
575 
576 #ifdef TCP_SACK
577 	/* Free SACK holes. */
578 	q = p = tp->snd_holes;
579 	while (p != 0) {
580 		q = p->next;
581 		pool_put(&sackhl_pool, p);
582 		p = q;
583 	}
584 #endif
585 	if (tp->t_template)
586 		(void) m_free(tp->t_template);
587 
588 	tp->t_flags |= TF_DEAD;
589 	timeout_add(&tp->t_reap_to, 0);
590 
591 	inp->inp_ppcb = 0;
592 	soisdisconnected(so);
593 	in_pcbdetach(inp);
594 	return ((struct tcpcb *)0);
595 }
596 
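/*
 * Final destruction of a control block, scheduled from tcp_close() via
 * the t_reap_to timeout.  Deferring the pool_put() (with TF_DEAD set on
 * the tcb in the meantime) is intended to give timer and delayed-ACK
 * handlers that may already be scheduled a chance to notice the dying
 * connection before its memory is recycled.
 */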
597 void
598 tcp_reaper(void *arg)
599 {
600 	struct tcpcb *tp = arg;
601 	int s;
602 
603 	s = splsoftnet();
604 	pool_put(&tcpcb_pool, tp);
605 	splx(s);
606 	tcpstat.tcps_closed++;
607 }
608 
609 int
610 tcp_freeq(struct tcpcb *tp)
611 {
612 	struct tcpqent *qe;
613 	int rv = 0;
614 
615 	while ((qe = TAILQ_FIRST(&tp->t_segq)) != NULL) {
616 		TAILQ_REMOVE(&tp->t_segq, qe, tcpqe_q);
617 		m_freem(qe->tcpqe_m);
618 		pool_put(&tcpqe_pool, qe);
619 		rv = 1;
620 	}
621 	return (rv);
622 }
623 
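/*
 * Protocol drain hook, called (at splnet) when the system runs short of
 * mbufs: walk every TCP pcb and discard whatever is sitting in its
 * reassembly queue, since unacked out-of-order data will be
 * retransmitted by the peer anyway.
 */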
624 void
625 tcp_drain()
626 {
627 	struct inpcb *inp;
628 
629 	/* called at splnet() */
630 	CIRCLEQ_FOREACH(inp, &tcbtable.inpt_queue, inp_queue) {
631 		struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb;
632 
633 		if (tp != NULL) {
634 			if (tcp_reass_lock_try(tp) == 0)
635 				continue;
636 			if (tcp_freeq(tp))
637 				tcpstat.tcps_conndrained++;
638 			tcp_reass_unlock(tp);
639 		}
640 	}
641 }
642 
643 /*
644  * Compute proper scaling value for receiver window from buffer space
645  */
646 
647 void
648 tcp_rscale(struct tcpcb *tp, u_long hiwat)
649 {
650 	tp->request_r_scale = 0;
651 	while (tp->request_r_scale < TCP_MAX_WINSHIFT &&
652 	       TCP_MAXWIN << tp->request_r_scale < hiwat)
653 		tp->request_r_scale++;
654 }
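/*
 * Worked example: for a 256KB socket buffer (hiwat == 262144) the loop
 * above stops at request_r_scale == 3, since 65535 << 2 == 262140 is
 * still smaller than hiwat while 65535 << 3 == 524280 is not; the
 * advertised window can then cover the whole buffer.
 */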
655 
656 /*
657  * Notify a tcp user of an asynchronous error;
658  * store error as soft error, but wake up user
659  * (for now, won't do anything until can select for soft error).
660  */
661 void
662 tcp_notify(inp, error)
663 	struct inpcb *inp;
664 	int error;
665 {
666 	struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb;
667 	struct socket *so = inp->inp_socket;
668 
669 	/*
670 	 * Ignore some errors if we are hooked up.
671 	 * If connection hasn't completed, has retransmitted several times,
672 	 * and receives a second error, give up now.  This is better
673 	 * than waiting a long time to establish a connection that
674 	 * can never complete.
675 	 */
676 	if (tp->t_state == TCPS_ESTABLISHED &&
677 	     (error == EHOSTUNREACH || error == ENETUNREACH ||
678 	      error == EHOSTDOWN)) {
679 		return;
680 	} else if (TCPS_HAVEESTABLISHED(tp->t_state) == 0 &&
681 	    tp->t_rxtshift > 3 && tp->t_softerror)
682 		so->so_error = error;
683 	else
684 		tp->t_softerror = error;
685 	wakeup((caddr_t) &so->so_timeo);
686 	sorwakeup(so);
687 	sowwakeup(so);
688 }
689 
690 #ifdef INET6
691 void
692 tcp6_ctlinput(cmd, sa, d)
693 	int cmd;
694 	struct sockaddr *sa;
695 	void *d;
696 {
697 	struct tcphdr th;
698 	struct tcpcb *tp;
699 	void (*notify)(struct inpcb *, int) = tcp_notify;
700 	struct ip6_hdr *ip6;
701 	const struct sockaddr_in6 *sa6_src = NULL;
702 	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *)sa;
703 	struct inpcb *inp;
704 	struct mbuf *m;
705 	tcp_seq seq;
706 	int off;
707 	struct {
708 		u_int16_t th_sport;
709 		u_int16_t th_dport;
710 		u_int32_t th_seq;
711 	} *thp;
712 
713 	if (sa->sa_family != AF_INET6 ||
714 	    sa->sa_len != sizeof(struct sockaddr_in6) ||
715 	    IN6_IS_ADDR_UNSPECIFIED(&sa6->sin6_addr) ||
716 	    IN6_IS_ADDR_V4MAPPED(&sa6->sin6_addr))
717 		return;
718 	if ((unsigned)cmd >= PRC_NCMDS)
719 		return;
720 	else if (cmd == PRC_QUENCH) {
721 		/*
722 		 * Don't honor ICMP Source Quench messages meant for
723 		 * TCP connections.
724 		 */
725 		/* XXX there's no PRC_QUENCH in IPv6 */
726 		return;
727 	} else if (PRC_IS_REDIRECT(cmd))
728 		notify = in_rtchange, d = NULL;
729 	else if (cmd == PRC_MSGSIZE)
730 		; /* special code is present, see below */
731 	else if (cmd == PRC_HOSTDEAD)
732 		d = NULL;
733 	else if (inet6ctlerrmap[cmd] == 0)
734 		return;
735 
736 	/* if the parameter is from icmp6, decode it. */
737 	if (d != NULL) {
738 		struct ip6ctlparam *ip6cp = (struct ip6ctlparam *)d;
739 		m = ip6cp->ip6c_m;
740 		ip6 = ip6cp->ip6c_ip6;
741 		off = ip6cp->ip6c_off;
742 		sa6_src = ip6cp->ip6c_src;
743 	} else {
744 		m = NULL;
745 		ip6 = NULL;
746 		sa6_src = &sa6_any;
747 	}
748 
749 	if (ip6) {
750 		/*
751 		 * XXX: We assume that when ip6 is non-NULL,
752 		 * m and off are valid.
753 		 */
754 
755 		/* check if we can safely examine src and dst ports */
756 		if (m->m_pkthdr.len < off + sizeof(*thp))
757 			return;
758 
759 		bzero(&th, sizeof(th));
760 #ifdef DIAGNOSTIC
761 		if (sizeof(*thp) > sizeof(th))
762 			panic("assumption failed in tcp6_ctlinput");
763 #endif
764 		m_copydata(m, off, sizeof(*thp), (caddr_t)&th);
765 
766 		/*
767 		 * Check to see if we have a valid TCP connection
768 		 * corresponding to the address in the ICMPv6 message
769 		 * payload.
770 		 */
771 		inp = in6_pcbhashlookup(&tcbtable, &sa6->sin6_addr,
772 		    th.th_dport, (struct in6_addr *)&sa6_src->sin6_addr,
773 		    th.th_sport);
774 		if (cmd == PRC_MSGSIZE) {
775 			/*
776 			 * Depending on the value of "valid" and routing table
777 			 * size (mtudisc_{hi,lo}wat), we will:
778 			 * - recalculate the new MTU and create the
779 			 *   corresponding routing entry, or
780 			 * - ignore the MTU change notification.
781 			 */
782 			icmp6_mtudisc_update((struct ip6ctlparam *)d, inp != NULL);
783 			return;
784 		}
785 		if (inp) {
786 			seq = ntohl(th.th_seq);
787 			if (inp->inp_socket &&
788 			    (tp = intotcpcb(inp)) &&
789 			    SEQ_GEQ(seq, tp->snd_una) &&
790 			    SEQ_LT(seq, tp->snd_max))
791 				notify(inp, inet6ctlerrmap[cmd]);
792 		} else if (syn_cache_count &&
793 		    (inet6ctlerrmap[cmd] == EHOSTUNREACH ||
794 		     inet6ctlerrmap[cmd] == ENETUNREACH ||
795 		     inet6ctlerrmap[cmd] == EHOSTDOWN))
796 			syn_cache_unreach((struct sockaddr *)sa6_src,
797 			    sa, &th);
798 	} else {
799 		(void) in6_pcbnotify(&tcbtable, sa, 0,
800 		    (struct sockaddr *)sa6_src, 0, cmd, NULL, notify);
801 	}
802 }
803 #endif
804 
805 void *
806 tcp_ctlinput(cmd, sa, v)
807 	int cmd;
808 	struct sockaddr *sa;
809 	void *v;
810 {
811 	struct ip *ip = v;
812 	struct tcphdr *th;
813 	struct tcpcb *tp;
814 	struct inpcb *inp;
815 	struct in_addr faddr;
816 	tcp_seq seq;
817 	u_int mtu;
818 	extern int inetctlerrmap[];
819 	void (*notify)(struct inpcb *, int) = tcp_notify;
820 	int errno;
821 
822 	if (sa->sa_family != AF_INET)
823 		return NULL;
824 	faddr = satosin(sa)->sin_addr;
825 	if (faddr.s_addr == INADDR_ANY)
826 		return NULL;
827 
828 	if ((unsigned)cmd >= PRC_NCMDS)
829 		return NULL;
830 	errno = inetctlerrmap[cmd];
831 	if (cmd == PRC_QUENCH)
832 		/*
833 		 * Don't honor ICMP Source Quench messages meant for
834 		 * TCP connections.
835 		 */
836 		return NULL;
837 	else if (PRC_IS_REDIRECT(cmd))
838 		notify = in_rtchange, ip = 0;
839 	else if (cmd == PRC_MSGSIZE && ip_mtudisc && ip) {
840 		/*
841 		 * Verify that the packet in the icmp payload refers
842 		 * to an existing TCP connection.
843 		 */
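		/*
		 * In outline, the checks below sort the ICMP report into
		 * three cases: a Next-Hop MTU at least as large as anything
		 * we have ever sent is stale or bogus and gets dropped; an
		 * MTU that still holds a full segment of acked data is acted
		 * on immediately via icmp_mtudisc(); anything smaller is
		 * merely recorded (TF_PMTUD_PEND) for later processing.
		 */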
844 		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
845 		seq = ntohl(th->th_seq);
846 		inp = in_pcbhashlookup(&tcbtable,
847 		    ip->ip_dst, th->th_dport, ip->ip_src, th->th_sport);
848 		if (inp && (tp = intotcpcb(inp)) &&
849 		    SEQ_GEQ(seq, tp->snd_una) &&
850 		    SEQ_LT(seq, tp->snd_max)) {
851 			struct icmp *icp;
852 			icp = (struct icmp *)((caddr_t)ip -
853 					      offsetof(struct icmp, icmp_ip));
854 
855 			/*
856 			 * If the ICMP message advertises a Next-Hop MTU
857 			 * equal to or larger than the maximum packet size we have
858 			 * ever sent, drop the message.
859 			 */
860 			mtu = (u_int)ntohs(icp->icmp_nextmtu);
861 			if (mtu >= tp->t_pmtud_mtu_sent)
862 				return NULL;
863 			if (mtu >= tcp_hdrsz(tp) + tp->t_pmtud_mss_acked) {
864 				/*
865 				 * Calculate new MTU, and create corresponding
866 				 * route (traditional PMTUD).
867 				 */
868 				tp->t_flags &= ~TF_PMTUD_PEND;
869 				icmp_mtudisc(icp);
870 			} else {
871 				/*
872 				 * Record the information got in the ICMP
873 				 * message; act on it later.
874 				 * If we had already recorded an ICMP message,
875 				 * replace the old one only if the new message
876 				 * refers to an older TCP segment
877 				 */
878 				if (tp->t_flags & TF_PMTUD_PEND) {
879 					if (SEQ_LT(tp->t_pmtud_th_seq, seq))
880 						return NULL;
881 				} else
882 					tp->t_flags |= TF_PMTUD_PEND;
883 				tp->t_pmtud_th_seq = seq;
884 				tp->t_pmtud_nextmtu = icp->icmp_nextmtu;
885 				tp->t_pmtud_ip_len = icp->icmp_ip.ip_len;
886 				tp->t_pmtud_ip_hl = icp->icmp_ip.ip_hl;
887 				return NULL;
888 			}
889 		} else {
890 			/* ignore if we don't have a matching connection */
891 			return NULL;
892 		}
893 		notify = tcp_mtudisc, ip = 0;
894 	} else if (cmd == PRC_MTUINC)
895 		notify = tcp_mtudisc_increase, ip = 0;
896 	else if (cmd == PRC_HOSTDEAD)
897 		ip = 0;
898 	else if (errno == 0)
899 		return NULL;
900 
901 	if (ip) {
902 		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
903 		inp = in_pcbhashlookup(&tcbtable,
904 		    ip->ip_dst, th->th_dport, ip->ip_src, th->th_sport);
905 		if (inp) {
906 			seq = ntohl(th->th_seq);
907 			if (inp->inp_socket &&
908 			    (tp = intotcpcb(inp)) &&
909 			    SEQ_GEQ(seq, tp->snd_una) &&
910 			    SEQ_LT(seq, tp->snd_max))
911 				notify(inp, errno);
912 		} else if (syn_cache_count &&
913 		    (inetctlerrmap[cmd] == EHOSTUNREACH ||
914 		     inetctlerrmap[cmd] == ENETUNREACH ||
915 		     inetctlerrmap[cmd] == EHOSTDOWN)) {
916 			struct sockaddr_in sin;
917 
918 			bzero(&sin, sizeof(sin));
919 			sin.sin_len = sizeof(sin);
920 			sin.sin_family = AF_INET;
921 			sin.sin_port = th->th_sport;
922 			sin.sin_addr = ip->ip_src;
923 			syn_cache_unreach((struct sockaddr *)&sin,
924 			    sa, th);
925 		}
926 	} else
927 		in_pcbnotifyall(&tcbtable, sa, errno, notify);
928 
929 	return NULL;
930 }
931 
932 
933 #ifdef INET6
934 /*
935  * Path MTU Discovery handlers.
936  */
937 void
938 tcp6_mtudisc_callback(faddr)
939 	struct in6_addr *faddr;
940 {
941 	struct sockaddr_in6 sin6;
942 
943 	bzero(&sin6, sizeof(sin6));
944 	sin6.sin6_family = AF_INET6;
945 	sin6.sin6_len = sizeof(struct sockaddr_in6);
946 	sin6.sin6_addr = *faddr;
947 	(void) in6_pcbnotify(&tcbtable, (struct sockaddr *)&sin6, 0,
948 	    (struct sockaddr *)&sa6_any, 0, PRC_MSGSIZE, NULL, tcp_mtudisc);
949 }
950 #endif /* INET6 */
951 
952 /*
953  * On receipt of path MTU corrections, flush old route and replace it
954  * with the new one.  Retransmit all unacknowledged packets, to ensure
955  * that all packets will be received.
956  */
957 void
958 tcp_mtudisc(inp, errno)
959 	struct inpcb *inp;
960 	int errno;
961 {
962 	struct tcpcb *tp = intotcpcb(inp);
963 	struct rtentry *rt = in_pcbrtentry(inp);
964 	int change = 0;
965 
966 	if (tp != 0) {
967 		int orig_maxseg = tp->t_maxseg;
968 		if (rt != 0) {
969 			/*
970 			 * If this was not a host route, remove and realloc.
971 			 */
972 			if ((rt->rt_flags & RTF_HOST) == 0) {
973 				in_rtchange(inp, errno);
974 				if ((rt = in_pcbrtentry(inp)) == 0)
975 					return;
976 			}
977 			if (orig_maxseg != tp->t_maxseg ||
978 			    (rt->rt_rmx.rmx_locks & RTV_MTU))
979 				change = 1;
980 		}
981 		tcp_mss(tp, -1);
982 
983 		/*
984 		 * Resend unacknowledged packets
985 		 */
986 		tp->snd_nxt = tp->snd_una;
987 		if (change || errno > 0)
988 			tcp_output(tp);
989 	}
990 }
991 
992 void
993 tcp_mtudisc_increase(inp, errno)
994 	struct inpcb *inp;
995 	int errno;
996 {
997 	struct tcpcb *tp = intotcpcb(inp);
998 	struct rtentry *rt = in_pcbrtentry(inp);
999 
1000 	if (tp != 0 && rt != 0) {
1001 		/*
1002 		 * If this was a host route, remove and realloc.
1003 		 */
1004 		if (rt->rt_flags & RTF_HOST)
1005 			in_rtchange(inp, errno);
1006 
1007 		/* also takes care of congestion window */
1008 		tcp_mss(tp, -1);
1009 	}
1010 }
1011 
1012 #ifdef TCP_SIGNATURE
1013 int
1014 tcp_signature_tdb_attach()
1015 {
1016 	return (0);
1017 }
1018 
1019 int
1020 tcp_signature_tdb_init(tdbp, xsp, ii)
1021 	struct tdb *tdbp;
1022 	struct xformsw *xsp;
1023 	struct ipsecinit *ii;
1024 {
1025 	if ((ii->ii_authkeylen < 1) || (ii->ii_authkeylen > 80))
1026 		return (EINVAL);
1027 
1028 	tdbp->tdb_amxkey = malloc(ii->ii_authkeylen, M_XDATA, M_DONTWAIT);
1029 	if (tdbp->tdb_amxkey == NULL)
1030 		return (ENOMEM);
1031 	bcopy(ii->ii_authkey, tdbp->tdb_amxkey, ii->ii_authkeylen);
1032 	tdbp->tdb_amxkeylen = ii->ii_authkeylen;
1033 
1034 	return (0);
1035 }
1036 
1037 int
1038 tcp_signature_tdb_zeroize(tdbp)
1039 	struct tdb *tdbp;
1040 {
1041 	if (tdbp->tdb_amxkey) {
1042 		bzero(tdbp->tdb_amxkey, tdbp->tdb_amxkeylen);
1043 		free(tdbp->tdb_amxkey, M_XDATA);
1044 		tdbp->tdb_amxkey = NULL;
1045 	}
1046 
1047 	return (0);
1048 }
1049 
1050 int
1051 tcp_signature_tdb_input(m, tdbp, skip, protoff)
1052 	struct mbuf *m;
1053 	struct tdb *tdbp;
1054 	int skip, protoff;
1055 {
1056 	return (0);
1057 }
1058 
1059 int
1060 tcp_signature_tdb_output(m, tdbp, mp, skip, protoff)
1061 	struct mbuf *m;
1062 	struct tdb *tdbp;
1063 	struct mbuf **mp;
1064 	int skip, protoff;
1065 {
1066 	return (EINVAL);
1067 }
1068 
1069 int
1070 tcp_signature_apply(fstate, data, len)
1071 	caddr_t fstate;
1072 	caddr_t data;
1073 	unsigned int len;
1074 {
1075 	MD5Update((MD5_CTX *)fstate, (char *)data, len);
1076 	return 0;
1077 }
1078 
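/*
 * Compute the TCP MD5 signature (RFC 2385) for the segment in "m": the
 * digest covers a v4/v6 pseudo-header, the TCP header with its checksum
 * zeroed, the TCP payload, and finally the connection's shared key from
 * the TDB.  "doswap" restores network byte order in the header copy
 * first, for the input path where the fields have already been swapped.
 */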
1079 int
1080 tcp_signature(struct tdb *tdb, int af, struct mbuf *m, struct tcphdr *th,
1081     int iphlen, int doswap, char *sig)
1082 {
1083 	MD5_CTX ctx;
1084 	int len;
1085 	struct tcphdr th0;
1086 
1087 	MD5Init(&ctx);
1088 
1089 	switch(af) {
1090 	case 0:
1091 #ifdef INET
1092 	case AF_INET: {
1093 		struct ippseudo ippseudo;
1094 		struct ip *ip;
1095 
1096 		ip = mtod(m, struct ip *);
1097 
1098 		ippseudo.ippseudo_src = ip->ip_src;
1099 		ippseudo.ippseudo_dst = ip->ip_dst;
1100 		ippseudo.ippseudo_pad = 0;
1101 		ippseudo.ippseudo_p = IPPROTO_TCP;
1102 		ippseudo.ippseudo_len = htons(m->m_pkthdr.len - iphlen);
1103 
1104 		MD5Update(&ctx, (char *)&ippseudo,
1105 		    sizeof(struct ippseudo));
1106 		break;
1107 		}
1108 #endif
1109 #ifdef INET6
1110 	case AF_INET6: {
1111 		struct ip6_hdr_pseudo ip6pseudo;
1112 		struct ip6_hdr *ip6;
1113 
1114 		ip6 = mtod(m, struct ip6_hdr *);
1115 		bzero(&ip6pseudo, sizeof(ip6pseudo));
1116 		ip6pseudo.ip6ph_src = ip6->ip6_src;
1117 		ip6pseudo.ip6ph_dst = ip6->ip6_dst;
1118 		in6_clearscope(&ip6pseudo.ip6ph_src);
1119 		in6_clearscope(&ip6pseudo.ip6ph_dst);
1120 		ip6pseudo.ip6ph_nxt = IPPROTO_TCP;
1121 		ip6pseudo.ip6ph_len = htonl(m->m_pkthdr.len - iphlen);
1122 
1123 		MD5Update(&ctx, (char *)&ip6pseudo,
1124 		    sizeof(ip6pseudo));
1125 		break;
1126 		}
1127 #endif
1128 	}
1129 
1130 	th0 = *th;
1131 	th0.th_sum = 0;
1132 
1133 	if (doswap) {
1134 		HTONL(th0.th_seq);
1135 		HTONL(th0.th_ack);
1136 		HTONS(th0.th_win);
1137 		HTONS(th0.th_urp);
1138 	}
1139 	MD5Update(&ctx, (char *)&th0, sizeof(th0));
1140 
1141 	len = m->m_pkthdr.len - iphlen - th->th_off * sizeof(uint32_t);
1142 
1143 	if (len > 0 &&
1144 	    m_apply(m, iphlen + th->th_off * sizeof(uint32_t), len,
1145 	    tcp_signature_apply, (caddr_t)&ctx))
1146 		return (-1);
1147 
1148 	MD5Update(&ctx, tdb->tdb_amxkey, tdb->tdb_amxkeylen);
1149 	MD5Final(sig, &ctx);
1150 
1151 	return (0);
1152 }
1153 #endif /* TCP_SIGNATURE */
1154 
1155 #define TCP_RNDISS_ROUNDS	16
1156 #define TCP_RNDISS_OUT	7200
1157 #define TCP_RNDISS_MAX	30000
1158 
1159 u_int8_t tcp_rndiss_sbox[128];
1160 u_int16_t tcp_rndiss_msb;
1161 u_int16_t tcp_rndiss_cnt;
1162 long tcp_rndiss_reseed;
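/*
 * Randomized ISS generation: tcp_rndiss_encrypt() runs a 16-round keyed
 * permutation (keyed by the sbox that tcp_rndiss_init() refills from
 * the random pool) over a 15-bit counter.  tcp_rndiss_next() puts the
 * permuted counter in the upper half of the ISS, toggles the top bit
 * (tcp_rndiss_msb) on every reseed so consecutive generations do not
 * collide, and fills the low 15 bits with fresh arc4random() output.
 * A reseed happens after TCP_RNDISS_MAX values have been handed out or
 * after TCP_RNDISS_OUT seconds.
 */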
1163 
1164 u_int16_t
1165 tcp_rndiss_encrypt(val)
1166 	u_int16_t val;
1167 {
1168 	u_int16_t sum = 0, i;
1169 
1170 	for (i = 0; i < TCP_RNDISS_ROUNDS; i++) {
1171 		sum += 0x79b9;
1172 		val ^= ((u_int16_t)tcp_rndiss_sbox[(val^sum) & 0x7f]) << 7;
1173 		val = ((val & 0xff) << 7) | (val >> 8);
1174 	}
1175 
1176 	return val;
1177 }
1178 
1179 void
1180 tcp_rndiss_init()
1181 {
1182 	get_random_bytes(tcp_rndiss_sbox, sizeof(tcp_rndiss_sbox));
1183 
1184 	tcp_rndiss_reseed = time_second + TCP_RNDISS_OUT;
1185 	tcp_rndiss_msb = tcp_rndiss_msb == 0x8000 ? 0 : 0x8000;
1186 	tcp_rndiss_cnt = 0;
1187 }
1188 
1189 tcp_seq
1190 tcp_rndiss_next()
1191 {
1192 	if (tcp_rndiss_cnt >= TCP_RNDISS_MAX ||
1193 	    time_second > tcp_rndiss_reseed)
1194 		tcp_rndiss_init();
1195 
1196 	/* (arc4random() & 0x7fff) ensures a 32768 byte gap between ISS */
1197 	return ((tcp_rndiss_encrypt(tcp_rndiss_cnt++) | tcp_rndiss_msb) <<16) |
1198 		(arc4random() & 0x7fff);
1199 }
1200 
1201