1 /*	$OpenBSD: tcp_subr.c,v 1.80 2004/05/07 14:42:27 millert Exp $	*/
2 /*	$NetBSD: tcp_subr.c,v 1.22 1996/02/13 23:44:00 christos Exp $	*/
3 
4 /*
5  * Copyright (c) 1982, 1986, 1988, 1990, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *	@(#)COPYRIGHT	1.1 (NRL) 17 January 1995
33  *
34  * NRL grants permission for redistribution and use in source and binary
35  * forms, with or without modification, of the software and documentation
36  * created at NRL provided that the following conditions are met:
37  *
38  * 1. Redistributions of source code must retain the above copyright
39  *    notice, this list of conditions and the following disclaimer.
40  * 2. Redistributions in binary form must reproduce the above copyright
41  *    notice, this list of conditions and the following disclaimer in the
42  *    documentation and/or other materials provided with the distribution.
43  * 3. All advertising materials mentioning features or use of this software
44  *    must display the following acknowledgements:
45  * 	This product includes software developed by the University of
46  * 	California, Berkeley and its contributors.
47  * 	This product includes software developed at the Information
48  * 	Technology Division, US Naval Research Laboratory.
49  * 4. Neither the name of the NRL nor the names of its contributors
50  *    may be used to endorse or promote products derived from this software
51  *    without specific prior written permission.
52  *
53  * THE SOFTWARE PROVIDED BY NRL IS PROVIDED BY NRL AND CONTRIBUTORS ``AS
54  * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
56  * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL NRL OR
57  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
58  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
59  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
60  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
61  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
62  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
63  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64  *
65  * The views and conclusions contained in the software and documentation
66  * are those of the authors and should not be interpreted as representing
67  * official policies, either expressed or implied, of the US Naval
68  * Research Laboratory (NRL).
69  */
70 
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/proc.h>
74 #include <sys/mbuf.h>
75 #include <sys/socket.h>
76 #include <sys/socketvar.h>
77 #include <sys/protosw.h>
78 #include <sys/kernel.h>
79 
80 #include <net/route.h>
81 #include <net/if.h>
82 
83 #include <netinet/in.h>
84 #include <netinet/in_systm.h>
85 #include <netinet/ip.h>
86 #include <netinet/in_pcb.h>
87 #include <netinet/ip_var.h>
88 #include <netinet/ip_icmp.h>
89 #include <netinet/tcp.h>
90 #include <netinet/tcp_fsm.h>
91 #include <netinet/tcp_seq.h>
92 #include <netinet/tcp_timer.h>
93 #include <netinet/tcp_var.h>
94 #include <netinet/tcpip.h>
95 #include <dev/rndvar.h>
96 
97 #ifdef INET6
98 #include <netinet6/in6_var.h>
99 #include <netinet6/ip6protosw.h>
100 #endif /* INET6 */
101 
102 #ifdef TCP_SIGNATURE
103 #include <crypto/md5.h>
104 #endif /* TCP_SIGNATURE */
105 
106 /* patchable/settable parameters for tcp */
107 int	tcp_mssdflt = TCP_MSS;
108 int	tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
109 
110 /*
111  * Configure kernel with options "TCP_DO_RFC1323=0" to disable RFC1323 stuff.
112  * This is a good idea over slow SLIP/PPP links, because the timestamp
113  * pretty well destroys the VJ compression (any packet with a timestamp
114  * different from the previous one can't be compressed), as well as adding
115  * more overhead.
116  * XXX And it should be a settable per route characteristic (with this just
117  * used as the default).
118  */
119 #ifndef TCP_DO_RFC1323
120 #define TCP_DO_RFC1323	1
121 #endif
122 int	tcp_do_rfc1323 = TCP_DO_RFC1323;
123 
124 #ifndef TCP_DO_SACK
125 #ifdef TCP_SACK
126 #define TCP_DO_SACK	1
127 #else
128 #define TCP_DO_SACK	0
129 #endif
130 #endif
131 int	tcp_do_sack = TCP_DO_SACK;		/* RFC 2018 selective ACKs */
132 int	tcp_ack_on_push = 0;	/* set to enable immediate ACK-on-PUSH */
133 int	tcp_do_ecn = 0;		/* RFC3168 ECN enabled/disabled? */
134 int	tcp_do_rfc3390 = 0;	/* RFC3390 Increasing TCP's Initial Window */
135 
136 u_int32_t	tcp_now;
137 
138 #ifndef TCBHASHSIZE
139 #define	TCBHASHSIZE	128
140 #endif
141 int	tcbhashsize = TCBHASHSIZE;
142 
143 /* syn hash parameters */
144 #define	TCP_SYN_HASH_SIZE	293
145 #define	TCP_SYN_BUCKET_SIZE	35
146 int	tcp_syn_cache_size = TCP_SYN_HASH_SIZE;
147 int	tcp_syn_cache_limit = TCP_SYN_HASH_SIZE*TCP_SYN_BUCKET_SIZE;
148 int	tcp_syn_bucket_limit = 3*TCP_SYN_BUCKET_SIZE;
149 struct	syn_cache_head tcp_syn_cache[TCP_SYN_HASH_SIZE];
150 
151 int tcp_reass_limit = NMBCLUSTERS / 2; /* hardlimit for tcpqe_pool */
152 
153 #ifdef INET6
154 extern int ip6_defhlim;
155 #endif /* INET6 */
156 
157 struct pool tcpcb_pool;
158 struct pool tcpqe_pool;
159 #ifdef TCP_SACK
160 struct pool sackhl_pool;
161 #endif
162 
163 struct tcpstat tcpstat;		/* tcp statistics */
164 tcp_seq  tcp_iss;
165 
166 /*
167  * TCP initialization
168  */
169 void
170 tcp_init()
171 {
172 #ifdef TCP_COMPAT_42
173 	tcp_iss = 1;		/* wrong */
174 #endif /* TCP_COMPAT_42 */
175 	pool_init(&tcpcb_pool, sizeof(struct tcpcb), 0, 0, 0, "tcpcbpl",
176 	    NULL);
177 	pool_init(&tcpqe_pool, sizeof(struct ipqent), 0, 0, 0, "tcpqepl",
178 	    NULL);
179 	pool_sethardlimit(&tcpqe_pool, tcp_reass_limit, NULL, 0);
180 #ifdef TCP_SACK
181 	pool_init(&sackhl_pool, sizeof(struct sackhole), 0, 0, 0, "sackhlpl",
182 	    NULL);
183 #endif /* TCP_SACK */
184 	in_pcbinit(&tcbtable, tcbhashsize);
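	/*
	 * tcp_now drives RFC 1323 timestamps and RTT measurement; starting
	 * it at a random value in the lower half of its range presumably
	 * keeps early comparisons from wrapping.
	 */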
185 	tcp_now = arc4random() / 2;
186 
187 #ifdef INET6
188 	/*
189 	 * Since sizeof(struct ip6_hdr) > sizeof(struct ip), we
190 	 * do max length checks/computations only on the former.
191 	 */
192 	if (max_protohdr < (sizeof(struct ip6_hdr) + sizeof(struct tcphdr)))
193 		max_protohdr = (sizeof(struct ip6_hdr) + sizeof(struct tcphdr));
194 	if ((max_linkhdr + sizeof(struct ip6_hdr) + sizeof(struct tcphdr)) >
195 	    MHLEN)
196 		panic("tcp_init");
197 
198 	icmp6_mtudisc_callback_register(tcp6_mtudisc_callback);
199 #endif /* INET6 */
200 
201 	/* Initialize the compressed state engine. */
202 	syn_cache_init();
203 
204 	/* Initialize timer state. */
205 	tcp_timer_init();
206 }
207 
208 /*
209  * Create template to be used to send tcp packets on a connection.
210  * Call after host entry created, allocates an mbuf and fills
211  * in a skeletal tcp/ip header, minimizing the amount of work
212  * necessary when the connection is used.
213  *
214  * To support IPv6 in addition to IPv4 and considering that the sizes of
215  * the IPv4 and IPv6 headers are not the same, we now use a separate pointer
216  * for the TCP header.  Also, we made the former tcpiphdr header pointer
217  * into just an IP overlay pointer, with casting as appropriate for v6. rja
218  */
219 struct mbuf *
220 tcp_template(tp)
221 	struct tcpcb *tp;
222 {
223 	struct inpcb *inp = tp->t_inpcb;
224 	struct mbuf *m;
225 	struct tcphdr *th;
226 
227 	if ((m = tp->t_template) == 0) {
228 		m = m_get(M_DONTWAIT, MT_HEADER);
229 		if (m == NULL)
230 			return (0);
231 
232 		switch (tp->pf) {
233 		case 0:	/*default to PF_INET*/
234 #ifdef INET
235 		case AF_INET:
236 			m->m_len = sizeof(struct ip);
237 			break;
238 #endif /* INET */
239 #ifdef INET6
240 		case AF_INET6:
241 			m->m_len = sizeof(struct ip6_hdr);
242 			break;
243 #endif /* INET6 */
244 		}
245 		m->m_len += sizeof (struct tcphdr);
246 
247 		/*
248 		 * The link header, network header, TCP header, and TCP options
249 		 * all must fit in this mbuf. For now, assume the worst case of
250 		 * TCP options size. Eventually, compute this from tp flags.
251 		 */
252 		if (m->m_len + MAX_TCPOPTLEN + max_linkhdr >= MHLEN) {
253 			MCLGET(m, M_DONTWAIT);
254 			if ((m->m_flags & M_EXT) == 0) {
255 				m_free(m);
256 				return (0);
257 			}
258 		}
259 	}
260 
261 	switch(tp->pf) {
262 #ifdef INET
263 	case AF_INET:
264 		{
265 			struct ipovly *ipovly;
266 
267 			ipovly = mtod(m, struct ipovly *);
268 
269 			bzero(ipovly->ih_x1, sizeof ipovly->ih_x1);
270 			ipovly->ih_pr = IPPROTO_TCP;
271 			ipovly->ih_len = htons(sizeof (struct tcphdr));
272 			ipovly->ih_src = inp->inp_laddr;
273 			ipovly->ih_dst = inp->inp_faddr;
274 
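			/*
			 * Precompute the IPv4 pseudo-header checksum and
			 * stash it in th_sum; output code is then expected
			 * to extend it over the actual TCP header and data.
			 */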
275 			th = (struct tcphdr *)(mtod(m, caddr_t) +
276 				sizeof(struct ip));
277 			th->th_sum = in_cksum_phdr(ipovly->ih_src.s_addr,
278 			    ipovly->ih_dst.s_addr,
279 			    htons(sizeof (struct tcphdr) + IPPROTO_TCP));
280 		}
281 		break;
282 #endif /* INET */
283 #ifdef INET6
284 	case AF_INET6:
285 		{
286 			struct ip6_hdr *ip6;
287 
288 			ip6 = mtod(m, struct ip6_hdr *);
289 
290 			ip6->ip6_src = inp->inp_laddr6;
291 			ip6->ip6_dst = inp->inp_faddr6;
292 			ip6->ip6_flow = htonl(0x60000000) |
293 			    (inp->inp_flowinfo & IPV6_FLOWLABEL_MASK);
294 
295 			ip6->ip6_nxt = IPPROTO_TCP;
296 			ip6->ip6_plen = htons(sizeof(struct tcphdr)); /*XXX*/
297 			ip6->ip6_hlim = in6_selecthlim(inp, NULL);	/*XXX*/
298 
299 			th = (struct tcphdr *)(mtod(m, caddr_t) +
300 				sizeof(struct ip6_hdr));
301 			th->th_sum = 0;
302 		}
303 		break;
304 #endif /* INET6 */
305 	}
306 
307 	th->th_sport = inp->inp_lport;
308 	th->th_dport = inp->inp_fport;
309 	th->th_seq = 0;
310 	th->th_ack = 0;
311 	th->th_x2  = 0;
312 	th->th_off = 5;
313 	th->th_flags = 0;
314 	th->th_win = 0;
315 	th->th_urp = 0;
316 	return (m);
317 }
318 
319 /*
320  * Send a single message to the TCP at address specified by
321  * the given TCP/IP header.  If m == 0, then we make a copy
322  * of the tcpiphdr at ti and send directly to the addressed host.
323  * This is used to force keep alive messages out using the TCP
324  * template for a connection tp->t_template.  If flags are given
325  * then we send a message back to the TCP which originated the
326  * segment ti, and discard the mbuf containing it and any other
327  * attached mbufs.
328  *
329  * In any case the ack and sequence number of the transmitted
330  * segment are as specified by the parameters.
331  */
332 #ifdef INET6
333 /* This function looks hairy, because it was so IPv4-dependent. */
334 #endif /* INET6 */
335 void
336 tcp_respond(tp, template, m, ack, seq, flags)
337 	struct tcpcb *tp;
338 	caddr_t template;
339 	struct mbuf *m;
340 	tcp_seq ack, seq;
341 	int flags;
342 {
343 	int tlen;
344 	int win = 0;
345 	struct route *ro = 0;
346 	struct tcphdr *th;
347 	struct tcpiphdr *ti = (struct tcpiphdr *)template;
348 	int af;		/* af on wire */
349 
350 	if (tp) {
351 		win = sbspace(&tp->t_inpcb->inp_socket->so_rcv);
352 		/*
353 		 * If this is called with an unconnected
354 		 * socket/tp/pcb (tp->pf is 0), we lose.
355 		 */
356 		af = tp->pf;
357 
358 		/*
359 		 * The route/route6 distinction is meaningless
360 		 * unless you're allocating space or passing parameters.
361 		 */
362 		ro = &tp->t_inpcb->inp_route;
363 	} else
364 		af = (((struct ip *)ti)->ip_v == 6) ? AF_INET6 : AF_INET;
365 	if (m == 0) {
366 		m = m_gethdr(M_DONTWAIT, MT_HEADER);
367 		if (m == NULL)
368 			return;
369 #ifdef TCP_COMPAT_42
370 		tlen = 1;
371 #else
372 		tlen = 0;
373 #endif
374 		m->m_data += max_linkhdr;
375 		switch (af) {
376 #ifdef INET6
377 		case AF_INET6:
378 			bcopy(ti, mtod(m, caddr_t), sizeof(struct tcphdr) +
379 			    sizeof(struct ip6_hdr));
380 			break;
381 #endif /* INET6 */
382 		case AF_INET:
383 			bcopy(ti, mtod(m, caddr_t), sizeof(struct tcphdr) +
384 			    sizeof(struct ip));
385 			break;
386 		}
387 
388 		ti = mtod(m, struct tcpiphdr *);
389 		flags = TH_ACK;
390 	} else {
391 		m_freem(m->m_next);
392 		m->m_next = 0;
393 		m->m_data = (caddr_t)ti;
394 		tlen = 0;
395 #define xchg(a,b,type) do { type t; t=a; a=b; b=t; } while (0)
396 		switch (af) {
397 #ifdef INET6
398 		case AF_INET6:
399 			m->m_len = sizeof(struct tcphdr) + sizeof(struct ip6_hdr);
400 			xchg(((struct ip6_hdr *)ti)->ip6_dst,
401 			    ((struct ip6_hdr *)ti)->ip6_src, struct in6_addr);
402 			th = (void *)((caddr_t)ti + sizeof(struct ip6_hdr));
403 			break;
404 #endif /* INET6 */
405 		case AF_INET:
406 			m->m_len = sizeof (struct tcpiphdr);
407 			xchg(ti->ti_dst.s_addr, ti->ti_src.s_addr, u_int32_t);
408 			th = (void *)((caddr_t)ti + sizeof(struct ip));
409 			break;
410 		}
411 		xchg(th->th_dport, th->th_sport, u_int16_t);
412 #undef xchg
413 	}
414 	switch (af) {
415 #ifdef INET6
416 	case AF_INET6:
417 		tlen += sizeof(struct tcphdr) + sizeof(struct ip6_hdr);
418 		th = (struct tcphdr *)((caddr_t)ti + sizeof(struct ip6_hdr));
419 		break;
420 #endif /* INET6 */
421 	case AF_INET:
422 		ti->ti_len = htons((u_int16_t)(sizeof (struct tcphdr) + tlen));
423 		tlen += sizeof (struct tcpiphdr);
424 		th = (struct tcphdr *)((caddr_t)ti + sizeof(struct ip));
425 		break;
426 	}
427 
428 	m->m_len = tlen;
429 	m->m_pkthdr.len = tlen;
430 	m->m_pkthdr.rcvif = (struct ifnet *) 0;
431 	th->th_seq = htonl(seq);
432 	th->th_ack = htonl(ack);
433 	th->th_x2 = 0;
434 	th->th_off = sizeof (struct tcphdr) >> 2;
435 	th->th_flags = flags;
436 	if (tp)
437 		win >>= tp->rcv_scale;
438 	if (win > TCP_MAXWIN)
439 		win = TCP_MAXWIN;
440 	th->th_win = htons((u_int16_t)win);
441 	th->th_urp = 0;
442 
443 	switch (af) {
444 #ifdef INET6
445 	case AF_INET6:
446 		((struct ip6_hdr *)ti)->ip6_flow   = htonl(0x60000000);
447 		((struct ip6_hdr *)ti)->ip6_nxt  = IPPROTO_TCP;
448 		((struct ip6_hdr *)ti)->ip6_hlim =
449 			in6_selecthlim(tp ? tp->t_inpcb : NULL, NULL);	/*XXX*/
450 		((struct ip6_hdr *)ti)->ip6_plen = tlen - sizeof(struct ip6_hdr);
451 		th->th_sum = 0;
452 		th->th_sum = in6_cksum(m, IPPROTO_TCP,
453 		   sizeof(struct ip6_hdr), ((struct ip6_hdr *)ti)->ip6_plen);
454 		HTONS(((struct ip6_hdr *)ti)->ip6_plen);
455 		ip6_output(m, tp ? tp->t_inpcb->inp_outputopts6 : NULL,
456 		    (struct route_in6 *)ro, 0, NULL, NULL);
457 		break;
458 #endif /* INET6 */
459 	case AF_INET:
460 		bzero(ti->ti_x1, sizeof ti->ti_x1);
461 		ti->ti_len = htons((u_short)tlen - sizeof(struct ip));
462 
463 		/*
464 		 * There's no point deferring to hardware checksum processing
465 		 * here, as we only send a minimal TCP packet whose checksum
466 		 * we need to compute in any case.
467 		 */
468 		th->th_sum = 0;
469 		th->th_sum = in_cksum(m, tlen);
470 		((struct ip *)ti)->ip_len = htons(tlen);
471 		((struct ip *)ti)->ip_ttl = ip_defttl;
472 		ip_output(m, (void *)NULL, ro, ip_mtudisc ? IP_MTUDISC : 0,
473 			(void *)NULL, tp ? tp->t_inpcb : (void *)NULL);
474 	}
475 }
476 
477 /*
478  * Create a new TCP control block, making an
479  * empty reassembly queue and hooking it to the argument
480  * protocol control block.
481  */
482 struct tcpcb *
483 tcp_newtcpcb(struct inpcb *inp)
484 {
485 	struct tcpcb *tp;
486 	int i;
487 
488 	tp = pool_get(&tcpcb_pool, PR_NOWAIT);
489 	if (tp == NULL)
490 		return ((struct tcpcb *)0);
491 	bzero((char *) tp, sizeof(struct tcpcb));
492 	LIST_INIT(&tp->segq);
493 	tp->t_maxseg = tcp_mssdflt;
494 	tp->t_maxopd = 0;
495 
496 	TCP_INIT_DELACK(tp);
497 	for (i = 0; i < TCPT_NTIMERS; i++)
498 		TCP_TIMER_INIT(tp, i);
499 
500 #ifdef TCP_SACK
501 	tp->sack_enable = tcp_do_sack;
502 #endif
503 	tp->t_flags = tcp_do_rfc1323 ? (TF_REQ_SCALE|TF_REQ_TSTMP) : 0;
504 	tp->t_inpcb = inp;
505 	/*
506 	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
507 	 * rtt estimate.  Set rttvar so that srtt + 2 * rttvar gives
508 	 * reasonable initial retransmit time.
509 	 */
510 	tp->t_srtt = TCPTV_SRTTBASE;
511 	tp->t_rttvar = tcp_rttdflt * PR_SLOWHZ << (TCP_RTTVAR_SHIFT + 2 - 1);
512 	tp->t_rttmin = TCPTV_MIN;
513 	TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
514 	    TCPTV_MIN, TCPTV_REXMTMAX);
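	/*
	 * Start the congestion window and slow-start threshold at the
	 * largest representable window; tcp_mss() is expected to scale
	 * them down once the real segment size is known.
	 */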
515 	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
516 	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
517 #ifdef INET6
518 	/* we disallow IPv4 mapped addresses completely. */
519 	if ((inp->inp_flags & INP_IPV6) == 0)
520 		tp->pf = PF_INET;
521 	else
522 		tp->pf = PF_INET6;
523 #else
524 	tp->pf = PF_INET;
525 #endif
526 
527 #ifdef INET6
528 	if (inp->inp_flags & INP_IPV6)
529 		inp->inp_ipv6.ip6_hlim = ip6_defhlim;
530 	else
531 #endif /* INET6 */
532 		inp->inp_ip.ip_ttl = ip_defttl;
533 
534 	inp->inp_ppcb = (caddr_t)tp;
535 	return (tp);
536 }
537 
538 /*
539  * Drop a TCP connection, reporting
540  * the specified error.  If connection is synchronized,
541  * then send a RST to peer.
542  */
543 struct tcpcb *
544 tcp_drop(tp, errno)
545 	struct tcpcb *tp;
546 	int errno;
547 {
548 	struct socket *so = tp->t_inpcb->inp_socket;
549 
550 	if (TCPS_HAVERCVDSYN(tp->t_state)) {
551 		tp->t_state = TCPS_CLOSED;
552 		(void) tcp_output(tp);
553 		tcpstat.tcps_drops++;
554 	} else
555 		tcpstat.tcps_conndrops++;
556 	if (errno == ETIMEDOUT && tp->t_softerror)
557 		errno = tp->t_softerror;
558 	so->so_error = errno;
559 	return (tcp_close(tp));
560 }
561 
562 /*
563  * Close a TCP control block:
564  *	discard all space held by the tcp
565  *	discard internet protocol block
566  *	wake up any sleepers
567  */
568 struct tcpcb *
569 tcp_close(struct tcpcb *tp)
570 {
571 	struct inpcb *inp = tp->t_inpcb;
572 	struct socket *so = inp->inp_socket;
573 #ifdef TCP_SACK
574 	struct sackhole *p, *q;
575 #endif
576 
577 	/* free the reassembly queue, if any */
578 	tcp_reass_lock(tp);
579 	tcp_freeq(tp);
580 	tcp_reass_unlock(tp);
581 
582 	tcp_canceltimers(tp);
583 	TCP_CLEAR_DELACK(tp);
584 	syn_cache_cleanup(tp);
585 
586 #ifdef TCP_SACK
587 	/* Free SACK holes. */
588 	q = p = tp->snd_holes;
589 	while (p != 0) {
590 		q = p->next;
591 		pool_put(&sackhl_pool, p);
592 		p = q;
593 	}
594 #endif
595 	if (tp->t_template)
596 		(void) m_free(tp->t_template);
597 	pool_put(&tcpcb_pool, tp);
598 	inp->inp_ppcb = 0;
599 	soisdisconnected(so);
600 	in_pcbdetach(inp);
601 	tcpstat.tcps_closed++;
602 	return ((struct tcpcb *)0);
603 }
604 
605 int
606 tcp_freeq(struct tcpcb *tp)
607 {
608 	struct ipqent *qe;
609 	int rv = 0;
610 
611 	while ((qe = LIST_FIRST(&tp->segq)) != NULL) {
612 		LIST_REMOVE(qe, ipqe_q);
613 		m_freem(qe->ipqe_m);
614 		pool_put(&tcpqe_pool, qe);
615 		rv = 1;
616 	}
617 	return (rv);
618 }
619 
620 void
621 tcp_drain()
622 {
623 	struct inpcb *inp;
624 
625 	/* called at splimp() */
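	/* Invoked when mbufs run short: drop any reassembly queue we can lock. */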
626 	CIRCLEQ_FOREACH(inp, &tcbtable.inpt_queue, inp_queue) {
627 		struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb;
628 
629 		if (tp != NULL) {
630 			if (tcp_reass_lock_try(tp) == 0)
631 				continue;
632 			if (tcp_freeq(tp))
633 				tcpstat.tcps_conndrained++;
634 			tcp_reass_unlock(tp);
635 		}
636 	}
637 }
638 
639 /*
640  * Compute proper scaling value for receiver window from buffer space
641  */
642 
643 void
644 tcp_rscale(struct tcpcb *tp, u_long hiwat)
645 {
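	/*
	 * Pick the smallest RFC 1323 window-scale shift that lets the
	 * requested buffer space (hiwat) be advertised through the 16-bit
	 * window field, capped at TCP_MAX_WINSHIFT.
	 */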
646 	tp->request_r_scale = 0;
647 	while (tp->request_r_scale < TCP_MAX_WINSHIFT &&
648 	       TCP_MAXWIN << tp->request_r_scale < hiwat)
649 		tp->request_r_scale++;
650 }
651 
652 /*
653  * Notify a tcp user of an asynchronous error;
654  * store error as soft error, but wake up user
655  * (for now, won't do anything until can select for soft error).
656  */
657 void
658 tcp_notify(inp, error)
659 	struct inpcb *inp;
660 	int error;
661 {
662 	struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb;
663 	struct socket *so = inp->inp_socket;
664 
665 	/*
666 	 * Ignore some errors if we are hooked up.
667 	 * If connection hasn't completed, has retransmitted several times,
668 	 * and receives a second error, give up now.  This is better
669 	 * than waiting a long time to establish a connection that
670 	 * can never complete.
671 	 */
672 	if (tp->t_state == TCPS_ESTABLISHED &&
673 	     (error == EHOSTUNREACH || error == ENETUNREACH ||
674 	      error == EHOSTDOWN)) {
675 		return;
676 	} else if (TCPS_HAVEESTABLISHED(tp->t_state) == 0 &&
677 	    tp->t_rxtshift > 3 && tp->t_softerror)
678 		so->so_error = error;
679 	else
680 		tp->t_softerror = error;
681 	wakeup((caddr_t) &so->so_timeo);
682 	sorwakeup(so);
683 	sowwakeup(so);
684 }
685 
686 #ifdef INET6
687 void
688 tcp6_ctlinput(cmd, sa, d)
689 	int cmd;
690 	struct sockaddr *sa;
691 	void *d;
692 {
693 	struct tcphdr th;
694 	void (*notify)(struct inpcb *, int) = tcp_notify;
695 	struct ip6_hdr *ip6;
696 	const struct sockaddr_in6 *sa6_src = NULL;
697 	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *)sa;
698 	struct mbuf *m;
699 	int off;
700 	struct {
701 		u_int16_t th_sport;
702 		u_int16_t th_dport;
703 	} *thp;
704 
705 	if (sa->sa_family != AF_INET6 ||
706 	    sa->sa_len != sizeof(struct sockaddr_in6))
707 		return;
708 	if ((unsigned)cmd >= PRC_NCMDS)
709 		return;
710 	else if (cmd == PRC_QUENCH) {
711 		/* XXX there's no PRC_QUENCH in IPv6 */
712 		notify = tcp_quench;
713 	} else if (PRC_IS_REDIRECT(cmd))
714 		notify = in_rtchange, d = NULL;
715 	else if (cmd == PRC_MSGSIZE)
716 		; /* special code is present, see below */
717 	else if (cmd == PRC_HOSTDEAD)
718 		d = NULL;
719 	else if (inet6ctlerrmap[cmd] == 0)
720 		return;
721 
722 	/* if the parameter is from icmp6, decode it. */
723 	if (d != NULL) {
724 		struct ip6ctlparam *ip6cp = (struct ip6ctlparam *)d;
725 		m = ip6cp->ip6c_m;
726 		ip6 = ip6cp->ip6c_ip6;
727 		off = ip6cp->ip6c_off;
728 		sa6_src = ip6cp->ip6c_src;
729 	} else {
730 		m = NULL;
731 		ip6 = NULL;
732 		sa6_src = &sa6_any;
733 	}
734 
735 	if (ip6) {
736 		/*
737 		 * XXX: We assume that when ip6 is non-NULL,
738 		 * M and OFF are valid.
739 		 */
740 
741 		/* check if we can safely examine src and dst ports */
742 		if (m->m_pkthdr.len < off + sizeof(*thp))
743 			return;
744 
745 		bzero(&th, sizeof(th));
746 #ifdef DIAGNOSTIC
747 		if (sizeof(*thp) > sizeof(th))
748 			panic("assumption failed in tcp6_ctlinput");
749 #endif
750 		m_copydata(m, off, sizeof(*thp), (caddr_t)&th);
751 
752 		if (cmd == PRC_MSGSIZE) {
753 			int valid = 0;
754 
755 			/*
756 			 * Check to see if we have a valid TCP connection
757 			 * corresponding to the address in the ICMPv6 message
758 			 * payload.
759 			 */
760 			if (in6_pcbhashlookup(&tcbtable, &sa6->sin6_addr,
761 			    th.th_dport, (struct in6_addr *)&sa6_src->sin6_addr,
762 			    th.th_sport))
763 				valid++;
764 
765 			/*
766 			 * Depending on the value of "valid" and routing table
767 			 * size (mtudisc_{hi,lo}wat), we will:
768 			 * - recalculate the new MTU and create the
769 			 *   corresponding routing entry, or
770 			 * - ignore the MTU change notification.
771 			 */
772 			icmp6_mtudisc_update((struct ip6ctlparam *)d, valid);
773 
774 			return;
775 		}
776 
777 		if (in6_pcbnotify(&tcbtable, sa, th.th_dport,
778 		    (struct sockaddr *)sa6_src, th.th_sport, cmd, NULL, notify) == 0 &&
779 		    syn_cache_count &&
780 		    (inet6ctlerrmap[cmd] == EHOSTUNREACH ||
781 		     inet6ctlerrmap[cmd] == ENETUNREACH ||
782 		     inet6ctlerrmap[cmd] == EHOSTDOWN))
783 			syn_cache_unreach((struct sockaddr *)sa6_src,
784 			    sa, &th);
785 	} else {
786 		(void) in6_pcbnotify(&tcbtable, sa, 0,
787 		    (struct sockaddr *)sa6_src, 0, cmd, NULL, notify);
788 	}
789 }
790 #endif
791 
792 void *
793 tcp_ctlinput(cmd, sa, v)
794 	int cmd;
795 	struct sockaddr *sa;
796 	void *v;
797 {
798 	struct ip *ip = v;
799 	struct tcphdr *th;
800 	extern int inetctlerrmap[];
801 	void (*notify)(struct inpcb *, int) = tcp_notify;
802 	int errno;
803 
804 	if (sa->sa_family != AF_INET)
805 		return NULL;
806 
807 	if ((unsigned)cmd >= PRC_NCMDS)
808 		return NULL;
809 	errno = inetctlerrmap[cmd];
810 	if (cmd == PRC_QUENCH)
811 		notify = tcp_quench;
812 	else if (PRC_IS_REDIRECT(cmd))
813 		notify = in_rtchange, ip = 0;
814 	else if (cmd == PRC_MSGSIZE && ip_mtudisc) {
815 		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
816 		/*
817 		 * Verify that the packet in the icmp payload refers
818 		 * to an existing TCP connection.
819 		 */
820 		/*
821 		 * XXX is it possible to get a valid PRC_MSGSIZE error for
822 		 * a non-established connection?
823 		 */
824 		if (in_pcbhashlookup(&tcbtable,
825 		    ip->ip_dst, th->th_dport, ip->ip_src, th->th_sport)) {
826 			struct icmp *icp;
827 			icp = (struct icmp *)((caddr_t)ip -
828 					      offsetof(struct icmp, icmp_ip));
829 
830 			/* Calculate new mtu and create corresponding route */
831 			icmp_mtudisc(icp);
832 		}
833 		notify = tcp_mtudisc, ip = 0;
834 	} else if (cmd == PRC_MTUINC)
835 		notify = tcp_mtudisc_increase, ip = 0;
836 	else if (cmd == PRC_HOSTDEAD)
837 		ip = 0;
838 	else if (errno == 0)
839 		return NULL;
840 
841 	if (ip) {
842 		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
843 		if (in_pcbnotify(&tcbtable, sa, th->th_dport, ip->ip_src,
844 		    th->th_sport, errno, notify) == 0 &&
845 		    syn_cache_count &&
846 		    (inetctlerrmap[cmd] == EHOSTUNREACH ||
847 		     inetctlerrmap[cmd] == ENETUNREACH ||
848 		     inetctlerrmap[cmd] == EHOSTDOWN)) {
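			/*
			 * No PCB was notified, but embryonic connections
			 * may still sit in the syn cache; pass unreachable
			 * errors for this peer on to it.
			 */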
849 			struct sockaddr_in sin;
850 
851 			bzero(&sin, sizeof(sin));
852 			sin.sin_len = sizeof(sin);
853 			sin.sin_family = AF_INET;
854 			sin.sin_port = th->th_sport;
855 			sin.sin_addr = ip->ip_src;
856 			syn_cache_unreach((struct sockaddr *)&sin,
857 			    sa, th);
858 		}
859 	} else
860 		in_pcbnotifyall(&tcbtable, sa, errno, notify);
861 
862 	return NULL;
863 }
864 
865 /*
866  * When a source quench is received, close congestion window
867  * to one segment.  We will gradually open it again as we proceed.
868  */
869 void
870 tcp_quench(inp, errno)
871 	struct inpcb *inp;
872 	int errno;
873 {
874 	struct tcpcb *tp = intotcpcb(inp);
875 
876 	if (tp)
877 		tp->snd_cwnd = tp->t_maxseg;
878 }
879 
880 #ifdef INET6
881 /*
882  * Path MTU Discovery handlers.
883  */
884 void
885 tcp6_mtudisc_callback(faddr)
886 	struct in6_addr *faddr;
887 {
888 	struct sockaddr_in6 sin6;
889 
890 	bzero(&sin6, sizeof(sin6));
891 	sin6.sin6_family = AF_INET6;
892 	sin6.sin6_len = sizeof(struct sockaddr_in6);
893 	sin6.sin6_addr = *faddr;
894 	(void) in6_pcbnotify(&tcbtable, (struct sockaddr *)&sin6, 0,
895 	    (struct sockaddr *)&sa6_any, 0, PRC_MSGSIZE, NULL, tcp_mtudisc);
896 }
897 #endif /* INET6 */
898 
899 /*
900  * On receipt of path MTU corrections, flush old route and replace it
901  * with the new one.  Retransmit all unacknowledged packets, to ensure
902  * that all packets will be received.
903  */
904 void
905 tcp_mtudisc(inp, errno)
906 	struct inpcb *inp;
907 	int errno;
908 {
909 	struct tcpcb *tp = intotcpcb(inp);
910 	struct rtentry *rt = in_pcbrtentry(inp);
911 	int change = 0;
912 
913 	if (tp != 0) {
914 		int orig_maxseg = tp->t_maxseg;
915 		if (rt != 0) {
916 			/*
917 			 * If this was not a host route, remove and realloc.
918 			 */
919 			if ((rt->rt_flags & RTF_HOST) == 0) {
920 				in_rtchange(inp, errno);
921 				if ((rt = in_pcbrtentry(inp)) == 0)
922 					return;
923 			}
924 			if (orig_maxseg != tp->t_maxseg ||
925 			    (rt->rt_rmx.rmx_locks & RTV_MTU))
926 				change = 1;
927 		}
928 		tcp_mss(tp, -1);
929 
930 		/*
931 		 * Resend unacknowledged packets
932 		 */
933 		tp->snd_nxt = tp->snd_una;
934 		if (change || errno > 0)
935 			tcp_output(tp);
936 	}
937 }
938 
939 void
940 tcp_mtudisc_increase(inp, errno)
941 	struct inpcb *inp;
942 	int errno;
943 {
944 	struct tcpcb *tp = intotcpcb(inp);
945 	struct rtentry *rt = in_pcbrtentry(inp);
946 
947 	if (tp != 0 && rt != 0) {
948 		/*
949 		 * If this was a host route, remove and realloc.
950 		 */
951 		if (rt->rt_flags & RTF_HOST)
952 			in_rtchange(inp, errno);
953 
954 		/* also takes care of congestion window */
955 		tcp_mss(tp, -1);
956 	}
957 }
958 
959 #ifdef TCP_SIGNATURE
960 int
961 tcp_signature_tdb_attach()
962 {
963 	return (0);
964 }
965 
966 int
967 tcp_signature_tdb_init(tdbp, xsp, ii)
968 	struct tdb *tdbp;
969 	struct xformsw *xsp;
970 	struct ipsecinit *ii;
971 {
972 	if ((ii->ii_authkeylen < 1) || (ii->ii_authkeylen > 80))
973 		return (EINVAL);
974 
975 	tdbp->tdb_amxkey = malloc(ii->ii_authkeylen, M_XDATA, M_DONTWAIT);
976 	if (tdbp->tdb_amxkey == NULL)
977 		return (ENOMEM);
978 	bcopy(ii->ii_authkey, tdbp->tdb_amxkey, ii->ii_authkeylen);
979 	tdbp->tdb_amxkeylen = ii->ii_authkeylen;
980 
981 	return (0);
982 }
983 
984 int
985 tcp_signature_tdb_zeroize(tdbp)
986 	struct tdb *tdbp;
987 {
988 	if (tdbp->tdb_amxkey) {
989 		bzero(tdbp->tdb_amxkey, tdbp->tdb_amxkeylen);
990 		free(tdbp->tdb_amxkey, M_XDATA);
991 		tdbp->tdb_amxkey = NULL;
992 	}
993 
994 	return (0);
995 }
996 
997 int
998 tcp_signature_tdb_input(m, tdbp, skip, protoff)
999 	struct mbuf *m;
1000 	struct tdb *tdbp;
1001 	int skip, protoff;
1002 {
1003 	return (0);
1004 }
1005 
1006 int
1007 tcp_signature_tdb_output(m, tdbp, mp, skip, protoff)
1008 	struct mbuf *m;
1009 	struct tdb *tdbp;
1010 	struct mbuf **mp;
1011 	int skip, protoff;
1012 {
1013 	return (EINVAL);
1014 }
1015 
1016 int
1017 tcp_signature_apply(fstate, data, len)
1018 	caddr_t fstate;
1019 	caddr_t data;
1020 	unsigned int len;
1021 {
1022 	MD5Update((MD5_CTX *)fstate, (char *)data, len);
1023 	return 0;
1024 }
1025 #endif /* TCP_SIGNATURE */
1026 
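/*
 * Randomized ISS generator (summary of the code below): the high 16 bits
 * of each ISS carry a permuted counter value whose top bit alternates
 * across reseeds, the low 16 bits carry 15 bits from arc4random(), and
 * the whole state is reseeded every TCP_RNDISS_OUT seconds or after
 * TCP_RNDISS_MAX values have been handed out.
 */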
1027 #define TCP_RNDISS_ROUNDS	16
1028 #define TCP_RNDISS_OUT	7200
1029 #define TCP_RNDISS_MAX	30000
1030 
1031 u_int8_t tcp_rndiss_sbox[128];
1032 u_int16_t tcp_rndiss_msb;
1033 u_int16_t tcp_rndiss_cnt;
1034 long tcp_rndiss_reseed;
1035 
1036 u_int16_t
1037 tcp_rndiss_encrypt(val)
1038 	u_int16_t val;
1039 {
1040 	u_int16_t sum = 0, i;
1041 
1042 	for (i = 0; i < TCP_RNDISS_ROUNDS; i++) {
1043 		sum += 0x79b9;
1044 		val ^= ((u_int16_t)tcp_rndiss_sbox[(val^sum) & 0x7f]) << 7;
1045 		val = ((val & 0xff) << 7) | (val >> 8);
1046 	}
1047 
1048 	return val;
1049 }
1050 
1051 void
1052 tcp_rndiss_init()
1053 {
1054 	get_random_bytes(tcp_rndiss_sbox, sizeof(tcp_rndiss_sbox));
1055 
1056 	tcp_rndiss_reseed = time.tv_sec + TCP_RNDISS_OUT;
1057 	tcp_rndiss_msb = tcp_rndiss_msb == 0x8000 ? 0 : 0x8000;
1058 	tcp_rndiss_cnt = 0;
1059 }
1060 
1061 tcp_seq
1062 tcp_rndiss_next()
1063 {
1064 	if (tcp_rndiss_cnt >= TCP_RNDISS_MAX ||
1065 	    time.tv_sec > tcp_rndiss_reseed)
1066 		tcp_rndiss_init();
1067 
1068 	/* (arc4random() & 0x7fff) ensures a 32768 byte gap between ISS */
1069 	return ((tcp_rndiss_encrypt(tcp_rndiss_cnt++) | tcp_rndiss_msb) <<16) |
1070 		(arc4random() & 0x7fff);
1071 }
1072 
1073