xref: /openbsd-src/sys/netinet/tcp_subr.c (revision 898184e3e61f9129feb5978fad5a8c6865f00b92)
1 /*	$OpenBSD: tcp_subr.c,v 1.114 2012/12/28 17:52:06 gsoares Exp $	*/
2 /*	$NetBSD: tcp_subr.c,v 1.22 1996/02/13 23:44:00 christos Exp $	*/
3 
4 /*
5  * Copyright (c) 1982, 1986, 1988, 1990, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *	@(#)COPYRIGHT	1.1 (NRL) 17 January 1995
33  *
34  * NRL grants permission for redistribution and use in source and binary
35  * forms, with or without modification, of the software and documentation
36  * created at NRL provided that the following conditions are met:
37  *
38  * 1. Redistributions of source code must retain the above copyright
39  *    notice, this list of conditions and the following disclaimer.
40  * 2. Redistributions in binary form must reproduce the above copyright
41  *    notice, this list of conditions and the following disclaimer in the
42  *    documentation and/or other materials provided with the distribution.
43  * 3. All advertising materials mentioning features or use of this software
44  *    must display the following acknowledgements:
45  * 	This product includes software developed by the University of
46  * 	California, Berkeley and its contributors.
47  * 	This product includes software developed at the Information
48  * 	Technology Division, US Naval Research Laboratory.
49  * 4. Neither the name of the NRL nor the names of its contributors
50  *    may be used to endorse or promote products derived from this software
51  *    without specific prior written permission.
52  *
53  * THE SOFTWARE PROVIDED BY NRL IS PROVIDED BY NRL AND CONTRIBUTORS ``AS
54  * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
56  * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL NRL OR
57  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
58  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
59  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
60  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
61  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
62  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
63  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64  *
65  * The views and conclusions contained in the software and documentation
66  * are those of the authors and should not be interpreted as representing
67  * official policies, either expressed or implied, of the US Naval
68  * Research Laboratory (NRL).
69  */
70 
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/proc.h>
74 #include <sys/mbuf.h>
75 #include <sys/socket.h>
76 #include <sys/socketvar.h>
77 #include <sys/protosw.h>
78 #include <sys/kernel.h>
79 #include <sys/pool.h>
80 
81 #include <net/route.h>
82 #include <net/if.h>
83 
84 #include <netinet/in.h>
85 #include <netinet/in_systm.h>
86 #include <netinet/ip.h>
87 #include <netinet/in_pcb.h>
88 #include <netinet/ip_var.h>
89 #include <netinet/ip_icmp.h>
90 #include <netinet/tcp.h>
91 #include <netinet/tcp_fsm.h>
92 #include <netinet/tcp_seq.h>
93 #include <netinet/tcp_timer.h>
94 #include <netinet/tcp_var.h>
95 #include <netinet/tcpip.h>
96 #include <dev/rndvar.h>
97 
98 #ifdef INET6
99 #include <netinet6/in6_var.h>
100 #include <netinet6/ip6protosw.h>
101 #endif /* INET6 */
102 
103 #include <crypto/md5.h>
104 
105 /* patchable/settable parameters for tcp */
106 int	tcp_mssdflt = TCP_MSS;
107 int	tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
108 
109 /* values controllable via sysctl */
110 int	tcp_do_rfc1323 = 1;
111 #ifdef TCP_SACK
112 int	tcp_do_sack = 1;	/* RFC 2018 selective ACKs */
113 #endif
114 int	tcp_ack_on_push = 0;	/* set to enable immediate ACK-on-PUSH */
115 #ifdef TCP_ECN
116 int	tcp_do_ecn = 0;		/* RFC3168 ECN enabled/disabled? */
117 #endif
118 int	tcp_do_rfc3390 = 2;	/* Increase TCP's Initial Window to 10*mss */
119 
120 u_int32_t	tcp_now = 1;
121 
122 #ifndef TCBHASHSIZE
123 #define	TCBHASHSIZE	128
124 #endif
125 int	tcbhashsize = TCBHASHSIZE;
126 
127 /* syn hash parameters */
128 #define	TCP_SYN_HASH_SIZE	293
129 #define	TCP_SYN_BUCKET_SIZE	35
130 int	tcp_syn_cache_size = TCP_SYN_HASH_SIZE;
131 int	tcp_syn_cache_limit = TCP_SYN_HASH_SIZE*TCP_SYN_BUCKET_SIZE;
132 int	tcp_syn_bucket_limit = 3*TCP_SYN_BUCKET_SIZE;
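/*
 * With the defaults above the cache holds at most 293 * 35 = 10255
 * entries in total and 3 * 35 = 105 entries per hash bucket.
 */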
133 struct	syn_cache_head tcp_syn_cache[TCP_SYN_HASH_SIZE];
134 
135 int tcp_reass_limit = NMBCLUSTERS / 2; /* hardlimit for tcpqe_pool */
136 #ifdef TCP_SACK
137 int tcp_sackhole_limit = 32*1024; /* hardlimit for sackhl_pool */
138 #endif
139 
140 #ifdef INET6
141 extern int ip6_defhlim;
142 #endif /* INET6 */
143 
144 struct pool tcpcb_pool;
145 struct pool tcpqe_pool;
146 #ifdef TCP_SACK
147 struct pool sackhl_pool;
148 #endif
149 
150 struct tcpstat tcpstat;		/* tcp statistics */
151 tcp_seq  tcp_iss;
152 
153 /*
154  * TCP initialization: pools, PCB hash table, SYN cache and timers.
155  */
156 void
157 tcp_init()
158 {
159 	tcp_iss = 1;		/* wrong */
160 	pool_init(&tcpcb_pool, sizeof(struct tcpcb), 0, 0, 0, "tcpcbpl",
161 	    NULL);
162 	pool_init(&tcpqe_pool, sizeof(struct tcpqent), 0, 0, 0, "tcpqepl",
163 	    NULL);
164 	pool_sethardlimit(&tcpqe_pool, tcp_reass_limit, NULL, 0);
165 #ifdef TCP_SACK
166 	pool_init(&sackhl_pool, sizeof(struct sackhole), 0, 0, 0, "sackhlpl",
167 	    NULL);
168 	pool_sethardlimit(&sackhl_pool, tcp_sackhole_limit, NULL, 0);
169 #endif /* TCP_SACK */
170 	in_pcbinit(&tcbtable, tcbhashsize);
171 
172 #ifdef INET6
173 	/*
174 	 * Since sizeof(struct ip6_hdr) > sizeof(struct ip), we
175 	 * do max length checks/computations only on the former.
176 	 */
177 	if (max_protohdr < (sizeof(struct ip6_hdr) + sizeof(struct tcphdr)))
178 		max_protohdr = (sizeof(struct ip6_hdr) + sizeof(struct tcphdr));
179 	if ((max_linkhdr + sizeof(struct ip6_hdr) + sizeof(struct tcphdr)) >
180 	    MHLEN)
181 		panic("tcp_init");
182 
183 	icmp6_mtudisc_callback_register(tcp6_mtudisc_callback);
184 #endif /* INET6 */
185 
186 	/* Initialize the compressed state engine. */
187 	syn_cache_init();
188 
189 	/* Initialize timer state. */
190 	tcp_timer_init();
191 }
192 
193 /*
194  * Create the template used to send TCP packets on a connection.
195  * Called after the host entry is created; allocates an mbuf and
196  * fills in a skeletal TCP/IP header, minimizing the amount of work
197  * necessary when the connection is used.
198  *
199  * To support IPv6 in addition to IPv4 and considering that the sizes of
200  * the IPv4 and IPv6 headers are not the same, we now use a separate pointer
201  * for the TCP header.  Also, we made the former tcpiphdr header pointer
202  * into just an IP overlay pointer, with casting as appropriate for v6. rja
203  */
204 struct mbuf *
205 tcp_template(tp)
206 	struct tcpcb *tp;
207 {
208 	struct inpcb *inp = tp->t_inpcb;
209 	struct mbuf *m;
210 	struct tcphdr *th;
211 
212 	if ((m = tp->t_template) == 0) {
213 		m = m_get(M_DONTWAIT, MT_HEADER);
214 		if (m == NULL)
215 			return (0);
216 
217 		switch (tp->pf) {
218 		case 0:	/*default to PF_INET*/
219 #ifdef INET
220 		case AF_INET:
221 			m->m_len = sizeof(struct ip);
222 			break;
223 #endif /* INET */
224 #ifdef INET6
225 		case AF_INET6:
226 			m->m_len = sizeof(struct ip6_hdr);
227 			break;
228 #endif /* INET6 */
229 		}
230 		m->m_len += sizeof (struct tcphdr);
231 
232 		/*
233 		 * The link header, network header, TCP header, and TCP options
234 		 * all must fit in this mbuf. For now, assume the worst case of
235 		 * TCP options size. Eventually, compute this from tp flags.
236 		 */
237 		if (m->m_len + MAX_TCPOPTLEN + max_linkhdr >= MHLEN) {
238 			MCLGET(m, M_DONTWAIT);
239 			if ((m->m_flags & M_EXT) == 0) {
240 				m_free(m);
241 				return (0);
242 			}
243 		}
244 	}
245 
246 	switch (tp->pf) {
247 #ifdef INET
248 	case AF_INET:
249 		{
250 			struct ipovly *ipovly;
251 
252 			ipovly = mtod(m, struct ipovly *);
253 
254 			bzero(ipovly->ih_x1, sizeof ipovly->ih_x1);
255 			ipovly->ih_pr = IPPROTO_TCP;
256 			ipovly->ih_len = htons(sizeof (struct tcphdr));
257 			ipovly->ih_src = inp->inp_laddr;
258 			ipovly->ih_dst = inp->inp_faddr;
259 
260 			th = (struct tcphdr *)(mtod(m, caddr_t) +
261 				sizeof(struct ip));
262 			th->th_sum = in_cksum_phdr(ipovly->ih_src.s_addr,
263 			    ipovly->ih_dst.s_addr,
264 			    htons(sizeof (struct tcphdr) + IPPROTO_TCP));
265 		}
266 		break;
267 #endif /* INET */
268 #ifdef INET6
269 	case AF_INET6:
270 		{
271 			struct ip6_hdr *ip6;
272 
273 			ip6 = mtod(m, struct ip6_hdr *);
274 
275 			ip6->ip6_src = inp->inp_laddr6;
276 			ip6->ip6_dst = inp->inp_faddr6;
277 			ip6->ip6_flow = htonl(0x60000000) |
278 			    (inp->inp_flowinfo & IPV6_FLOWLABEL_MASK);
279 
280 			ip6->ip6_nxt = IPPROTO_TCP;
281 			ip6->ip6_plen = htons(sizeof(struct tcphdr)); /*XXX*/
282 			ip6->ip6_hlim = in6_selecthlim(inp, NULL);	/*XXX*/
283 
284 			th = (struct tcphdr *)(mtod(m, caddr_t) +
285 				sizeof(struct ip6_hdr));
286 			th->th_sum = 0;
287 		}
288 		break;
289 #endif /* INET6 */
290 	}
291 
292 	th->th_sport = inp->inp_lport;
293 	th->th_dport = inp->inp_fport;
294 	th->th_seq = 0;
295 	th->th_ack = 0;
296 	th->th_x2  = 0;
297 	th->th_off = 5;
298 	th->th_flags = 0;
299 	th->th_win = 0;
300 	th->th_urp = 0;
301 	return (m);
302 }
303 
304 /*
305  * Send a single message to the TCP at address specified by
306  * the given TCP/IP header.  If th0 == NULL, the segment is built from
307  * a copy of the header template and sent as a bare ACK; this is used
308  * to force keep alive messages out using the TCP template for a
309  * connection, tp->t_template.  If th0 is given, we send a message
310  * with the given flags back to the TCP peer that originated the
311  * segment th0, with the source and destination addresses and ports
312  * swapped.
313  *
314  * In any case the ack and sequence number of the transmitted
315  * segment are as specified by the parameters.
316  */
317 #ifdef INET6
318 /* This function looks hairy, because it was so IPv4-dependent. */
319 #endif /* INET6 */
320 void
321 tcp_respond(struct tcpcb *tp, caddr_t template, struct tcphdr *th0,
322     tcp_seq ack, tcp_seq seq, int flags, u_int rtableid)
323 {
324 	int tlen;
325 	int win = 0;
326 	struct mbuf *m = 0;
327 	struct route *ro = 0;
328 	struct tcphdr *th;
329 	struct ip *ip;
330 	struct ipovly *ih;
331 #ifdef INET6
332 	struct ip6_hdr *ip6;
333 #endif
334 	int af;		/* af on wire */
335 
336 	if (tp) {
337 		win = sbspace(&tp->t_inpcb->inp_socket->so_rcv);
338 		/*
339 		 * If this is called with an unconnected
340 		 * socket/tp/pcb (tp->pf is 0), we lose.
341 		 */
342 		af = tp->pf;
343 
344 		/*
345 		 * The route/route6 distinction is meaningless
346 		 * unless you're allocating space or passing parameters.
347 		 */
348 		ro = &tp->t_inpcb->inp_route;
349 	} else
350 		af = (((struct ip *)template)->ip_v == 6) ? AF_INET6 : AF_INET;
351 
352 	m = m_gethdr(M_DONTWAIT, MT_HEADER);
353 	if (m == NULL)
354 		return;
355 	m->m_data += max_linkhdr;
356 	tlen = 0;
357 
358 #define xchg(a,b,type) do { type t; t=a; a=b; b=t; } while (0)
359 	switch (af) {
360 #ifdef INET6
361 	case AF_INET6:
362 		ip6 = mtod(m, struct ip6_hdr *);
363 		th = (struct tcphdr *)(ip6 + 1);
364 		tlen = sizeof(*ip6) + sizeof(*th);
365 		if (th0) {
366 			bcopy(template, ip6, sizeof(*ip6));
367 			bcopy(th0, th, sizeof(*th));
368 			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
369 		} else {
370 			bcopy(template, ip6, tlen);
371 		}
372 		break;
373 #endif /* INET6 */
374 	case AF_INET:
375 		ip = mtod(m, struct ip *);
376 		th = (struct tcphdr *)(ip + 1);
377 		tlen = sizeof(*ip) + sizeof(*th);
378 		if (th0) {
379 			bcopy(template, ip, sizeof(*ip));
380 			bcopy(th0, th, sizeof(*th));
381 			xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, u_int32_t);
382 		} else {
383 			bcopy(template, ip, tlen);
384 		}
385 		break;
386 	}
387 	if (th0)
388 		xchg(th->th_dport, th->th_sport, u_int16_t);
389 	else
390 		flags = TH_ACK;
391 #undef xchg
392 
393 	m->m_len = tlen;
394 	m->m_pkthdr.len = tlen;
395 	m->m_pkthdr.rcvif = (struct ifnet *) 0;
396 	th->th_seq = htonl(seq);
397 	th->th_ack = htonl(ack);
398 	th->th_x2 = 0;
399 	th->th_off = sizeof (struct tcphdr) >> 2;
400 	th->th_flags = flags;
401 	if (tp)
402 		win >>= tp->rcv_scale;
403 	if (win > TCP_MAXWIN)
404 		win = TCP_MAXWIN;
405 	th->th_win = htons((u_int16_t)win);
406 	th->th_urp = 0;
407 
408 	/* force routing domain */
409 	if (tp)
410 		m->m_pkthdr.rdomain = tp->t_inpcb->inp_rtableid;
411 	else
412 		m->m_pkthdr.rdomain = rtableid;
413 
414 	switch (af) {
415 #ifdef INET6
416 	case AF_INET6:
417 		ip6->ip6_flow = htonl(0x60000000);
418 		ip6->ip6_nxt  = IPPROTO_TCP;
419 		ip6->ip6_hlim = in6_selecthlim(tp ? tp->t_inpcb : NULL, NULL);	/*XXX*/
420 		ip6->ip6_plen = tlen - sizeof(struct ip6_hdr);
421 		th->th_sum = 0;
422 		th->th_sum = in6_cksum(m, IPPROTO_TCP,
423 		   sizeof(struct ip6_hdr), ip6->ip6_plen);
424 		HTONS(ip6->ip6_plen);
425 		ip6_output(m, tp ? tp->t_inpcb->inp_outputopts6 : NULL,
426 		    (struct route_in6 *)ro, 0, NULL, NULL,
427 		    tp ? tp->t_inpcb : NULL);
428 		break;
429 #endif /* INET6 */
430 	case AF_INET:
431 		ih = (struct ipovly *)ip;
432 		bzero(ih->ih_x1, sizeof ih->ih_x1);
433 		ih->ih_len = htons((u_short)tlen - sizeof(struct ip));
434 
435 		/*
436 		 * There's no point deferring to hardware checksum processing
437 		 * here, as we only send a minimal TCP packet whose checksum
438 		 * we need to compute in any case.
439 		 */
440 		th->th_sum = 0;
441 		th->th_sum = in_cksum(m, tlen);
442 		ip->ip_len = htons(tlen);
443 		ip->ip_ttl = ip_defttl;
444 		ip_output(m, (void *)NULL, ro, ip_mtudisc ? IP_MTUDISC : 0,
445 			(void *)NULL, tp ? tp->t_inpcb : (void *)NULL);
446 	}
447 }
448 
449 /*
450  * Create a new TCP control block, making an
451  * empty reassembly queue and hooking it to the argument
452  * protocol control block.
453  */
454 struct tcpcb *
455 tcp_newtcpcb(struct inpcb *inp)
456 {
457 	struct tcpcb *tp;
458 	int i;
459 
460 	tp = pool_get(&tcpcb_pool, PR_NOWAIT|PR_ZERO);
461 	if (tp == NULL)
462 		return ((struct tcpcb *)0);
463 	TAILQ_INIT(&tp->t_segq);
464 	tp->t_maxseg = tcp_mssdflt;
465 	tp->t_maxopd = 0;
466 
467 	TCP_INIT_DELACK(tp);
468 	for (i = 0; i < TCPT_NTIMERS; i++)
469 		TCP_TIMER_INIT(tp, i);
470 	timeout_set(&tp->t_reap_to, tcp_reaper, tp);
471 
472 #ifdef TCP_SACK
473 	tp->sack_enable = tcp_do_sack;
474 #endif
475 	tp->t_flags = tcp_do_rfc1323 ? (TF_REQ_SCALE|TF_REQ_TSTMP) : 0;
476 	tp->t_inpcb = inp;
477 	/*
478 	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
479 	 * rtt estimate.  Set rttvar so that srtt + 2 * rttvar gives
480 	 * reasonable initial retransmit time.
481 	 * a reasonable initial retransmit time.
482 	tp->t_srtt = TCPTV_SRTTBASE;
483 	tp->t_rttvar = tcp_rttdflt * PR_SLOWHZ <<
484 	    (TCP_RTTVAR_SHIFT + TCP_RTT_BASE_SHIFT - 1);
485 	tp->t_rttmin = TCPTV_MIN;
486 	TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
487 	    TCPTV_MIN, TCPTV_REXMTMAX);
488 	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
489 	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
490 
491 	tp->t_pmtud_mtu_sent = 0;
492 	tp->t_pmtud_mss_acked = 0;
493 
494 #ifdef INET6
495 	/* we disallow IPv4-mapped addresses completely. */
496 	if ((inp->inp_flags & INP_IPV6) == 0)
497 		tp->pf = PF_INET;
498 	else
499 		tp->pf = PF_INET6;
500 #else
501 	tp->pf = PF_INET;
502 #endif
503 
504 #ifdef INET6
505 	if (inp->inp_flags & INP_IPV6)
506 		inp->inp_ipv6.ip6_hlim = ip6_defhlim;
507 	else
508 #endif /* INET6 */
509 		inp->inp_ip.ip_ttl = ip_defttl;
510 
511 	inp->inp_ppcb = (caddr_t)tp;
512 	return (tp);
513 }
514 
515 /*
516  * Drop a TCP connection, reporting
517  * the specified error.  If connection is synchronized,
518  * then send a RST to peer.
519  */
520 struct tcpcb *
521 tcp_drop(tp, errno)
522 	struct tcpcb *tp;
523 	int errno;
524 {
525 	struct socket *so = tp->t_inpcb->inp_socket;
526 
527 	if (TCPS_HAVERCVDSYN(tp->t_state)) {
528 		tp->t_state = TCPS_CLOSED;
529 		(void) tcp_output(tp);
530 		tcpstat.tcps_drops++;
531 	} else
532 		tcpstat.tcps_conndrops++;
533 	if (errno == ETIMEDOUT && tp->t_softerror)
534 		errno = tp->t_softerror;
535 	so->so_error = errno;
536 	return (tcp_close(tp));
537 }
538 
539 /*
540  * Close a TCP control block:
541  *	discard all space held by the tcp
542  *	discard internet protocol block
543  *	wake up any sleepers
544  */
545 struct tcpcb *
546 tcp_close(struct tcpcb *tp)
547 {
548 	struct inpcb *inp = tp->t_inpcb;
549 	struct socket *so = inp->inp_socket;
550 #ifdef TCP_SACK
551 	struct sackhole *p, *q;
552 #endif
553 
554 	/* free the reassembly queue, if any */
555 	tcp_freeq(tp);
556 
557 	tcp_canceltimers(tp);
558 	TCP_CLEAR_DELACK(tp);
559 	syn_cache_cleanup(tp);
560 
561 #ifdef TCP_SACK
562 	/* Free SACK holes. */
563 	q = p = tp->snd_holes;
564 	while (p != 0) {
565 		q = p->next;
566 		pool_put(&sackhl_pool, p);
567 		p = q;
568 	}
569 #endif
570 	if (tp->t_template)
571 		(void) m_free(tp->t_template);
572 
573 	tp->t_flags |= TF_DEAD;
574 	timeout_add(&tp->t_reap_to, 0);
575 
576 	inp->inp_ppcb = 0;
577 	soisdisconnected(so);
578 	in_pcbdetach(inp);
579 	return ((struct tcpcb *)0);
580 }
581 
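/*
 * Final reclamation of a dead tcpcb, run from the t_reap_to timeout
 * scheduled by tcp_close(): return the control block to tcpcb_pool and
 * count the connection as closed.
 */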
582 void
583 tcp_reaper(void *arg)
584 {
585 	struct tcpcb *tp = arg;
586 	int s;
587 
588 	s = splsoftnet();
589 	pool_put(&tcpcb_pool, tp);
590 	splx(s);
591 	tcpstat.tcps_closed++;
592 }
593 
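/*
 * Free all entries on the reassembly queue of a connection.
 * Returns 1 if anything was freed, 0 if the queue was already empty.
 */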
594 int
595 tcp_freeq(struct tcpcb *tp)
596 {
597 	struct tcpqent *qe;
598 	int rv = 0;
599 
600 	while ((qe = TAILQ_FIRST(&tp->t_segq)) != NULL) {
601 		TAILQ_REMOVE(&tp->t_segq, qe, tcpqe_q);
602 		m_freem(qe->tcpqe_m);
603 		pool_put(&tcpqe_pool, qe);
604 		rv = 1;
605 	}
606 	return (rv);
607 }
608 
609 /*
610  * Compute proper scaling value for receiver window from buffer space
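 * (e.g. a 200000 byte socket buffer yields request_r_scale = 2, since
 * TCP_MAXWIN << 1 = 131070 < 200000 <= TCP_MAXWIN << 2 = 262140).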
611  */
612 
613 void
614 tcp_rscale(struct tcpcb *tp, u_long hiwat)
615 {
616 	tp->request_r_scale = 0;
617 	while (tp->request_r_scale < TCP_MAX_WINSHIFT &&
618 	       TCP_MAXWIN << tp->request_r_scale < hiwat)
619 		tp->request_r_scale++;
620 }
621 
622 /*
623  * Notify a tcp user of an asynchronous error;
624  * store error as soft error, but wake up user
625  * (for now, won't do anything until can select for soft error).
626  */
627 void
628 tcp_notify(inp, error)
629 	struct inpcb *inp;
630 	int error;
631 {
632 	struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb;
633 	struct socket *so = inp->inp_socket;
634 
635 	/*
636 	 * Ignore some errors if we are hooked up.
637 	 * If connection hasn't completed, has retransmitted several times,
638 	 * and receives a second error, give up now.  This is better
639 	 * than waiting a long time to establish a connection that
640 	 * can never complete.
641 	 */
642 	if (tp->t_state == TCPS_ESTABLISHED &&
643 	     (error == EHOSTUNREACH || error == ENETUNREACH ||
644 	      error == EHOSTDOWN)) {
645 		return;
646 	} else if (TCPS_HAVEESTABLISHED(tp->t_state) == 0 &&
647 	    tp->t_rxtshift > 3 && tp->t_softerror)
648 		so->so_error = error;
649 	else
650 		tp->t_softerror = error;
651 	wakeup((caddr_t) &so->so_timeo);
652 	sorwakeup(so);
653 	sowwakeup(so);
654 }
655 
656 #ifdef INET6
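/*
 * Handle ICMPv6 errors for TCP: validate the originating sockaddr,
 * decode the embedded TCP header from the ICMPv6 payload, hand
 * PRC_MSGSIZE to the path MTU discovery machinery, and otherwise
 * notify the matching connection or SYN cache entry.
 */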
657 void
658 tcp6_ctlinput(cmd, sa, d)
659 	int cmd;
660 	struct sockaddr *sa;
661 	void *d;
662 {
663 	struct tcphdr th;
664 	struct tcpcb *tp;
665 	void (*notify)(struct inpcb *, int) = tcp_notify;
666 	struct ip6_hdr *ip6;
667 	const struct sockaddr_in6 *sa6_src = NULL;
668 	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *)sa;
669 	struct inpcb *inp;
670 	struct mbuf *m;
671 	tcp_seq seq;
672 	int off;
673 	struct {
674 		u_int16_t th_sport;
675 		u_int16_t th_dport;
676 		u_int32_t th_seq;
677 	} *thp;
678 
679 	if (sa->sa_family != AF_INET6 ||
680 	    sa->sa_len != sizeof(struct sockaddr_in6) ||
681 	    IN6_IS_ADDR_UNSPECIFIED(&sa6->sin6_addr) ||
682 	    IN6_IS_ADDR_V4MAPPED(&sa6->sin6_addr))
683 		return;
684 	if ((unsigned)cmd >= PRC_NCMDS)
685 		return;
686 	else if (cmd == PRC_QUENCH) {
687 		/*
688 		 * Don't honor ICMP Source Quench messages meant for
689 		 * TCP connections.
690 		 */
691 		/* XXX there's no PRC_QUENCH in IPv6 */
692 		return;
693 	} else if (PRC_IS_REDIRECT(cmd))
694 		notify = in_rtchange, d = NULL;
695 	else if (cmd == PRC_MSGSIZE)
696 		; /* special code is present, see below */
697 	else if (cmd == PRC_HOSTDEAD)
698 		d = NULL;
699 	else if (inet6ctlerrmap[cmd] == 0)
700 		return;
701 
702 	/* if the parameter is from icmp6, decode it. */
703 	if (d != NULL) {
704 		struct ip6ctlparam *ip6cp = (struct ip6ctlparam *)d;
705 		m = ip6cp->ip6c_m;
706 		ip6 = ip6cp->ip6c_ip6;
707 		off = ip6cp->ip6c_off;
708 		sa6_src = ip6cp->ip6c_src;
709 	} else {
710 		m = NULL;
711 		ip6 = NULL;
712 		sa6_src = &sa6_any;
713 	}
714 
715 	if (ip6) {
716 		/*
717 		 * XXX: We assume that when ip6 is non-NULL,
718 		 * m and off are valid.
719 		 */
720 
721 		/* check if we can safely examine src and dst ports */
722 		if (m->m_pkthdr.len < off + sizeof(*thp))
723 			return;
724 
725 		bzero(&th, sizeof(th));
726 #ifdef DIAGNOSTIC
727 		if (sizeof(*thp) > sizeof(th))
728 			panic("assumption failed in tcp6_ctlinput");
729 #endif
730 		m_copydata(m, off, sizeof(*thp), (caddr_t)&th);
731 
732 		/*
733 		 * Check to see if we have a valid TCP connection
734 		 * corresponding to the address in the ICMPv6 message
735 		 * payload.
736 		 */
737 		inp = in6_pcbhashlookup(&tcbtable, &sa6->sin6_addr,
738 		    th.th_dport, (struct in6_addr *)&sa6_src->sin6_addr,
739 		    th.th_sport);
740 		if (cmd == PRC_MSGSIZE) {
741 			/*
742 			 * Depending on the value of "valid" and routing table
743 			 * size (mtudisc_{hi,lo}wat), we will:
744 			 * - recalculate the new MTU and create the
745 			 *   corresponding routing entry, or
746 			 * - ignore the MTU change notification.
747 			 */
748 			icmp6_mtudisc_update((struct ip6ctlparam *)d, inp != NULL);
749 			return;
750 		}
751 		if (inp) {
752 			seq = ntohl(th.th_seq);
753 			if (inp->inp_socket &&
754 			    (tp = intotcpcb(inp)) &&
755 			    SEQ_GEQ(seq, tp->snd_una) &&
756 			    SEQ_LT(seq, tp->snd_max))
757 				notify(inp, inet6ctlerrmap[cmd]);
758 		} else if (syn_cache_count &&
759 		    (inet6ctlerrmap[cmd] == EHOSTUNREACH ||
760 		     inet6ctlerrmap[cmd] == ENETUNREACH ||
761 		     inet6ctlerrmap[cmd] == EHOSTDOWN))
762 			syn_cache_unreach((struct sockaddr *)sa6_src,
763 			    sa, &th, /* XXX */ 0);
764 	} else {
765 		(void) in6_pcbnotify(&tcbtable, sa, 0,
766 		    (struct sockaddr *)sa6_src, 0, cmd, NULL, notify);
767 	}
768 }
769 #endif
770 
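/*
 * Handle ICMP errors for IPv4 TCP connections: Source Quench is
 * ignored, redirects update the cached route, PRC_MSGSIZE feeds path
 * MTU discovery and PRC_MTUINC its counterpart for MTU increases.
 * Everything else is mapped through inetctlerrmap[] and delivered to
 * the matching connection (or SYN cache entry) via the notify function.
 */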
771 void *
772 tcp_ctlinput(int cmd, struct sockaddr *sa, u_int rdomain, void *v)
773 {
774 	struct ip *ip = v;
775 	struct tcphdr *th;
776 	struct tcpcb *tp;
777 	struct inpcb *inp;
778 	struct in_addr faddr;
779 	tcp_seq seq;
780 	u_int mtu;
781 	extern int inetctlerrmap[];
782 	void (*notify)(struct inpcb *, int) = tcp_notify;
783 	int errno;
784 
785 	if (sa->sa_family != AF_INET)
786 		return NULL;
787 	faddr = satosin(sa)->sin_addr;
788 	if (faddr.s_addr == INADDR_ANY)
789 		return NULL;
790 
791 	if ((unsigned)cmd >= PRC_NCMDS)
792 		return NULL;
793 	errno = inetctlerrmap[cmd];
794 	if (cmd == PRC_QUENCH)
795 		/*
796 		 * Don't honor ICMP Source Quench messages meant for
797 		 * TCP connections.
798 		 */
799 		return NULL;
800 	else if (PRC_IS_REDIRECT(cmd))
801 		notify = in_rtchange, ip = 0;
802 	else if (cmd == PRC_MSGSIZE && ip_mtudisc && ip) {
803 		/*
804 		 * Verify that the packet in the icmp payload refers
805 		 * to an existing TCP connection.
806 		 */
807 		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
808 		seq = ntohl(th->th_seq);
809 		inp = in_pcbhashlookup(&tcbtable,
810 		    ip->ip_dst, th->th_dport, ip->ip_src, th->th_sport,
811 		    rdomain);
812 		if (inp && (tp = intotcpcb(inp)) &&
813 		    SEQ_GEQ(seq, tp->snd_una) &&
814 		    SEQ_LT(seq, tp->snd_max)) {
815 			struct icmp *icp;
816 			icp = (struct icmp *)((caddr_t)ip -
817 					      offsetof(struct icmp, icmp_ip));
818 
819 			/*
820 			 * If the ICMP message advertises a Next-Hop MTU
821 			 * equal or larger than the maximum packet size we have
822 			 * ever sent, drop the message.
823 			 */
824 			mtu = (u_int)ntohs(icp->icmp_nextmtu);
825 			if (mtu >= tp->t_pmtud_mtu_sent)
826 				return NULL;
827 			if (mtu >= tcp_hdrsz(tp) + tp->t_pmtud_mss_acked) {
828 				/*
829 				 * Calculate new MTU, and create corresponding
830 				 * route (traditional PMTUD).
831 				 */
832 				tp->t_flags &= ~TF_PMTUD_PEND;
833 				icmp_mtudisc(icp, inp->inp_rtableid);
834 			} else {
835 				/*
836 				 * Record the information received in the ICMP
837 				 * message; act on it later.
838 				 * If we had already recorded an ICMP message,
839 				 * replace the old one only if the new message
840 				 * refers to an older TCP segment.
841 				 */
842 				if (tp->t_flags & TF_PMTUD_PEND) {
843 					if (SEQ_LT(tp->t_pmtud_th_seq, seq))
844 						return NULL;
845 				} else
846 					tp->t_flags |= TF_PMTUD_PEND;
847 				tp->t_pmtud_th_seq = seq;
848 				tp->t_pmtud_nextmtu = icp->icmp_nextmtu;
849 				tp->t_pmtud_ip_len = icp->icmp_ip.ip_len;
850 				tp->t_pmtud_ip_hl = icp->icmp_ip.ip_hl;
851 				return NULL;
852 			}
853 		} else {
854 			/* ignore if we don't have a matching connection */
855 			return NULL;
856 		}
857 		notify = tcp_mtudisc, ip = 0;
858 	} else if (cmd == PRC_MTUINC)
859 		notify = tcp_mtudisc_increase, ip = 0;
860 	else if (cmd == PRC_HOSTDEAD)
861 		ip = 0;
862 	else if (errno == 0)
863 		return NULL;
864 
865 	if (ip) {
866 		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
867 		inp = in_pcbhashlookup(&tcbtable,
868 		    ip->ip_dst, th->th_dport, ip->ip_src, th->th_sport,
869 		    rdomain);
870 		if (inp) {
871 			seq = ntohl(th->th_seq);
872 			if (inp->inp_socket &&
873 			    (tp = intotcpcb(inp)) &&
874 			    SEQ_GEQ(seq, tp->snd_una) &&
875 			    SEQ_LT(seq, tp->snd_max))
876 				notify(inp, errno);
877 		} else if (syn_cache_count &&
878 		    (inetctlerrmap[cmd] == EHOSTUNREACH ||
879 		     inetctlerrmap[cmd] == ENETUNREACH ||
880 		     inetctlerrmap[cmd] == EHOSTDOWN)) {
881 			struct sockaddr_in sin;
882 
883 			bzero(&sin, sizeof(sin));
884 			sin.sin_len = sizeof(sin);
885 			sin.sin_family = AF_INET;
886 			sin.sin_port = th->th_sport;
887 			sin.sin_addr = ip->ip_src;
888 			syn_cache_unreach((struct sockaddr *)&sin,
889 			    sa, th, rdomain);
890 		}
891 	} else
892 		in_pcbnotifyall(&tcbtable, sa, rdomain, errno, notify);
893 
894 	return NULL;
895 }
896 
897 
898 #ifdef INET6
899 /*
900  * Path MTU Discovery handlers.
901  */
902 void
903 tcp6_mtudisc_callback(faddr)
904 	struct in6_addr *faddr;
905 {
906 	struct sockaddr_in6 sin6;
907 
908 	bzero(&sin6, sizeof(sin6));
909 	sin6.sin6_family = AF_INET6;
910 	sin6.sin6_len = sizeof(struct sockaddr_in6);
911 	sin6.sin6_addr = *faddr;
912 	(void) in6_pcbnotify(&tcbtable, (struct sockaddr *)&sin6, 0,
913 	    (struct sockaddr *)&sa6_any, 0, PRC_MSGSIZE, NULL, tcp_mtudisc);
914 }
915 #endif /* INET6 */
916 
917 /*
918  * On receipt of path MTU corrections, flush old route and replace it
919  * with the new one.  Retransmit all unacknowledged packets, to ensure
920  * that all packets will be received.
921  */
922 void
923 tcp_mtudisc(inp, errno)
924 	struct inpcb *inp;
925 	int errno;
926 {
927 	struct tcpcb *tp = intotcpcb(inp);
928 	struct rtentry *rt = in_pcbrtentry(inp);
929 	int change = 0;
930 
931 	if (tp != 0) {
932 		int orig_maxseg = tp->t_maxseg;
933 		if (rt != 0) {
934 			/*
935 			 * If this was not a host route, remove and realloc.
936 			 */
937 			if ((rt->rt_flags & RTF_HOST) == 0) {
938 				in_rtchange(inp, errno);
939 				if ((rt = in_pcbrtentry(inp)) == 0)
940 					return;
941 			}
942 			if (orig_maxseg != tp->t_maxseg ||
943 			    (rt->rt_rmx.rmx_locks & RTV_MTU))
944 				change = 1;
945 		}
946 		tcp_mss(tp, -1);
947 
948 		/*
949 		 * Resend unacknowledged packets
950 		 */
951 		tp->snd_nxt = tp->snd_una;
952 		if (change || errno > 0)
953 			tcp_output(tp);
954 	}
955 }
956 
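/*
 * The path MTU may have increased (PRC_MTUINC): drop the cached host
 * route, if any, and recompute the MSS, which also resets the
 * congestion window.
 */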
957 void
958 tcp_mtudisc_increase(inp, errno)
959 	struct inpcb *inp;
960 	int errno;
961 {
962 	struct tcpcb *tp = intotcpcb(inp);
963 	struct rtentry *rt = in_pcbrtentry(inp);
964 
965 	if (tp != 0 && rt != 0) {
966 		/*
967 		 * If this was a host route, remove and realloc.
968 		 */
969 		if (rt->rt_flags & RTF_HOST)
970 			in_rtchange(inp, errno);
971 
972 		/* also takes care of congestion window */
973 		tcp_mss(tp, -1);
974 	}
975 }
976 
977 #define TCP_ISS_CONN_INC 4096
978 int tcp_secret_init;
979 u_char tcp_secret[16];
980 MD5_CTX tcp_secret_ctx;
981 
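/*
 * Choose the initial send sequence number and timestamp modulation for
 * a new connection: MD5 a lazily initialized per-boot secret together
 * with the connection's ports and addresses, add the first word of the
 * digest to a global counter stepped by TCP_ISS_CONN_INC to form the
 * ISS, and use the second word (ts_modulate) to randomize the TCP
 * timestamps sent on this connection.
 */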
982 void
983 tcp_set_iss_tsm(struct tcpcb *tp)
984 {
985 	MD5_CTX ctx;
986 	u_int32_t digest[4];
987 
988 	if (tcp_secret_init == 0) {
989 		arc4random_buf(tcp_secret, sizeof(tcp_secret));
990 		MD5Init(&tcp_secret_ctx);
991 		MD5Update(&tcp_secret_ctx, tcp_secret, sizeof(tcp_secret));
992 		tcp_secret_init = 1;
993 	}
994 	ctx = tcp_secret_ctx;
995 	MD5Update(&ctx, (char *)&tp->t_inpcb->inp_lport, sizeof(u_short));
996 	MD5Update(&ctx, (char *)&tp->t_inpcb->inp_fport, sizeof(u_short));
997 	if (tp->pf == AF_INET6) {
998 		MD5Update(&ctx, (char *)&tp->t_inpcb->inp_laddr6,
999 		    sizeof(struct in6_addr));
1000 		MD5Update(&ctx, (char *)&tp->t_inpcb->inp_faddr6,
1001 		    sizeof(struct in6_addr));
1002 	} else {
1003 		MD5Update(&ctx, (char *)&tp->t_inpcb->inp_laddr,
1004 		    sizeof(struct in_addr));
1005 		MD5Update(&ctx, (char *)&tp->t_inpcb->inp_faddr,
1006 		    sizeof(struct in_addr));
1007 	}
1008 	MD5Final((u_char *)digest, &ctx);
1009 	tcp_iss += TCP_ISS_CONN_INC;
1010 	tp->iss = digest[0] + tcp_iss;
1011 	tp->ts_modulate = digest[1];
1012 }
1013 
1014 #ifdef TCP_SIGNATURE
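/*
 * Minimal TDB (xformsw) glue for TCP MD5 signatures: the "SA" only has
 * to carry the shared key, so init/zeroize manage tdb_amxkey while the
 * input/output hooks do no packet processing; the actual MD5
 * computation is done by tcp_signature() below.
 */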
1015 int
1016 tcp_signature_tdb_attach()
1017 {
1018 	return (0);
1019 }
1020 
1021 int
1022 tcp_signature_tdb_init(tdbp, xsp, ii)
1023 	struct tdb *tdbp;
1024 	struct xformsw *xsp;
1025 	struct ipsecinit *ii;
1026 {
1027 	if ((ii->ii_authkeylen < 1) || (ii->ii_authkeylen > 80))
1028 		return (EINVAL);
1029 
1030 	tdbp->tdb_amxkey = malloc(ii->ii_authkeylen, M_XDATA, M_NOWAIT);
1031 	if (tdbp->tdb_amxkey == NULL)
1032 		return (ENOMEM);
1033 	bcopy(ii->ii_authkey, tdbp->tdb_amxkey, ii->ii_authkeylen);
1034 	tdbp->tdb_amxkeylen = ii->ii_authkeylen;
1035 
1036 	return (0);
1037 }
1038 
1039 int
1040 tcp_signature_tdb_zeroize(tdbp)
1041 	struct tdb *tdbp;
1042 {
1043 	if (tdbp->tdb_amxkey) {
1044 		explicit_bzero(tdbp->tdb_amxkey, tdbp->tdb_amxkeylen);
1045 		free(tdbp->tdb_amxkey, M_XDATA);
1046 		tdbp->tdb_amxkey = NULL;
1047 	}
1048 
1049 	return (0);
1050 }
1051 
1052 int
1053 tcp_signature_tdb_input(m, tdbp, skip, protoff)
1054 	struct mbuf *m;
1055 	struct tdb *tdbp;
1056 	int skip, protoff;
1057 {
1058 	return (0);
1059 }
1060 
1061 int
1062 tcp_signature_tdb_output(m, tdbp, mp, skip, protoff)
1063 	struct mbuf *m;
1064 	struct tdb *tdbp;
1065 	struct mbuf **mp;
1066 	int skip, protoff;
1067 {
1068 	return (EINVAL);
1069 }
1070 
1071 int
1072 tcp_signature_apply(fstate, data, len)
1073 	caddr_t fstate;
1074 	caddr_t data;
1075 	unsigned int len;
1076 {
1077 	MD5Update((MD5_CTX *)fstate, (char *)data, len);
1078 	return 0;
1079 }
1080 
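/*
 * Compute the TCP MD5 signature (RFC 2385) for the segment in m:
 * MD5 over the pseudo-header, the TCP header with a zeroed checksum
 * (converted back to network byte order when doswap is set), the TCP
 * payload, and finally the shared key from the TDB.  The 16 byte
 * digest is written to sig; returns 0 on success, -1 if the payload
 * could not be walked.
 */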
1081 int
1082 tcp_signature(struct tdb *tdb, int af, struct mbuf *m, struct tcphdr *th,
1083     int iphlen, int doswap, char *sig)
1084 {
1085 	MD5_CTX ctx;
1086 	int len;
1087 	struct tcphdr th0;
1088 
1089 	MD5Init(&ctx);
1090 
1091 	switch (af) {
1092 	case 0:
1093 #ifdef INET
1094 	case AF_INET: {
1095 		struct ippseudo ippseudo;
1096 		struct ip *ip;
1097 
1098 		ip = mtod(m, struct ip *);
1099 
1100 		ippseudo.ippseudo_src = ip->ip_src;
1101 		ippseudo.ippseudo_dst = ip->ip_dst;
1102 		ippseudo.ippseudo_pad = 0;
1103 		ippseudo.ippseudo_p = IPPROTO_TCP;
1104 		ippseudo.ippseudo_len = htons(m->m_pkthdr.len - iphlen);
1105 
1106 		MD5Update(&ctx, (char *)&ippseudo,
1107 		    sizeof(struct ippseudo));
1108 		break;
1109 		}
1110 #endif
1111 #ifdef INET6
1112 	case AF_INET6: {
1113 		struct ip6_hdr_pseudo ip6pseudo;
1114 		struct ip6_hdr *ip6;
1115 
1116 		ip6 = mtod(m, struct ip6_hdr *);
1117 		bzero(&ip6pseudo, sizeof(ip6pseudo));
1118 		ip6pseudo.ip6ph_src = ip6->ip6_src;
1119 		ip6pseudo.ip6ph_dst = ip6->ip6_dst;
1120 		in6_clearscope(&ip6pseudo.ip6ph_src);
1121 		in6_clearscope(&ip6pseudo.ip6ph_dst);
1122 		ip6pseudo.ip6ph_nxt = IPPROTO_TCP;
1123 		ip6pseudo.ip6ph_len = htonl(m->m_pkthdr.len - iphlen);
1124 
1125 		MD5Update(&ctx, (char *)&ip6pseudo,
1126 		    sizeof(ip6pseudo));
1127 		break;
1128 		}
1129 #endif
1130 	}
1131 
1132 	th0 = *th;
1133 	th0.th_sum = 0;
1134 
1135 	if (doswap) {
1136 		HTONL(th0.th_seq);
1137 		HTONL(th0.th_ack);
1138 		HTONS(th0.th_win);
1139 		HTONS(th0.th_urp);
1140 	}
1141 	MD5Update(&ctx, (char *)&th0, sizeof(th0));
1142 
1143 	len = m->m_pkthdr.len - iphlen - th->th_off * sizeof(uint32_t);
1144 
1145 	if (len > 0 &&
1146 	    m_apply(m, iphlen + th->th_off * sizeof(uint32_t), len,
1147 	    tcp_signature_apply, (caddr_t)&ctx))
1148 		return (-1);
1149 
1150 	MD5Update(&ctx, tdb->tdb_amxkey, tdb->tdb_amxkeylen);
1151 	MD5Final(sig, &ctx);
1152 
1153 	return (0);
1154 }
1155 #endif /* TCP_SIGNATURE */
1156