/*	$OpenBSD: tcp_input.c,v 1.365 2020/06/19 22:47:22 procter Exp $	*/
/*	$NetBSD: tcp_input.c,v 1.23 1996/02/13 23:43:44 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)COPYRIGHT	1.1 (NRL) 17 January 1995
 *
 * NRL grants permission for redistribution and use in source and binary
 * forms, with or without modification, of the software and documentation
 * created at NRL provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgements:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 *	This product includes software developed at the Information
 *	Technology Division, US Naval Research Laboratory.
 * 4. Neither the name of the NRL nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THE SOFTWARE PROVIDED BY NRL IS PROVIDED BY NRL AND CONTRIBUTORS ``AS
 * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL NRL OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation
 * are those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of the US Naval
 * Research Laboratory (NRL).
 */

#include "pf.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/timeout.h>
#include <sys/kernel.h>
#include <sys/pool.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_debug.h>

#if NPF > 0
#include <net/pfvar.h>
#endif

struct	tcpiphdr tcp_saveti;

int tcp_mss_adv(struct mbuf *, int);
int tcp_flush_queue(struct tcpcb *);

#ifdef INET6
#include <netinet6/in6_var.h>
#include <netinet6/nd6.h>

struct  tcpipv6hdr tcp_saveti6;

/* for the packet header length in the mbuf */
#define M_PH_LEN(m)      (((struct mbuf *)(m))->m_pkthdr.len)
#define M_V6_LEN(m)      (M_PH_LEN(m) - sizeof(struct ip6_hdr))
#define M_V4_LEN(m)      (M_PH_LEN(m) - sizeof(struct ip))
#endif /* INET6 */

int	tcprexmtthresh = 3;
int	tcptv_keep_init = TCPTV_KEEP_INIT;

int tcp_rst_ppslim = 100;		/* 100pps */
int tcp_rst_ppslim_count = 0;
struct timeval tcp_rst_ppslim_last;

int tcp_ackdrop_ppslim = 100;		/* 100pps */
int tcp_ackdrop_ppslim_count = 0;
struct timeval tcp_ackdrop_ppslim_last;

#define TCP_PAWS_IDLE	(24 * 24 * 60 * 60 * PR_SLOWHZ)
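/*
 * TCP_PAWS_IDLE is 24 days (24 d * 24 h * 60 min * 60 s) expressed in
 * slow-timeout ticks; with the usual PR_SLOWHZ of 2 that is 4147200
 * ticks.
 */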

/* for modulo comparisons of timestamps */
#define TSTMP_LT(a,b)	((int)((a)-(b)) < 0)
#define TSTMP_GEQ(a,b)	((int)((a)-(b)) >= 0)
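/*
 * The casts make these comparisons modulo 2^32, so they remain correct
 * across timestamp wraparound.  Example (illustrative values):
 * TSTMP_LT(0xfffffff0, 0x00000010) holds, because
 * (int)(0xfffffff0 - 0x00000010) == -32 < 0, even though the first
 * operand is numerically larger.
 */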

/* for TCP SACK comparisons */
#define	SEQ_MIN(a,b)	(SEQ_LT(a,b) ? (a) : (b))
#define	SEQ_MAX(a,b)	(SEQ_GT(a,b) ? (a) : (b))

/*
 * Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint.
 */
#ifdef INET6
#define ND6_HINT(tp) \
do { \
	if (tp && tp->t_inpcb && (tp->t_inpcb->inp_flags & INP_IPV6) &&	\
	    rtisvalid(tp->t_inpcb->inp_route6.ro_rt)) {			\
		nd6_nud_hint(tp->t_inpcb->inp_route6.ro_rt);		\
	} \
} while (0)
#else
#define ND6_HINT(tp)
#endif

#ifdef TCP_ECN
/*
 * ECN (Explicit Congestion Notification) support based on RFC3168
 * implementation note:
 *   snd_last is used to track a recovery phase.
 *   when cwnd is reduced, snd_last is set to snd_max.
 *   while snd_last > snd_una, the sender is in a recovery phase and
 *   its cwnd should not be reduced again.
 *   snd_last follows snd_una when not in a recovery phase.
 */
#endif
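/*
 * Sketch of the recovery tracking described above (hypothetical
 * sequence numbers): after a congestion signal with snd_una == 1000
 * and snd_max == 5000, snd_last is set to 5000.  While snd_una is
 * still below 5000 further signals leave cwnd alone; once the ACK
 * for 5000 arrives, a later signal may reduce cwnd again.
 */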

/*
 * Macro to compute ACK transmission behavior.  Delay the ACK unless
 * we have already delayed an ACK (must send an ACK every two segments).
 * We also ACK immediately if we received a PUSH and the ACK-on-PUSH
 * option is enabled or when the packet is coming from a loopback
 * interface.
 */
#define	TCP_SETUP_ACK(tp, tiflags, m) \
do { \
	struct ifnet *ifp = NULL; \
	if (m && (m->m_flags & M_PKTHDR)) \
		ifp = if_get(m->m_pkthdr.ph_ifidx); \
	if (TCP_TIMER_ISARMED(tp, TCPT_DELACK) || \
	    (tcp_ack_on_push && (tiflags) & TH_PUSH) || \
	    (ifp && (ifp->if_flags & IFF_LOOPBACK))) \
		tp->t_flags |= TF_ACKNOW; \
	else \
		TCP_TIMER_ARM_MSEC(tp, TCPT_DELACK, tcp_delack_msecs); \
	if_put(ifp); \
} while (0)
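/*
 * Resulting cadence (illustrative): the first in-order segment arms
 * TCPT_DELACK; the second finds the timer armed, sets TF_ACKNOW and
 * forces the ACK out at once -- at least one ACK for every two
 * segments, as required above.
 */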

void	 tcp_sack_partialack(struct tcpcb *, struct tcphdr *);
void	 tcp_newreno_partialack(struct tcpcb *, struct tcphdr *);

void	 syn_cache_put(struct syn_cache *);
void	 syn_cache_rm(struct syn_cache *);
int	 syn_cache_respond(struct syn_cache *, struct mbuf *);
void	 syn_cache_timer(void *);
void	 syn_cache_reaper(void *);
void	 syn_cache_insert(struct syn_cache *, struct tcpcb *);
void	 syn_cache_reset(struct sockaddr *, struct sockaddr *,
		struct tcphdr *, u_int);
int	 syn_cache_add(struct sockaddr *, struct sockaddr *, struct tcphdr *,
		unsigned int, struct socket *, struct mbuf *, u_char *, int,
		struct tcp_opt_info *, tcp_seq *);
struct socket *syn_cache_get(struct sockaddr *, struct sockaddr *,
		struct tcphdr *, unsigned int, unsigned int, struct socket *,
		struct mbuf *);
struct syn_cache *syn_cache_lookup(struct sockaddr *, struct sockaddr *,
		struct syn_cache_head **, u_int);

/*
 * Insert segment ti into reassembly queue of tcp with
 * control block tp.  Return TH_FIN if reassembly now includes
 * a segment with FIN.  The macro form does the common case inline
 * (segment is the next to be received on an established connection,
 * and the queue is empty), avoiding linkage into and removal
 * from the queue and repetition of various conversions.
 * Set DELACK for segments received in order, but ack immediately
 * when segments are out of order (so fast retransmit can work).
 */

int
tcp_reass(struct tcpcb *tp, struct tcphdr *th, struct mbuf *m, int *tlen)
{
	struct tcpqent *p, *q, *nq, *tiqe;

	/*
	 * Allocate a new queue entry, before we throw away any data.
	 * If we can't, just drop the packet.  XXX
	 */
	tiqe = pool_get(&tcpqe_pool, PR_NOWAIT);
	if (tiqe == NULL) {
		tiqe = TAILQ_LAST(&tp->t_segq, tcpqehead);
		if (tiqe != NULL && th->th_seq == tp->rcv_nxt) {
			/* Reuse last entry since new segment fills a hole */
			m_freem(tiqe->tcpqe_m);
			TAILQ_REMOVE(&tp->t_segq, tiqe, tcpqe_q);
		}
		if (tiqe == NULL || th->th_seq != tp->rcv_nxt) {
			/* Flush segment queue for this connection */
			tcp_freeq(tp);
			tcpstat_inc(tcps_rcvmemdrop);
			m_freem(m);
			return (0);
		}
	}

	/*
	 * Find a segment which begins after this one does.
	 */
	for (p = NULL, q = TAILQ_FIRST(&tp->t_segq); q != NULL;
	    p = q, q = TAILQ_NEXT(q, tcpqe_q))
		if (SEQ_GT(q->tcpqe_tcp->th_seq, th->th_seq))
			break;

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us.
	 */
	if (p != NULL) {
		struct tcphdr *phdr = p->tcpqe_tcp;
		int i;

		/* conversion to int (in i) handles seq wraparound */
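		/*
		 * Example (hypothetical numbers): a preceding segment
		 * with th_seq 100 and th_reseqlen 50 covers [100, 150);
		 * a new segment with th_seq 120 gives i == 30, so its
		 * first 30 bytes duplicate queued data and are trimmed
		 * by m_adj() below.
		 */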
		i = phdr->th_seq + phdr->th_reseqlen - th->th_seq;
		if (i > 0) {
			if (i >= *tlen) {
				tcpstat_pkt(tcps_rcvduppack, tcps_rcvdupbyte,
				    *tlen);
				m_freem(m);
				pool_put(&tcpqe_pool, tiqe);
				return (0);
			}
			m_adj(m, i);
			*tlen -= i;
			th->th_seq += i;
		}
	}
	tcpstat_pkt(tcps_rcvoopack, tcps_rcvoobyte, *tlen);

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	for (; q != NULL; q = nq) {
		struct tcphdr *qhdr = q->tcpqe_tcp;
		int i = (th->th_seq + *tlen) - qhdr->th_seq;

		if (i <= 0)
			break;
		if (i < qhdr->th_reseqlen) {
			qhdr->th_seq += i;
			qhdr->th_reseqlen -= i;
			m_adj(q->tcpqe_m, i);
			break;
		}
		nq = TAILQ_NEXT(q, tcpqe_q);
		m_freem(q->tcpqe_m);
		TAILQ_REMOVE(&tp->t_segq, q, tcpqe_q);
		pool_put(&tcpqe_pool, q);
	}

	/* Insert the new segment queue entry into place. */
	tiqe->tcpqe_m = m;
	th->th_reseqlen = *tlen;
	tiqe->tcpqe_tcp = th;
	if (p == NULL) {
		TAILQ_INSERT_HEAD(&tp->t_segq, tiqe, tcpqe_q);
	} else {
		TAILQ_INSERT_AFTER(&tp->t_segq, p, tiqe, tcpqe_q);
	}

	if (th->th_seq != tp->rcv_nxt)
		return (0);

	return (tcp_flush_queue(tp));
}

int
tcp_flush_queue(struct tcpcb *tp)
{
	struct socket *so = tp->t_inpcb->inp_socket;
	struct tcpqent *q, *nq;
	int flags;

	/*
	 * Present data to user, advancing rcv_nxt through
	 * completed sequence space.
	 */
	if (TCPS_HAVEESTABLISHED(tp->t_state) == 0)
		return (0);
	q = TAILQ_FIRST(&tp->t_segq);
	if (q == NULL || q->tcpqe_tcp->th_seq != tp->rcv_nxt)
		return (0);
	if (tp->t_state == TCPS_SYN_RECEIVED && q->tcpqe_tcp->th_reseqlen)
		return (0);
	do {
		tp->rcv_nxt += q->tcpqe_tcp->th_reseqlen;
		flags = q->tcpqe_tcp->th_flags & TH_FIN;

		nq = TAILQ_NEXT(q, tcpqe_q);
		TAILQ_REMOVE(&tp->t_segq, q, tcpqe_q);
		ND6_HINT(tp);
		if (so->so_state & SS_CANTRCVMORE)
			m_freem(q->tcpqe_m);
		else
			sbappendstream(so, &so->so_rcv, q->tcpqe_m);
		pool_put(&tcpqe_pool, q);
		q = nq;
	} while (q != NULL && q->tcpqe_tcp->th_seq == tp->rcv_nxt);
	tp->t_flags |= TF_BLOCKOUTPUT;
	sorwakeup(so);
	tp->t_flags &= ~TF_BLOCKOUTPUT;
	return (flags);
}

/*
 * TCP input routine, follows pages 65-76 of the
 * protocol specification dated September, 1981 very closely.
 */
int
tcp_input(struct mbuf **mp, int *offp, int proto, int af)
{
	struct mbuf *m = *mp;
	int iphlen = *offp;
	struct ip *ip = NULL;
	struct inpcb *inp = NULL;
	u_int8_t *optp = NULL;
	int optlen = 0;
	int tlen, off;
	struct tcpcb *otp = NULL, *tp = NULL;
	int tiflags;
	struct socket *so = NULL;
	int todrop, acked, ourfinisacked;
	int hdroptlen = 0;
	short ostate;
	caddr_t saveti;
	tcp_seq iss, *reuse = NULL;
	u_long tiwin;
	struct tcp_opt_info opti;
	struct tcphdr *th;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
#endif /* INET6 */
#ifdef IPSEC
	struct m_tag *mtag;
	struct tdb_ident *tdbi;
	struct tdb *tdb;
	int error;
#endif /* IPSEC */
#ifdef TCP_ECN
	u_char iptos;
#endif

	tcpstat_inc(tcps_rcvtotal);

	opti.ts_present = 0;
	opti.maxseg = 0;

	/*
	 * RFC1122 4.2.3.10, p. 104: discard bcast/mcast SYN
	 */
	if (m->m_flags & (M_BCAST|M_MCAST))
		goto drop;

	/*
	 * Get IP and TCP header together in first mbuf.
	 * Note: IP leaves IP header in first mbuf.
	 */
	IP6_EXTHDR_GET(th, struct tcphdr *, m, iphlen, sizeof(*th));
	if (!th) {
		tcpstat_inc(tcps_rcvshort);
		return IPPROTO_DONE;
	}

	tlen = m->m_pkthdr.len - iphlen;
	switch (af) {
	case AF_INET:
		ip = mtod(m, struct ip *);
#ifdef TCP_ECN
		/* save ip_tos before clearing it for checksum */
		iptos = ip->ip_tos;
#endif
		break;
#ifdef INET6
	case AF_INET6:
		ip6 = mtod(m, struct ip6_hdr *);
#ifdef TCP_ECN
		iptos = (ntohl(ip6->ip6_flow) >> 20) & 0xff;
#endif

		/*
		 * Be proactive about unspecified IPv6 address in source.
		 * As we use all-zero to indicate unbounded/unconnected pcb,
		 * unspecified IPv6 address can be used to confuse us.
		 *
		 * Note that packets with unspecified IPv6 destination are
		 * already dropped in ip6_input.
		 */
		if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
			/* XXX stat */
			goto drop;
		}

		/* Discard packets to multicast */
		if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) {
			/* XXX stat */
			goto drop;
		}
		break;
#endif
	default:
		unhandled_af(af);
	}

	/*
	 * Checksum extended TCP header and data.
	 */
	if ((m->m_pkthdr.csum_flags & M_TCP_CSUM_IN_OK) == 0) {
		int sum;

		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_IN_BAD) {
			tcpstat_inc(tcps_rcvbadsum);
			goto drop;
		}
		tcpstat_inc(tcps_inswcsum);
		switch (af) {
		case AF_INET:
			sum = in4_cksum(m, IPPROTO_TCP, iphlen, tlen);
			break;
#ifdef INET6
		case AF_INET6:
			sum = in6_cksum(m, IPPROTO_TCP, sizeof(struct ip6_hdr),
			    tlen);
			break;
#endif
		}
		if (sum != 0) {
			tcpstat_inc(tcps_rcvbadsum);
			goto drop;
		}
	}

	/*
	 * Check that TCP offset makes sense,
	 * pull out TCP options and adjust length.		XXX
	 */
	off = th->th_off << 2;
	if (off < sizeof(struct tcphdr) || off > tlen) {
		tcpstat_inc(tcps_rcvbadoff);
		goto drop;
	}
	tlen -= off;
	if (off > sizeof(struct tcphdr)) {
		IP6_EXTHDR_GET(th, struct tcphdr *, m, iphlen, off);
		if (!th) {
			tcpstat_inc(tcps_rcvshort);
			return IPPROTO_DONE;
		}
		optlen = off - sizeof(struct tcphdr);
		optp = (u_int8_t *)(th + 1);
		/*
		 * Do quick retrieval of timestamp options ("options
		 * prediction?").  If timestamp is the only option and it's
		 * formatted as recommended in RFC 1323 appendix A, we
		 * quickly get the values now and not bother calling
		 * tcp_dooptions(), etc.
		 */
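		/*
		 * Layout being matched (per RFC 1323 appendix A): two
		 * NOPs, then kind 8 (timestamps), length 10, followed by
		 * the 4-byte TSval and TSecr; TCPOPT_TSTAMP_HDR is that
		 * leading 32-bit word, 0x0101080a.
		 */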
		if ((optlen == TCPOLEN_TSTAMP_APPA ||
		     (optlen > TCPOLEN_TSTAMP_APPA &&
			optp[TCPOLEN_TSTAMP_APPA] == TCPOPT_EOL)) &&
		     *(u_int32_t *)optp == htonl(TCPOPT_TSTAMP_HDR) &&
		     (th->th_flags & TH_SYN) == 0) {
			opti.ts_present = 1;
			opti.ts_val = ntohl(*(u_int32_t *)(optp + 4));
			opti.ts_ecr = ntohl(*(u_int32_t *)(optp + 8));
			optp = NULL;	/* we've parsed the options */
		}
	}
	tiflags = th->th_flags;

	/*
	 * Convert TCP protocol specific fields to host format.
	 */
	th->th_seq = ntohl(th->th_seq);
	th->th_ack = ntohl(th->th_ack);
	th->th_win = ntohs(th->th_win);
	th->th_urp = ntohs(th->th_urp);

	/*
	 * Locate pcb for segment.
	 */
#if NPF > 0
	inp = pf_inp_lookup(m);
#endif
findpcb:
	if (inp == NULL) {
		switch (af) {
#ifdef INET6
		case AF_INET6:
			inp = in6_pcbhashlookup(&tcbtable, &ip6->ip6_src,
			    th->th_sport, &ip6->ip6_dst, th->th_dport,
			    m->m_pkthdr.ph_rtableid);
			break;
#endif
		case AF_INET:
			inp = in_pcbhashlookup(&tcbtable, ip->ip_src,
			    th->th_sport, ip->ip_dst, th->th_dport,
			    m->m_pkthdr.ph_rtableid);
			break;
		}
	}
	if (inp == NULL) {
		tcpstat_inc(tcps_pcbhashmiss);
		switch (af) {
#ifdef INET6
		case AF_INET6:
			inp = in6_pcblookup_listen(&tcbtable, &ip6->ip6_dst,
			    th->th_dport, m, m->m_pkthdr.ph_rtableid);
			break;
#endif /* INET6 */
		case AF_INET:
			inp = in_pcblookup_listen(&tcbtable, ip->ip_dst,
			    th->th_dport, m, m->m_pkthdr.ph_rtableid);
			break;
		}
		/*
		 * If the state is CLOSED (i.e., TCB does not exist) then
		 * all data in the incoming segment is discarded.
		 * If the TCB exists but is in CLOSED state, it is embryonic,
		 * but should either do a listen or a connect soon.
		 */
	}
#ifdef IPSEC
	if (ipsec_in_use) {
		/* Find most recent IPsec tag */
		mtag = m_tag_find(m, PACKET_TAG_IPSEC_IN_DONE, NULL);
		if (mtag != NULL) {
			tdbi = (struct tdb_ident *)(mtag + 1);
			tdb = gettdb(tdbi->rdomain, tdbi->spi,
			    &tdbi->dst, tdbi->proto);
		} else
			tdb = NULL;
		ipsp_spd_lookup(m, af, iphlen, &error, IPSP_DIRECTION_IN,
		    tdb, inp, 0);
		if (error) {
			tcpstat_inc(tcps_rcvnosec);
			goto drop;
		}
	}
#endif /* IPSEC */

	if (inp == NULL) {
		tcpstat_inc(tcps_noport);
		goto dropwithreset_ratelim;
	}

	KASSERT(sotoinpcb(inp->inp_socket) == inp);
	KASSERT(intotcpcb(inp) == NULL || intotcpcb(inp)->t_inpcb == inp);
	soassertlocked(inp->inp_socket);

	/* Check the minimum TTL for socket. */
	switch (af) {
	case AF_INET:
		if (inp->inp_ip_minttl && inp->inp_ip_minttl > ip->ip_ttl)
			goto drop;
		break;
#ifdef INET6
	case AF_INET6:
		if (inp->inp_ip6_minhlim &&
		    inp->inp_ip6_minhlim > ip6->ip6_hlim)
			goto drop;
		break;
#endif
	}

	tp = intotcpcb(inp);
	if (tp == NULL)
		goto dropwithreset_ratelim;
	if (tp->t_state == TCPS_CLOSED)
		goto drop;

	/* Unscale the window into a 32-bit value. */
	if ((tiflags & TH_SYN) == 0)
		tiwin = th->th_win << tp->snd_scale;
	else
		tiwin = th->th_win;
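	/*
	 * Example (hypothetical numbers): th_win 500 with snd_scale 7
	 * yields tiwin == 64000 bytes.  Window scaling never applies to
	 * SYN segments, hence the unscaled value in the else branch.
	 */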

	so = inp->inp_socket;
	if (so->so_options & (SO_DEBUG|SO_ACCEPTCONN)) {
		union syn_cache_sa src;
		union syn_cache_sa dst;

		bzero(&src, sizeof(src));
		bzero(&dst, sizeof(dst));
		switch (af) {
		case AF_INET:
			src.sin.sin_len = sizeof(struct sockaddr_in);
			src.sin.sin_family = AF_INET;
			src.sin.sin_addr = ip->ip_src;
			src.sin.sin_port = th->th_sport;

			dst.sin.sin_len = sizeof(struct sockaddr_in);
			dst.sin.sin_family = AF_INET;
			dst.sin.sin_addr = ip->ip_dst;
			dst.sin.sin_port = th->th_dport;
			break;
#ifdef INET6
		case AF_INET6:
			src.sin6.sin6_len = sizeof(struct sockaddr_in6);
			src.sin6.sin6_family = AF_INET6;
			src.sin6.sin6_addr = ip6->ip6_src;
			src.sin6.sin6_port = th->th_sport;

			dst.sin6.sin6_len = sizeof(struct sockaddr_in6);
			dst.sin6.sin6_family = AF_INET6;
			dst.sin6.sin6_addr = ip6->ip6_dst;
			dst.sin6.sin6_port = th->th_dport;
			break;
#endif /* INET6 */
		}

		if (so->so_options & SO_DEBUG) {
			otp = tp;
			ostate = tp->t_state;
			switch (af) {
#ifdef INET6
			case AF_INET6:
				saveti = (caddr_t) &tcp_saveti6;
				memcpy(&tcp_saveti6.ti6_i, ip6, sizeof(*ip6));
				memcpy(&tcp_saveti6.ti6_t, th, sizeof(*th));
				break;
#endif
			case AF_INET:
				saveti = (caddr_t) &tcp_saveti;
				memcpy(&tcp_saveti.ti_i, ip, sizeof(*ip));
				memcpy(&tcp_saveti.ti_t, th, sizeof(*th));
				break;
			}
		}
		if (so->so_options & SO_ACCEPTCONN) {
			switch (tiflags & (TH_RST|TH_SYN|TH_ACK)) {

			case TH_SYN|TH_ACK|TH_RST:
			case TH_SYN|TH_RST:
			case TH_ACK|TH_RST:
			case TH_RST:
				syn_cache_reset(&src.sa, &dst.sa, th,
				    inp->inp_rtableid);
				goto drop;

			case TH_SYN|TH_ACK:
				/*
				 * Received a SYN,ACK.  This should
				 * never happen while we are in
				 * LISTEN.  Send an RST.
				 */
				goto badsyn;

			case TH_ACK:
				so = syn_cache_get(&src.sa, &dst.sa,
					th, iphlen, tlen, so, m);
				if (so == NULL) {
					/*
					 * We don't have a SYN for
					 * this ACK; send an RST.
					 */
					goto badsyn;
				} else if (so == (struct socket *)(-1)) {
					/*
					 * We were unable to create
					 * the connection.  If the
					 * 3-way handshake was
					 * completed, an RST has
					 * been sent to the peer.
					 * Since the mbuf might be
					 * in use for the reply,
					 * do not free it.
					 */
					m = *mp = NULL;
					goto drop;
				} else {
					/*
					 * We have created a
					 * full-blown connection.
					 */
					tp = NULL;
					inp = sotoinpcb(so);
					tp = intotcpcb(inp);
					if (tp == NULL)
						goto badsyn;	/*XXX*/

				}
				break;

			default:
				/*
				 * None of RST, SYN or ACK was set.
				 * This is an invalid packet for a
				 * TCB in LISTEN state.  Send an RST.
				 */
				goto badsyn;

			case TH_SYN:
				/*
				 * Received a SYN.
				 */
#ifdef INET6
				/*
				 * If deprecated address is forbidden, we do
				 * not accept SYN to deprecated interface
				 * address to prevent any new inbound
				 * connection from getting established.
				 * When we do not accept SYN, we send a TCP
				 * RST, with deprecated source address (instead
				 * of dropping it).  We compromise because it is
				 * much better for the peer to receive an RST,
				 * and the RST will be the final packet of the
				 * exchange.
				 *
				 * If we do not forbid deprecated addresses, we
				 * accept the SYN packet.  RFC2462 does not
				 * suggest dropping SYN in this case.
				 * Deciphering RFC2462 5.5.4, it says this:
				 * 1. use of deprecated addr with existing
				 *    communication is okay - "SHOULD continue
				 *    to be used"
				 * 2. use of it with new communication:
				 *   (2a) "SHOULD NOT be used if alternate
				 *        address with sufficient scope is
				 *        available"
				 *   (2b) nothing mentioned otherwise.
				 * Here we fall into (2b) case as we have no
				 * choice in our source address selection - we
				 * must obey the peer.
				 *
				 * The wording in RFC2462 is confusing, and
				 * there are multiple descriptions of
				 * deprecated address handling - worse, they
				 * are not exactly the same.  I believe 5.5.4
				 * is the best one, so we follow 5.5.4.
				 */
				if (ip6 && !ip6_use_deprecated) {
					struct in6_ifaddr *ia6;
					struct ifnet *ifp =
					    if_get(m->m_pkthdr.ph_ifidx);

					if (ifp &&
					    (ia6 = in6ifa_ifpwithaddr(ifp,
					    &ip6->ip6_dst)) &&
					    (ia6->ia6_flags &
					    IN6_IFF_DEPRECATED)) {
						tp = NULL;
						if_put(ifp);
						goto dropwithreset;
					}
					if_put(ifp);
				}
#endif

				/*
				 * LISTEN socket received a SYN
				 * from itself?  This can't possibly
				 * be valid; drop the packet.
				 */
				if (th->th_dport == th->th_sport) {
					switch (af) {
#ifdef INET6
					case AF_INET6:
						if (IN6_ARE_ADDR_EQUAL(&ip6->ip6_src,
						    &ip6->ip6_dst)) {
							tcpstat_inc(tcps_badsyn);
							goto drop;
						}
						break;
#endif /* INET6 */
					case AF_INET:
						if (ip->ip_dst.s_addr == ip->ip_src.s_addr) {
							tcpstat_inc(tcps_badsyn);
							goto drop;
						}
						break;
					}
				}

				/*
				 * SYN looks ok; create compressed TCP
				 * state for it.
				 */
				if (so->so_qlen > so->so_qlimit ||
				    syn_cache_add(&src.sa, &dst.sa, th, iphlen,
				    so, m, optp, optlen, &opti, reuse) == -1) {
					tcpstat_inc(tcps_dropsyn);
					goto drop;
				}
				return IPPROTO_DONE;
			}
		}
	}

#ifdef DIAGNOSTIC
	/*
	 * Should not happen now that all embryonic connections
	 * are handled with compressed state.
	 */
	if (tp->t_state == TCPS_LISTEN)
		panic("tcp_input: TCPS_LISTEN");
#endif

#if NPF > 0
	pf_inp_link(m, inp);
#endif

	/*
	 * Segment received on connection.
	 * Reset idle time and keep-alive timer.
	 */
	tp->t_rcvtime = tcp_now;
	if (TCPS_HAVEESTABLISHED(tp->t_state))
		TCP_TIMER_ARM(tp, TCPT_KEEP, tcp_keepidle);

	if (tp->sack_enable)
		tcp_del_sackholes(tp, th); /* Delete stale SACK holes */

	/*
	 * Process options.
	 */
#ifdef TCP_SIGNATURE
	if (optp || (tp->t_flags & TF_SIGNATURE))
#else
	if (optp)
#endif
		if (tcp_dooptions(tp, optp, optlen, th, m, iphlen, &opti,
		    m->m_pkthdr.ph_rtableid))
			goto drop;

	if (opti.ts_present && opti.ts_ecr) {
		int rtt_test;

		/* subtract out the tcp timestamp modulator */
		opti.ts_ecr -= tp->ts_modulate;

		/* make sure ts_ecr is sensible */
		rtt_test = tcp_now - opti.ts_ecr;
		if (rtt_test < 0 || rtt_test > TCP_RTT_MAX)
			opti.ts_ecr = 0;
	}

#ifdef TCP_ECN
	/* if congestion experienced, set ECE bit in subsequent packets. */
	if ((iptos & IPTOS_ECN_MASK) == IPTOS_ECN_CE) {
		tp->t_flags |= TF_RCVD_CE;
		tcpstat_inc(tcps_ecn_rcvce);
	}
#endif
	/*
	 * Header prediction: check for the two common cases
	 * of a uni-directional data xfer.  If the packet has
	 * no control flags, is in-sequence, the window didn't
	 * change and we're not retransmitting, it's a
	 * candidate.  If the length is zero and the ack moved
	 * forward, we're the sender side of the xfer.  Just
	 * free the data acked & wake any higher level process
	 * that was blocked waiting for space.  If the length
	 * is non-zero and the ack didn't move, we're the
	 * receiver side.  If we're getting packets in-order
	 * (the reassembly queue is empty), add the data to
	 * the socket buffer and note that we need a delayed ack.
	 */
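	/*
	 * Example (illustrative): an established bulk-data receiver
	 * seeing th_seq == rcv_nxt, no flags besides TH_ACK, an
	 * unchanged window and snd_nxt == snd_max takes one of the two
	 * fast paths below instead of the full state machine.
	 */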
	if (tp->t_state == TCPS_ESTABLISHED &&
#ifdef TCP_ECN
	    (tiflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ECE|TH_CWR|TH_ACK)) == TH_ACK &&
#else
	    (tiflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
#endif
	    (!opti.ts_present || TSTMP_GEQ(opti.ts_val, tp->ts_recent)) &&
	    th->th_seq == tp->rcv_nxt &&
	    tiwin && tiwin == tp->snd_wnd &&
	    tp->snd_nxt == tp->snd_max) {

		/*
		 * If last ACK falls within this segment's sequence numbers,
		 * record the timestamp.
		 * Fix from Braden, see Stevens p. 870
		 */
		if (opti.ts_present && SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
			tp->ts_recent_age = tcp_now;
			tp->ts_recent = opti.ts_val;
		}

		if (tlen == 0) {
			if (SEQ_GT(th->th_ack, tp->snd_una) &&
			    SEQ_LEQ(th->th_ack, tp->snd_max) &&
			    tp->snd_cwnd >= tp->snd_wnd &&
			    tp->t_dupacks == 0) {
				/*
				 * this is a pure ack for outstanding data.
				 */
				tcpstat_inc(tcps_predack);
				if (opti.ts_present && opti.ts_ecr)
					tcp_xmit_timer(tp, tcp_now - opti.ts_ecr);
				else if (tp->t_rtttime &&
				    SEQ_GT(th->th_ack, tp->t_rtseq))
					tcp_xmit_timer(tp,
					    tcp_now - tp->t_rtttime);
				acked = th->th_ack - tp->snd_una;
				tcpstat_pkt(tcps_rcvackpack, tcps_rcvackbyte,
				    acked);
				ND6_HINT(tp);
				sbdrop(so, &so->so_snd, acked);

				/*
				 * If we had a pending ICMP message that
				 * refers to data that have just been
				 * acknowledged, disregard the recorded ICMP
				 * message.
				 */
				if ((tp->t_flags & TF_PMTUD_PEND) &&
				    SEQ_GT(th->th_ack, tp->t_pmtud_th_seq))
					tp->t_flags &= ~TF_PMTUD_PEND;

				/*
				 * Keep track of the largest chunk of data
				 * acknowledged since last PMTU update
				 */
				if (tp->t_pmtud_mss_acked < acked)
					tp->t_pmtud_mss_acked = acked;

				tp->snd_una = th->th_ack;
				/*
				 * We want snd_last to track snd_una so
				 * as to avoid sequence wraparound problems
				 * for very large transfers.
				 */
#ifdef TCP_ECN
				if (SEQ_GT(tp->snd_una, tp->snd_last))
#endif
				tp->snd_last = tp->snd_una;
				m_freem(m);

				/*
				 * If all outstanding data are acked, stop
				 * retransmit timer, otherwise restart timer
				 * using current (possibly backed-off) value.
				 * If process is waiting for space,
				 * wakeup/selwakeup/signal.  If data
				 * are ready to send, let tcp_output
				 * decide between more output or persist.
				 */
				if (tp->snd_una == tp->snd_max)
					TCP_TIMER_DISARM(tp, TCPT_REXMT);
				else if (TCP_TIMER_ISARMED(tp, TCPT_PERSIST) == 0)
					TCP_TIMER_ARM(tp, TCPT_REXMT, tp->t_rxtcur);

				tcp_update_sndspace(tp);
				if (sb_notify(so, &so->so_snd)) {
					tp->t_flags |= TF_BLOCKOUTPUT;
					sowwakeup(so);
					tp->t_flags &= ~TF_BLOCKOUTPUT;
				}
				if (so->so_snd.sb_cc ||
				    tp->t_flags & TF_NEEDOUTPUT)
					(void) tcp_output(tp);
				return IPPROTO_DONE;
			}
		} else if (th->th_ack == tp->snd_una &&
		    TAILQ_EMPTY(&tp->t_segq) &&
		    tlen <= sbspace(so, &so->so_rcv)) {
			/*
			 * This is a pure, in-sequence data packet
			 * with nothing on the reassembly queue and
			 * we have enough buffer space to take it.
			 */
			/* Clean receiver SACK report if present */
			if (tp->sack_enable && tp->rcv_numsacks)
				tcp_clean_sackreport(tp);
			tcpstat_inc(tcps_preddat);
			tp->rcv_nxt += tlen;
			tcpstat_pkt(tcps_rcvpack, tcps_rcvbyte, tlen);
			ND6_HINT(tp);

			TCP_SETUP_ACK(tp, tiflags, m);
			/*
			 * Drop TCP, IP headers and TCP options then add data
			 * to socket buffer.
			 */
			if (so->so_state & SS_CANTRCVMORE)
				m_freem(m);
			else {
				if (opti.ts_present && opti.ts_ecr) {
					if (tp->rfbuf_ts < opti.ts_ecr &&
					    opti.ts_ecr - tp->rfbuf_ts < hz) {
						tcp_update_rcvspace(tp);
						/* Start over with next RTT. */
						tp->rfbuf_cnt = 0;
						tp->rfbuf_ts = 0;
					} else
						tp->rfbuf_cnt += tlen;
				}
				m_adj(m, iphlen + off);
				sbappendstream(so, &so->so_rcv, m);
			}
			tp->t_flags |= TF_BLOCKOUTPUT;
			sorwakeup(so);
			tp->t_flags &= ~TF_BLOCKOUTPUT;
			if (tp->t_flags & (TF_ACKNOW|TF_NEEDOUTPUT))
				(void) tcp_output(tp);
			return IPPROTO_DONE;
		}
	}

	/*
	 * Compute mbuf offset to TCP data segment.
	 */
	hdroptlen = iphlen + off;

	/*
	 * Calculate amount of space in receive window,
	 * and then do TCP input processing.
	 * Receive window is amount of space in rcv queue,
	 * but not less than advertised window.
	 */
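	/*
	 * Example (hypothetical numbers): with 32768 bytes free in the
	 * receive buffer but rcv_adv - rcv_nxt == 40000 bytes already
	 * advertised, rcv_wnd becomes 40000, so the window the peer was
	 * told about is never retracted.
	 */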
	{ int win;

	win = sbspace(so, &so->so_rcv);
	if (win < 0)
		win = 0;
	tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));
	}

	/* Reset receive buffer auto scaling when not in bulk receive mode. */
	tp->rfbuf_cnt = 0;
	tp->rfbuf_ts = 0;

	switch (tp->t_state) {

	/*
	 * If the state is SYN_RECEIVED:
	 *	if seg contains SYN/ACK, send an RST.
	 *	if seg contains an ACK, but not for our SYN/ACK, send an RST
	 */

	case TCPS_SYN_RECEIVED:
		if (tiflags & TH_ACK) {
			if (tiflags & TH_SYN) {
				tcpstat_inc(tcps_badsyn);
				goto dropwithreset;
			}
			if (SEQ_LEQ(th->th_ack, tp->snd_una) ||
			    SEQ_GT(th->th_ack, tp->snd_max))
				goto dropwithreset;
		}
		break;

	/*
	 * If the state is SYN_SENT:
	 *	if seg contains an ACK, but not for our SYN, drop the input.
	 *	if seg contains a RST, then drop the connection.
	 *	if seg does not contain SYN, then drop it.
	 * Otherwise this is an acceptable SYN segment
	 *	initialize tp->rcv_nxt and tp->irs
	 *	if seg contains ack then advance tp->snd_una
	 *	if SYN has been acked change to ESTABLISHED else SYN_RCVD state
	 *	arrange for segment to be acked (eventually)
	 *	continue processing rest of data/controls, beginning with URG
	 */
	case TCPS_SYN_SENT:
		if ((tiflags & TH_ACK) &&
		    (SEQ_LEQ(th->th_ack, tp->iss) ||
		     SEQ_GT(th->th_ack, tp->snd_max)))
			goto dropwithreset;
		if (tiflags & TH_RST) {
#ifdef TCP_ECN
			/* if ECN is enabled, fall back to non-ecn at rexmit */
			if (tcp_do_ecn && !(tp->t_flags & TF_DISABLE_ECN))
				goto drop;
#endif
			if (tiflags & TH_ACK)
				tp = tcp_drop(tp, ECONNREFUSED);
			goto drop;
		}
		if ((tiflags & TH_SYN) == 0)
			goto drop;
		if (tiflags & TH_ACK) {
			tp->snd_una = th->th_ack;
			if (SEQ_LT(tp->snd_nxt, tp->snd_una))
				tp->snd_nxt = tp->snd_una;
		}
		TCP_TIMER_DISARM(tp, TCPT_REXMT);
		tp->irs = th->th_seq;
		tcp_mss(tp, opti.maxseg);
		/* Reset initial window to 1 segment for retransmit */
		if (tp->t_rxtshift > 0)
			tp->snd_cwnd = tp->t_maxseg;
		tcp_rcvseqinit(tp);
		tp->t_flags |= TF_ACKNOW;
		/*
		 * If we've sent a SACK_PERMITTED option, and the peer
		 * also replied with one, then TF_SACK_PERMIT should have
		 * been set in tcp_dooptions().  If it was not, disable SACKs.
		 */
		if (tp->sack_enable)
			tp->sack_enable = tp->t_flags & TF_SACK_PERMIT;
#ifdef TCP_ECN
		/*
		 * if ECE is set but CWR is not set for SYN-ACK, or
		 * both ECE and CWR are set for simultaneous open,
		 * peer is ECN capable.
		 */
		if (tcp_do_ecn) {
			switch (tiflags & (TH_ACK|TH_ECE|TH_CWR)) {
			case TH_ACK|TH_ECE:
			case TH_ECE|TH_CWR:
				tp->t_flags |= TF_ECN_PERMIT;
				tiflags &= ~(TH_ECE|TH_CWR);
				tcpstat_inc(tcps_ecn_accepts);
			}
		}
#endif

		if (tiflags & TH_ACK && SEQ_GT(tp->snd_una, tp->iss)) {
			tcpstat_inc(tcps_connects);
			tp->t_flags |= TF_BLOCKOUTPUT;
			soisconnected(so);
			tp->t_flags &= ~TF_BLOCKOUTPUT;
			tp->t_state = TCPS_ESTABLISHED;
			TCP_TIMER_ARM(tp, TCPT_KEEP, tcp_keepidle);
			/* Do window scaling on this connection? */
			if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
				(TF_RCVD_SCALE|TF_REQ_SCALE)) {
				tp->snd_scale = tp->requested_s_scale;
				tp->rcv_scale = tp->request_r_scale;
			}
			tcp_flush_queue(tp);

			/*
			 * if we didn't have to retransmit the SYN,
			 * use its rtt as our initial srtt & rtt var.
			 */
			if (tp->t_rtttime)
				tcp_xmit_timer(tp, tcp_now - tp->t_rtttime);
			/*
			 * Since new data was acked (the SYN), open the
			 * congestion window by one MSS.  We do this
			 * here, because we won't go through the normal
			 * ACK processing below.  And since this is the
			 * start of the connection, we know we are in
			 * the exponential phase of slow-start.
			 */
			tp->snd_cwnd += tp->t_maxseg;
		} else
			tp->t_state = TCPS_SYN_RECEIVED;

#if 0
trimthenstep6:
#endif
		/*
		 * Advance th->th_seq to correspond to first data byte.
		 * If data, trim to stay within window,
		 * dropping FIN if necessary.
		 */
		th->th_seq++;
		if (tlen > tp->rcv_wnd) {
			todrop = tlen - tp->rcv_wnd;
			m_adj(m, -todrop);
			tlen = tp->rcv_wnd;
			tiflags &= ~TH_FIN;
			tcpstat_pkt(tcps_rcvpackafterwin, tcps_rcvbyteafterwin,
			    todrop);
		}
		tp->snd_wl1 = th->th_seq - 1;
		tp->rcv_up = th->th_seq;
		goto step6;
	/*
	 * If a new connection request is received while in TIME_WAIT,
	 * drop the old connection and start over if the
	 * timestamp or the sequence numbers are above the previous
	 * ones.
	 */
	case TCPS_TIME_WAIT:
		if (((tiflags & (TH_SYN|TH_ACK)) == TH_SYN) &&
		    ((opti.ts_present &&
		    TSTMP_LT(tp->ts_recent, opti.ts_val)) ||
		    SEQ_GT(th->th_seq, tp->rcv_nxt))) {
#if NPF > 0
			/*
			 * The socket will be recreated but the new state
			 * has already been linked to the socket.  Remove the
			 * link between old socket and new state.
			 */
			pf_inp_unlink(inp);
#endif
			/*
			 * Advance the iss by at least 32768, but
			 * clear the msb in order to make sure
			 * that SEQ_LT(snd_nxt, iss).
			 */
			iss = tp->snd_nxt +
			    ((arc4random() & 0x7fffffff) | 0x8000);
			reuse = &iss;
			tp = tcp_close(tp);
			inp = NULL;
			goto findpcb;
		}
	}

	/*
	 * States other than LISTEN or SYN_SENT.
	 * First check timestamp, if present.
	 * Then check that at least some bytes of segment are within
	 * receive window.  If segment begins before rcv_nxt,
	 * drop leading data (and SYN); if nothing left, just ack.
	 *
	 * RFC 1323 PAWS: If we have a timestamp reply on this segment
	 * and it's less than tp->ts_recent, drop it.
	 */
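	/*
	 * Example (illustrative): if ts_recent is 100000 and a delayed
	 * segment arrives carrying ts_val 99900, TSTMP_LT(99900, 100000)
	 * is true and the segment is dropped (after acking) as a PAWS
	 * failure, unless ts_recent itself is stale (checked below).
	 */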
	if (opti.ts_present && (tiflags & TH_RST) == 0 && tp->ts_recent &&
	    TSTMP_LT(opti.ts_val, tp->ts_recent)) {

		/* Check to see if ts_recent is over 24 days old.  */
		if ((int)(tcp_now - tp->ts_recent_age) > TCP_PAWS_IDLE) {
			/*
			 * Invalidate ts_recent.  If this segment updates
			 * ts_recent, the age will be reset later and ts_recent
			 * will get a valid value.  If it does not, setting
			 * ts_recent to zero will at least satisfy the
			 * requirement that zero be placed in the timestamp
			 * echo reply when ts_recent isn't valid.  The
			 * age isn't reset until we get a valid ts_recent
			 * because we don't want out-of-order segments to be
			 * dropped when ts_recent is old.
			 */
			tp->ts_recent = 0;
		} else {
			tcpstat_pkt(tcps_rcvduppack, tcps_rcvdupbyte, tlen);
			tcpstat_inc(tcps_pawsdrop);
			if (tlen)
				goto dropafterack;
			goto drop;
		}
	}

	todrop = tp->rcv_nxt - th->th_seq;
	if (todrop > 0) {
		if (tiflags & TH_SYN) {
			tiflags &= ~TH_SYN;
			th->th_seq++;
			if (th->th_urp > 1)
				th->th_urp--;
			else
				tiflags &= ~TH_URG;
			todrop--;
		}
		if (todrop > tlen ||
		    (todrop == tlen && (tiflags & TH_FIN) == 0)) {
			/*
			 * Any valid FIN must be to the left of the
			 * window.  At this point, FIN must be a
			 * duplicate or out-of-sequence, so drop it.
			 */
			tiflags &= ~TH_FIN;
			/*
			 * Send ACK to resynchronize, and drop any data,
			 * but keep on processing for RST or ACK.
			 */
			tp->t_flags |= TF_ACKNOW;
			todrop = tlen;
			tcpstat_pkt(tcps_rcvduppack, tcps_rcvdupbyte, todrop);
		} else {
			tcpstat_pkt(tcps_rcvpartduppack, tcps_rcvpartdupbyte,
			    todrop);
		}
		hdroptlen += todrop;	/* drop from head afterwards */
		th->th_seq += todrop;
		tlen -= todrop;
		if (th->th_urp > todrop)
			th->th_urp -= todrop;
		else {
			tiflags &= ~TH_URG;
			th->th_urp = 0;
		}
	}

	/*
	 * If new data are received on a connection after the
	 * user processes are gone, then RST the other end.
	 */
	if ((so->so_state & SS_NOFDREF) &&
	    tp->t_state > TCPS_CLOSE_WAIT && tlen) {
		tp = tcp_close(tp);
		tcpstat_inc(tcps_rcvafterclose);
		goto dropwithreset;
	}

	/*
	 * If segment ends after window, drop trailing data
	 * (and PUSH and FIN); if nothing left, just ACK.
	 */
	todrop = (th->th_seq + tlen) - (tp->rcv_nxt+tp->rcv_wnd);
	if (todrop > 0) {
		tcpstat_inc(tcps_rcvpackafterwin);
		if (todrop >= tlen) {
			tcpstat_add(tcps_rcvbyteafterwin, tlen);
			/*
			 * If window is closed can only take segments at
			 * window edge, and have to drop data and PUSH from
			 * incoming segments.  Continue processing, but
			 * remember to ack.  Otherwise, drop segment
			 * and ack.
			 */
			if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
				tp->t_flags |= TF_ACKNOW;
				tcpstat_inc(tcps_rcvwinprobe);
			} else
				goto dropafterack;
		} else
			tcpstat_add(tcps_rcvbyteafterwin, todrop);
		m_adj(m, -todrop);
		tlen -= todrop;
		tiflags &= ~(TH_PUSH|TH_FIN);
	}

	/*
	 * If last ACK falls within this segment's sequence numbers,
	 * record its timestamp if it's more recent.
	 * NOTE that the test is modified according to the latest
	 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
	 */
	if (opti.ts_present && TSTMP_GEQ(opti.ts_val, tp->ts_recent) &&
	    SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
		tp->ts_recent_age = tcp_now;
		tp->ts_recent = opti.ts_val;
	}

	/*
	 * If the RST bit is set examine the state:
	 *    SYN_RECEIVED STATE:
	 *	If passive open, return to LISTEN state.
	 *	If active open, inform user that connection was refused.
	 *    ESTABLISHED, FIN_WAIT_1, FIN_WAIT2, CLOSE_WAIT STATES:
	 *	Inform user that connection was reset, and close tcb.
	 *    CLOSING, LAST_ACK, TIME_WAIT STATES
	 *	Close the tcb.
	 */
	if (tiflags & TH_RST) {
		if (th->th_seq != tp->last_ack_sent &&
		    th->th_seq != tp->rcv_nxt &&
		    th->th_seq != (tp->rcv_nxt + 1))
			goto drop;

		switch (tp->t_state) {
		case TCPS_SYN_RECEIVED:
#ifdef TCP_ECN
			/* if ECN is enabled, fall back to non-ecn at rexmit */
			if (tcp_do_ecn && !(tp->t_flags & TF_DISABLE_ECN))
				goto drop;
#endif
			so->so_error = ECONNREFUSED;
			goto close;

		case TCPS_ESTABLISHED:
		case TCPS_FIN_WAIT_1:
		case TCPS_FIN_WAIT_2:
		case TCPS_CLOSE_WAIT:
			so->so_error = ECONNRESET;
		close:
			tp->t_state = TCPS_CLOSED;
			tcpstat_inc(tcps_drops);
			tp = tcp_close(tp);
			goto drop;
		case TCPS_CLOSING:
		case TCPS_LAST_ACK:
		case TCPS_TIME_WAIT:
			tp = tcp_close(tp);
			goto drop;
		}
	}

	/*
	 * If a SYN is in the window, then this is an
	 * error and we ACK and drop the packet.
	 */
	if (tiflags & TH_SYN)
		goto dropafterack_ratelim;

	/*
	 * If the ACK bit is off we drop the segment and return.
	 */
	if ((tiflags & TH_ACK) == 0) {
		if (tp->t_flags & TF_ACKNOW)
			goto dropafterack;
		else
			goto drop;
	}

	/*
	 * Ack processing.
	 */
	switch (tp->t_state) {

	/*
	 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter
	 * ESTABLISHED state and continue processing.
	 * The ACK was checked above.
	 */
	case TCPS_SYN_RECEIVED:
		tcpstat_inc(tcps_connects);
		tp->t_flags |= TF_BLOCKOUTPUT;
		soisconnected(so);
		tp->t_flags &= ~TF_BLOCKOUTPUT;
		tp->t_state = TCPS_ESTABLISHED;
		TCP_TIMER_ARM(tp, TCPT_KEEP, tcp_keepidle);
		/* Do window scaling? */
		if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
			(TF_RCVD_SCALE|TF_REQ_SCALE)) {
			tp->snd_scale = tp->requested_s_scale;
			tp->rcv_scale = tp->request_r_scale;
			tiwin = th->th_win << tp->snd_scale;
		}
		tcp_flush_queue(tp);
		tp->snd_wl1 = th->th_seq - 1;
		/* fall into ... */

	/*
	 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
	 * ACKs.  If the ack is in the range
	 *	tp->snd_una < th->th_ack <= tp->snd_max
	 * then advance tp->snd_una to th->th_ack and drop
	 * data from the retransmission queue.  If this ACK reflects
	 * more up to date window information we update our window information.
	 */
	case TCPS_ESTABLISHED:
	case TCPS_FIN_WAIT_1:
	case TCPS_FIN_WAIT_2:
	case TCPS_CLOSE_WAIT:
	case TCPS_CLOSING:
	case TCPS_LAST_ACK:
	case TCPS_TIME_WAIT:
#ifdef TCP_ECN
		/*
		 * if we receive ECE and are not already in recovery phase,
		 * reduce cwnd by half but don't slow-start.
		 * advance snd_last to snd_max not to reduce cwnd again
		 * until all outstanding packets are acked.
		 */
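		/*
		 * Example (hypothetical numbers): snd_wnd 65536,
		 * snd_cwnd 32768 and maxseg 1460 give win == 22, so
		 * ssthresh and cwnd drop to 11 * 1460 bytes, and
		 * snd_last is moved to snd_max so that further ECEs
		 * in the same recovery phase are ignored.
		 */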
		if (tcp_do_ecn && (tiflags & TH_ECE)) {
			if ((tp->t_flags & TF_ECN_PERMIT) &&
			    SEQ_GEQ(tp->snd_una, tp->snd_last)) {
				u_int win;

				win = min(tp->snd_wnd, tp->snd_cwnd) / tp->t_maxseg;
				if (win > 1) {
					tp->snd_ssthresh = win / 2 * tp->t_maxseg;
					tp->snd_cwnd = tp->snd_ssthresh;
					tp->snd_last = tp->snd_max;
					tp->t_flags |= TF_SEND_CWR;
					tcpstat_inc(tcps_cwr_ecn);
				}
			}
			tcpstat_inc(tcps_ecn_rcvece);
		}
		/*
		 * if we receive CWR, we know that the peer has reduced
		 * its congestion window.  stop sending ecn-echo.
		 */
		if ((tiflags & TH_CWR)) {
			tp->t_flags &= ~TF_RCVD_CE;
			tcpstat_inc(tcps_ecn_rcvcwr);
		}
#endif /* TCP_ECN */

		if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
			/*
			 * Duplicate/old ACK processing.
			 * Increments t_dupacks:
			 *	Pure duplicate (same seq/ack/window, no data)
			 * Doesn't affect t_dupacks:
			 *	Data packets.
			 *	Normal window updates (window opens)
			 * Resets t_dupacks:
			 *	New data ACKed.
			 *	Window shrinks
			 *	Old ACK
			 */
			if (tlen) {
				/* Drop very old ACKs unless th_seq matches */
				if (th->th_seq != tp->rcv_nxt &&
				   SEQ_LT(th->th_ack,
				   tp->snd_una - tp->max_sndwnd)) {
					tcpstat_inc(tcps_rcvacktooold);
					goto drop;
				}
				break;
			}
			/*
			 * If we get an old ACK, there is probably packet
			 * reordering going on.  Be conservative and reset
			 * t_dupacks so that we are less aggressive in
			 * doing a fast retransmit.
			 */
			if (th->th_ack != tp->snd_una) {
				tp->t_dupacks = 0;
				break;
			}
			if (tiwin == tp->snd_wnd) {
				tcpstat_inc(tcps_rcvdupack);
				/*
				 * If we have outstanding data (other than
				 * a window probe), this is a completely
				 * duplicate ack (ie, window info didn't
				 * change), the ack is the biggest we've
				 * seen and we've seen exactly our rexmt
				 * threshold of them, assume a packet
				 * has been dropped and retransmit it.
				 * Kludge snd_nxt & the congestion
				 * window so we send only this one
				 * packet.
				 *
				 * We know we're losing at the current
				 * window size so do congestion avoidance
				 * (set ssthresh to half the current window
				 * and pull our congestion window back to
				 * the new ssthresh).
				 *
				 * Dup acks mean that packets have left the
				 * network (they're now cached at the receiver)
				 * so bump cwnd by the amount in the receiver
				 * to keep a constant cwnd packets in the
				 * network.
				 */
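				/*
				 * Worked example (hypothetical numbers):
				 * with snd_wnd == snd_cwnd == 65536 and a
				 * 1460-byte maxseg, the code below computes
				 * win = 65536 / 2 / 1460 == 22, so ssthresh
				 * becomes 22 * 1460 bytes; each further dup
				 * ack then reinflates cwnd by one maxseg.
				 */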
				if (TCP_TIMER_ISARMED(tp, TCPT_REXMT) == 0)
					tp->t_dupacks = 0;
				else if (++tp->t_dupacks == tcprexmtthresh) {
					tcp_seq onxt = tp->snd_nxt;
					u_long win =
					    ulmin(tp->snd_wnd, tp->snd_cwnd) /
						2 / tp->t_maxseg;

					if (SEQ_LT(th->th_ack, tp->snd_last)) {
						/*
						 * False fast retx after
						 * timeout.  Do not cut window.
						 */
						tp->t_dupacks = 0;
						goto drop;
					}
					if (win < 2)
						win = 2;
					tp->snd_ssthresh = win * tp->t_maxseg;
					tp->snd_last = tp->snd_max;
					if (tp->sack_enable) {
						TCP_TIMER_DISARM(tp, TCPT_REXMT);
						tp->t_rtttime = 0;
#ifdef TCP_ECN
						tp->t_flags |= TF_SEND_CWR;
#endif
						tcpstat_inc(tcps_cwr_frecovery);
						tcpstat_inc(tcps_sack_recovery_episode);
						/*
						 * tcp_output() will send
						 * oldest SACK-eligible rtx.
						 */
						(void) tcp_output(tp);
						tp->snd_cwnd = tp->snd_ssthresh +
						    tp->t_maxseg * tp->t_dupacks;
						goto drop;
					}
					TCP_TIMER_DISARM(tp, TCPT_REXMT);
					tp->t_rtttime = 0;
					tp->snd_nxt = th->th_ack;
					tp->snd_cwnd = tp->t_maxseg;
#ifdef TCP_ECN
					tp->t_flags |= TF_SEND_CWR;
#endif
					tcpstat_inc(tcps_cwr_frecovery);
					tcpstat_inc(tcps_sndrexmitfast);
					(void) tcp_output(tp);

					tp->snd_cwnd = tp->snd_ssthresh +
					    tp->t_maxseg * tp->t_dupacks;
					if (SEQ_GT(onxt, tp->snd_nxt))
						tp->snd_nxt = onxt;
					goto drop;
				} else if (tp->t_dupacks > tcprexmtthresh) {
					tp->snd_cwnd += tp->t_maxseg;
					(void) tcp_output(tp);
					goto drop;
				}
			} else if (tiwin < tp->snd_wnd) {
				/*
				 * The window was retracted!  Previous dup
				 * ACKs may have been due to packets arriving
				 * after the shrunken window, not a missing
				 * packet, so play it safe and reset t_dupacks
				 */
				tp->t_dupacks = 0;
			}
			break;
		}
		/*
		 * If the congestion window was inflated to account
		 * for the other side's cached packets, retract it.
		 */
		if (tp->t_dupacks >= tcprexmtthresh) {
			/* Check for a partial ACK */
			if (SEQ_LT(th->th_ack, tp->snd_last)) {
				if (tp->sack_enable)
					tcp_sack_partialack(tp, th);
				else
					tcp_newreno_partialack(tp, th);
			} else {
				/* Out of fast recovery */
				tp->snd_cwnd = tp->snd_ssthresh;
				if (tcp_seq_subtract(tp->snd_max, th->th_ack) <
				    tp->snd_ssthresh)
					tp->snd_cwnd =
					    tcp_seq_subtract(tp->snd_max,
					    th->th_ack);
				tp->t_dupacks = 0;
			}
		} else {
			/*
			 * Reset the duplicate ACK counter if we
			 * were not in fast recovery.
			 */
			tp->t_dupacks = 0;
		}
		if (SEQ_GT(th->th_ack, tp->snd_max)) {
			tcpstat_inc(tcps_rcvacktoomuch);
			goto dropafterack_ratelim;
		}
		acked = th->th_ack - tp->snd_una;
		tcpstat_pkt(tcps_rcvackpack, tcps_rcvackbyte, acked);

		/*
		 * If we have a timestamp reply, update smoothed
		 * round trip time.  If no timestamp is present but
		 * transmit timer is running and timed sequence
		 * number was acked, update smoothed round trip time.
		 * Since we now have an rtt measurement, cancel the
		 * timer backoff (cf., Phil Karn's retransmit alg.).
		 * Recompute the initial retransmit timer.
		 */
		if (opti.ts_present && opti.ts_ecr)
			tcp_xmit_timer(tp, tcp_now - opti.ts_ecr);
		else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq))
			tcp_xmit_timer(tp, tcp_now - tp->t_rtttime);

		/*
		 * If all outstanding data is acked, stop retransmit
		 * timer and remember to restart (more output or persist).
		 * If there is more data to be acked, restart retransmit
		 * timer, using current (possibly backed-off) value.
		 */
		if (th->th_ack == tp->snd_max) {
			TCP_TIMER_DISARM(tp, TCPT_REXMT);
			tp->t_flags |= TF_NEEDOUTPUT;
		} else if (TCP_TIMER_ISARMED(tp, TCPT_PERSIST) == 0)
			TCP_TIMER_ARM(tp, TCPT_REXMT, tp->t_rxtcur);
		/*
		 * When new data is acked, open the congestion window.
		 * If the window gives us less than ssthresh packets
		 * in flight, open exponentially (maxseg per packet).
		 * Otherwise open linearly: maxseg per window
		 * (maxseg^2 / cwnd per packet).
		 */
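		/*
		 * Example (hypothetical numbers): with maxseg 1460 and
		 * cwnd 14600 (ten packets) above ssthresh, incr becomes
		 * 1460 * 1460 / 14600 == 146 bytes per ACK, i.e. about
		 * one maxseg of growth per window of ACKs.
		 */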
1706 		{
1707 		u_int cw = tp->snd_cwnd;
1708 		u_int incr = tp->t_maxseg;
1709 
1710 		if (cw > tp->snd_ssthresh)
1711 			incr = max(incr * incr / cw, 1);
1712 		if (tp->t_dupacks < tcprexmtthresh)
1713 			tp->snd_cwnd = ulmin(cw + incr,
1714 			    TCP_MAXWIN << tp->snd_scale);
1715 		}
1716 		ND6_HINT(tp);
1717 		if (acked > so->so_snd.sb_cc) {
1718 			if (tp->snd_wnd > so->so_snd.sb_cc)
1719 				tp->snd_wnd -= so->so_snd.sb_cc;
1720 			else
1721 				tp->snd_wnd = 0;
1722 			sbdrop(so, &so->so_snd, (int)so->so_snd.sb_cc);
1723 			ourfinisacked = 1;
1724 		} else {
1725 			sbdrop(so, &so->so_snd, acked);
1726 			if (tp->snd_wnd > acked)
1727 				tp->snd_wnd -= acked;
1728 			else
1729 				tp->snd_wnd = 0;
1730 			ourfinisacked = 0;
1731 		}
1732 
1733 		tcp_update_sndspace(tp);
1734 		if (sb_notify(so, &so->so_snd)) {
1735 			tp->t_flags |= TF_BLOCKOUTPUT;
1736 			sowwakeup(so);
1737 			tp->t_flags &= ~TF_BLOCKOUTPUT;
1738 		}
1739 
1740 		/*
1741 		 * If we had a pending ICMP message that referred to data
1742 		 * that have just been acknowledged, disregard the recorded
1743 		 * ICMP message.
1744 		 */
1745 		if ((tp->t_flags & TF_PMTUD_PEND) &&
1746 		    SEQ_GT(th->th_ack, tp->t_pmtud_th_seq))
1747 			tp->t_flags &= ~TF_PMTUD_PEND;
1748 
1749 		/*
1750 		 * Keep track of the largest chunk of data acknowledged
1751 		 * since last PMTU update
1752 		 */
1753 		if (tp->t_pmtud_mss_acked < acked)
1754 			tp->t_pmtud_mss_acked = acked;
1755 
1756 		tp->snd_una = th->th_ack;
1757 #ifdef TCP_ECN
1758 		/* sync snd_last with snd_una */
1759 		if (SEQ_GT(tp->snd_una, tp->snd_last))
1760 			tp->snd_last = tp->snd_una;
1761 #endif
1762 		if (SEQ_LT(tp->snd_nxt, tp->snd_una))
1763 			tp->snd_nxt = tp->snd_una;
1764 
1765 		switch (tp->t_state) {
1766 
1767 		/*
1768 		 * In FIN_WAIT_1 STATE in addition to the processing
1769 		 * for the ESTABLISHED state if our FIN is now acknowledged
1770 		 * then enter FIN_WAIT_2.
1771 		 */
1772 		case TCPS_FIN_WAIT_1:
1773 			if (ourfinisacked) {
1774 				/*
1775 				 * If we can't receive any more
1776 				 * data, then closing user can proceed.
1777 				 * Starting the timer is contrary to the
1778 				 * specification, but if we don't get a FIN
1779 				 * we'll hang forever.
1780 				 */
1781 				if (so->so_state & SS_CANTRCVMORE) {
1782 					tp->t_flags |= TF_BLOCKOUTPUT;
1783 					soisdisconnected(so);
1784 					tp->t_flags &= ~TF_BLOCKOUTPUT;
1785 					TCP_TIMER_ARM(tp, TCPT_2MSL, tcp_maxidle);
1786 				}
1787 				tp->t_state = TCPS_FIN_WAIT_2;
1788 			}
1789 			break;
1790 
1791 		/*
1792 		 * In CLOSING STATE in addition to the processing for
1793 		 * the ESTABLISHED state if the ACK acknowledges our FIN
1794 		 * then enter the TIME-WAIT state, otherwise ignore
1795 		 * the segment.
1796 		 */
1797 		case TCPS_CLOSING:
1798 			if (ourfinisacked) {
1799 				tp->t_state = TCPS_TIME_WAIT;
1800 				tcp_canceltimers(tp);
1801 				TCP_TIMER_ARM(tp, TCPT_2MSL, 2 * TCPTV_MSL);
1802 				tp->t_flags |= TF_BLOCKOUTPUT;
1803 				soisdisconnected(so);
1804 				tp->t_flags &= ~TF_BLOCKOUTPUT;
1805 			}
1806 			break;
1807 
1808 		/*
1809 		 * In LAST_ACK, we may still be waiting for data to drain
1810 		 * and/or to be acked, as well as for the ack of our FIN.
1811 		 * If our FIN is now acknowledged, delete the TCB,
1812 		 * enter the closed state and return.
1813 		 */
1814 		case TCPS_LAST_ACK:
1815 			if (ourfinisacked) {
1816 				tp = tcp_close(tp);
1817 				goto drop;
1818 			}
1819 			break;
1820 
1821 		/*
1822 		 * In TIME_WAIT state the only thing that should arrive
1823 		 * is a retransmission of the remote FIN.  Acknowledge
1824 		 * it and restart the finack timer.
1825 		 */
1826 		case TCPS_TIME_WAIT:
1827 			TCP_TIMER_ARM(tp, TCPT_2MSL, 2 * TCPTV_MSL);
1828 			goto dropafterack;
1829 		}
1830 	}
1831 
1832 step6:
1833 	/*
1834 	 * Update window information.
1835 	 * Don't look at window if no ACK: TACs send garbage on first SYN.
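	 * Accept the update only if the segment is not older than the
	 * last one used to update the window: a newer th_seq, the same
	 * th_seq with a newer th_ack, or the same of both with a larger
	 * advertised window.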
1836 	 */
1837 	if ((tiflags & TH_ACK) &&
1838 	    (SEQ_LT(tp->snd_wl1, th->th_seq) || (tp->snd_wl1 == th->th_seq &&
1839 	    (SEQ_LT(tp->snd_wl2, th->th_ack) ||
1840 	    (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
1841 		/* keep track of pure window updates */
1842 		if (tlen == 0 &&
1843 		    tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
1844 			tcpstat_inc(tcps_rcvwinupd);
1845 		tp->snd_wnd = tiwin;
1846 		tp->snd_wl1 = th->th_seq;
1847 		tp->snd_wl2 = th->th_ack;
1848 		if (tp->snd_wnd > tp->max_sndwnd)
1849 			tp->max_sndwnd = tp->snd_wnd;
1850 		tp->t_flags |= TF_NEEDOUTPUT;
1851 	}
1852 
1853 	/*
1854 	 * Process segments with URG.
1855 	 */
1856 	if ((tiflags & TH_URG) && th->th_urp &&
1857 	    TCPS_HAVERCVDFIN(tp->t_state) == 0) {
1858 		/*
1859 		 * This is a kludge, but if we receive and accept
1860 		 * random urgent pointers, we'll crash in
1861 		 * soreceive.  It's hard to imagine someone
1862 		 * actually wanting to send this much urgent data.
1863 		 */
1864 		if (th->th_urp + so->so_rcv.sb_cc > sb_max) {
1865 			th->th_urp = 0;			/* XXX */
1866 			tiflags &= ~TH_URG;		/* XXX */
1867 			goto dodata;			/* XXX */
1868 		}
1869 		/*
1870 		 * If this segment advances the known urgent pointer,
1871 		 * then mark the data stream.  This should not happen
1872 		 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
1873 		 * a FIN has been received from the remote side.
1874 		 * In these states we ignore the URG.
1875 		 *
1876 		 * According to RFC961 (Assigned Protocols),
1877 		 * the urgent pointer points to the last octet
1878 		 * of urgent data.  We continue, however,
1879 		 * to consider it to indicate the first octet
1880 		 * of data past the urgent section as the original
1881 		 * spec states (in one of two places).
1882 		 */
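		/*
		 * so_oobmark is the offset of the urgent mark from the
		 * start of the unread data in the receive buffer.
		 */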
1883 		if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) {
1884 			tp->rcv_up = th->th_seq + th->th_urp;
1885 			so->so_oobmark = so->so_rcv.sb_cc +
1886 			    (tp->rcv_up - tp->rcv_nxt) - 1;
1887 			if (so->so_oobmark == 0)
1888 				so->so_state |= SS_RCVATMARK;
1889 			sohasoutofband(so);
1890 			tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
1891 		}
1892 		/*
1893 		 * Remove out of band data so it isn't presented to the user.
1894 		 * This can happen independent of advancing the URG pointer,
1895 		 * but if two URG's are pending at once, some out-of-band
1896 		 * data may creep in... ick.
1897 		 */
1898 		if (th->th_urp <= (u_int16_t) tlen &&
1899 		    (so->so_options & SO_OOBINLINE) == 0)
1900 			tcp_pulloutofband(so, th->th_urp, m, hdroptlen);
1901 	} else
1902 		/*
1903 		 * If no out of band data is expected,
1904 		 * pull receive urgent pointer along
1905 		 * with the receive window.
1906 		 */
1907 		if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
1908 			tp->rcv_up = tp->rcv_nxt;
1909 dodata:							/* XXX */
1910 
1911 	/*
1912 	 * Process the segment text, merging it into the TCP sequencing queue,
1913 	 * and arranging for acknowledgment of receipt if necessary.
1914 	 * This process logically involves adjusting tp->rcv_wnd as data
1915 	 * is presented to the user (this happens in tcp_usrreq.c,
1916 	 * case PRU_RCVD).  If a FIN has already been received on this
1917 	 * connection then we just ignore the text.
1918 	 */
1919 	if ((tlen || (tiflags & TH_FIN)) &&
1920 	    TCPS_HAVERCVDFIN(tp->t_state) == 0) {
1921 		tcp_seq laststart = th->th_seq;
1922 		tcp_seq lastend = th->th_seq + tlen;
1923 
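		/*
		 * Common case: an in-sequence segment on an established
		 * connection with an empty reassembly queue can be
		 * appended to the receive buffer directly.
		 */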
1924 		if (th->th_seq == tp->rcv_nxt && TAILQ_EMPTY(&tp->t_segq) &&
1925 		    tp->t_state == TCPS_ESTABLISHED) {
1926 			TCP_SETUP_ACK(tp, tiflags, m);
1927 			tp->rcv_nxt += tlen;
1928 			tiflags = th->th_flags & TH_FIN;
1929 			tcpstat_pkt(tcps_rcvpack, tcps_rcvbyte, tlen);
1930 			ND6_HINT(tp);
1931 			if (so->so_state & SS_CANTRCVMORE)
1932 				m_freem(m);
1933 			else {
1934 				m_adj(m, hdroptlen);
1935 				sbappendstream(so, &so->so_rcv, m);
1936 			}
1937 			tp->t_flags |= TF_BLOCKOUTPUT;
1938 			sorwakeup(so);
1939 			tp->t_flags &= ~TF_BLOCKOUTPUT;
1940 		} else {
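			/*
			 * Out of order, or not yet established: queue
			 * the segment for reassembly and ACK right away.
			 */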
1941 			m_adj(m, hdroptlen);
1942 			tiflags = tcp_reass(tp, th, m, &tlen);
1943 			tp->t_flags |= TF_ACKNOW;
1944 		}
1945 		if (tp->sack_enable)
1946 			tcp_update_sack_list(tp, laststart, lastend);
1947 
1948 		/*
1949 		 * The variable len is never referenced again in modern
1950 		 * BSD, so why bother computing it?
1951 		 */
1952 #if 0
1953 		/*
1954 		 * Note the amount of data that peer has sent into
1955 		 * our window, in order to estimate the sender's
1956 		 * buffer size.
1957 		 */
1958 		len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt);
1959 #endif /* 0 */
1960 	} else {
1961 		m_freem(m);
1962 		tiflags &= ~TH_FIN;
1963 	}
1964 
1965 	/*
1966 	 * If FIN is received ACK the FIN and let the user know
1967 	 * that the connection is closing.  Ignore a FIN received before
1968 	 * the connection is fully established.
1969 	 */
1970 	if ((tiflags & TH_FIN) && TCPS_HAVEESTABLISHED(tp->t_state)) {
1971 		if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
1972 			tp->t_flags |= TF_BLOCKOUTPUT;
1973 			socantrcvmore(so);
1974 			tp->t_flags &= ~TF_BLOCKOUTPUT;
1975 			tp->t_flags |= TF_ACKNOW;
1976 			tp->rcv_nxt++;
1977 		}
1978 		switch (tp->t_state) {
1979 
1980 		/*
1981 		 * In ESTABLISHED STATE enter the CLOSE_WAIT state.
1982 		 */
1983 		case TCPS_ESTABLISHED:
1984 			tp->t_state = TCPS_CLOSE_WAIT;
1985 			break;
1986 
1987 		/*
1988 		 * If still in FIN_WAIT_1 STATE FIN has not been acked so
1989 		 * enter the CLOSING state.
1990 		 */
1991 		case TCPS_FIN_WAIT_1:
1992 			tp->t_state = TCPS_CLOSING;
1993 			break;
1994 
1995 		/*
1996 		 * In FIN_WAIT_2 state enter the TIME_WAIT state,
1997 		 * starting the time-wait timer, turning off the other
1998 		 * standard timers.
1999 		 */
2000 		case TCPS_FIN_WAIT_2:
2001 			tp->t_state = TCPS_TIME_WAIT;
2002 			tcp_canceltimers(tp);
2003 			TCP_TIMER_ARM(tp, TCPT_2MSL, 2 * TCPTV_MSL);
2004 			tp->t_flags |= TF_BLOCKOUTPUT;
2005 			soisdisconnected(so);
2006 			tp->t_flags &= ~TF_BLOCKOUTPUT;
2007 			break;
2008 
2009 		/*
2010 		 * In TIME_WAIT state restart the 2 MSL time_wait timer.
2011 		 */
2012 		case TCPS_TIME_WAIT:
2013 			TCP_TIMER_ARM(tp, TCPT_2MSL, 2 * TCPTV_MSL);
2014 			break;
2015 		}
2016 	}
2017 	if (otp)
2018 		tcp_trace(TA_INPUT, ostate, tp, otp, saveti, 0, tlen);
2019 
2020 	/*
2021 	 * Return any desired output.
2022 	 */
2023 	if (tp->t_flags & (TF_ACKNOW|TF_NEEDOUTPUT))
2024 		(void) tcp_output(tp);
2025 	return IPPROTO_DONE;
2026 
2027 badsyn:
2028 	/*
2029 	 * Received a bad SYN.  Increment counters and dropwithreset.
2030 	 */
2031 	tcpstat_inc(tcps_badsyn);
2032 	tp = NULL;
2033 	goto dropwithreset;
2034 
2035 dropafterack_ratelim:
2036 	if (ppsratecheck(&tcp_ackdrop_ppslim_last, &tcp_ackdrop_ppslim_count,
2037 	    tcp_ackdrop_ppslim) == 0) {
2038 		/* XXX stat */
2039 		goto drop;
2040 	}
2041 	/* ...fall into dropafterack... */
2042 
2043 dropafterack:
2044 	/*
2045 	 * Generate an ACK dropping incoming segment if it occupies
2046 	 * sequence space, where the ACK reflects our state.
2047 	 */
2048 	if (tiflags & TH_RST)
2049 		goto drop;
2050 	m_freem(m);
2051 	tp->t_flags |= TF_ACKNOW;
2052 	(void) tcp_output(tp);
2053 	return IPPROTO_DONE;
2054 
2055 dropwithreset_ratelim:
2056 	/*
2057 	 * We may want to rate-limit RSTs in certain situations,
2058 	 * particularly if we are sending an RST in response to
2059 	 * an attempt to connect to or otherwise communicate with
2060 	 * a port for which we have no socket.
2061 	 */
2062 	if (ppsratecheck(&tcp_rst_ppslim_last, &tcp_rst_ppslim_count,
2063 	    tcp_rst_ppslim) == 0) {
2064 		/* XXX stat */
2065 		goto drop;
2066 	}
2067 	/* ...fall into dropwithreset... */
2068 
2069 dropwithreset:
2070 	/*
2071 	 * Generate a RST, dropping incoming segment.
2072 	 * Make ACK acceptable to originator of segment.
2073 	 * Don't bother to respond to RST.
2074 	 */
2075 	if (tiflags & TH_RST)
2076 		goto drop;
2077 	if (tiflags & TH_ACK) {
2078 		tcp_respond(tp, mtod(m, caddr_t), th, (tcp_seq)0, th->th_ack,
2079 		    TH_RST, m->m_pkthdr.ph_rtableid);
2080 	} else {
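		/* A SYN consumes one octet of sequence space; count it. */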
2081 		if (tiflags & TH_SYN)
2082 			tlen++;
2083 		tcp_respond(tp, mtod(m, caddr_t), th, th->th_seq + tlen,
2084 		    (tcp_seq)0, TH_RST|TH_ACK, m->m_pkthdr.ph_rtableid);
2085 	}
2086 	m_freem(m);
2087 	return IPPROTO_DONE;
2088 
2089 drop:
2090 	/*
2091 	 * Drop space held by incoming segment and return.
2092 	 */
2093 	if (otp)
2094 		tcp_trace(TA_DROP, ostate, tp, otp, saveti, 0, tlen);
2095 
2096 	m_freem(m);
2097 	return IPPROTO_DONE;
2098 }
2099 
2100 int
2101 tcp_dooptions(struct tcpcb *tp, u_char *cp, int cnt, struct tcphdr *th,
2102     struct mbuf *m, int iphlen, struct tcp_opt_info *oi,
2103     u_int rtableid)
2104 {
2105 	u_int16_t mss = 0;
2106 	int opt, optlen;
2107 #ifdef TCP_SIGNATURE
2108 	caddr_t sigp = NULL;
2109 	struct tdb *tdb = NULL;
2110 #endif /* TCP_SIGNATURE */
2111 
2112 	for (; cp && cnt > 0; cnt -= optlen, cp += optlen) {
2113 		opt = cp[0];
2114 		if (opt == TCPOPT_EOL)
2115 			break;
2116 		if (opt == TCPOPT_NOP)
2117 			optlen = 1;
2118 		else {
2119 			if (cnt < 2)
2120 				break;
2121 			optlen = cp[1];
2122 			if (optlen < 2 || optlen > cnt)
2123 				break;
2124 		}
2125 		switch (opt) {
2126 
2127 		default:
2128 			continue;
2129 
2130 		case TCPOPT_MAXSEG:
2131 			if (optlen != TCPOLEN_MAXSEG)
2132 				continue;
2133 			if (!(th->th_flags & TH_SYN))
2134 				continue;
2135 			if (TCPS_HAVERCVDSYN(tp->t_state))
2136 				continue;
2137 			memcpy(&mss, cp + 2, sizeof(mss));
2138 			mss = ntohs(mss);
2139 			oi->maxseg = mss;
2140 			break;
2141 
2142 		case TCPOPT_WINDOW:
2143 			if (optlen != TCPOLEN_WINDOW)
2144 				continue;
2145 			if (!(th->th_flags & TH_SYN))
2146 				continue;
2147 			if (TCPS_HAVERCVDSYN(tp->t_state))
2148 				continue;
2149 			tp->t_flags |= TF_RCVD_SCALE;
2150 			tp->requested_s_scale = min(cp[2], TCP_MAX_WINSHIFT);
2151 			break;
2152 
2153 		case TCPOPT_TIMESTAMP:
2154 			if (optlen != TCPOLEN_TIMESTAMP)
2155 				continue;
2156 			oi->ts_present = 1;
2157 			memcpy(&oi->ts_val, cp + 2, sizeof(oi->ts_val));
2158 			oi->ts_val = ntohl(oi->ts_val);
2159 			memcpy(&oi->ts_ecr, cp + 6, sizeof(oi->ts_ecr));
2160 			oi->ts_ecr = ntohl(oi->ts_ecr);
2161 
2162 			if (!(th->th_flags & TH_SYN))
2163 				continue;
2164 			if (TCPS_HAVERCVDSYN(tp->t_state))
2165 				continue;
2166 			/*
2167 			 * A timestamp received in a SYN makes
2168 			 * it ok to send timestamp requests and replies.
2169 			 */
2170 			tp->t_flags |= TF_RCVD_TSTMP;
2171 			tp->ts_recent = oi->ts_val;
2172 			tp->ts_recent_age = tcp_now;
2173 			break;
2174 
2175 		case TCPOPT_SACK_PERMITTED:
2176 			if (!tp->sack_enable ||
			    optlen != TCPOLEN_SACK_PERMITTED)
2177 				continue;
2178 			if (!(th->th_flags & TH_SYN))
2179 				continue;
2180 			if (TCPS_HAVERCVDSYN(tp->t_state))
2181 				continue;
2182 			/* MUST only be set on SYN */
2183 			tp->t_flags |= TF_SACK_PERMIT;
2184 			break;
2185 		case TCPOPT_SACK:
2186 			tcp_sack_option(tp, th, cp, optlen);
2187 			break;
2188 #ifdef TCP_SIGNATURE
2189 		case TCPOPT_SIGNATURE:
2190 			if (optlen != TCPOLEN_SIGNATURE)
2191 				continue;
2192 
2193 			if (sigp && timingsafe_bcmp(sigp, cp + 2, 16))
2194 				return (-1);
2195 
2196 			sigp = cp + 2;
2197 			break;
2198 #endif /* TCP_SIGNATURE */
2199 		}
2200 	}
2201 
2202 #ifdef TCP_SIGNATURE
2203 	if (tp->t_flags & TF_SIGNATURE) {
2204 		union sockaddr_union src, dst;
2205 
2206 		memset(&src, 0, sizeof(union sockaddr_union));
2207 		memset(&dst, 0, sizeof(union sockaddr_union));
2208 
2209 		switch (tp->pf) {
2210 		case 0:
2211 		case AF_INET:
2212 			src.sa.sa_len = sizeof(struct sockaddr_in);
2213 			src.sa.sa_family = AF_INET;
2214 			src.sin.sin_addr = mtod(m, struct ip *)->ip_src;
2215 			dst.sa.sa_len = sizeof(struct sockaddr_in);
2216 			dst.sa.sa_family = AF_INET;
2217 			dst.sin.sin_addr = mtod(m, struct ip *)->ip_dst;
2218 			break;
2219 #ifdef INET6
2220 		case AF_INET6:
2221 			src.sa.sa_len = sizeof(struct sockaddr_in6);
2222 			src.sa.sa_family = AF_INET6;
2223 			src.sin6.sin6_addr = mtod(m, struct ip6_hdr *)->ip6_src;
2224 			dst.sa.sa_len = sizeof(struct sockaddr_in6);
2225 			dst.sa.sa_family = AF_INET6;
2226 			dst.sin6.sin6_addr = mtod(m, struct ip6_hdr *)->ip6_dst;
2227 			break;
2228 #endif /* INET6 */
2229 		}
2230 
2231 		tdb = gettdbbysrcdst(rtable_l2(rtableid),
2232 		    0, &src, &dst, IPPROTO_TCP);
2233 
2234 		/*
2235 		 * We don't have an SA for this peer, so we turn off
2236 		 * TF_SIGNATURE on the listen socket
2237 		 */
2238 		if (tdb == NULL && tp->t_state == TCPS_LISTEN)
2239 			tp->t_flags &= ~TF_SIGNATURE;
2240 
2241 	}
2242 
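	/*
	 * A signature must be present if and only if this connection
	 * expects one; a mismatch in either direction is an error.
	 */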
2243 	if ((sigp ? TF_SIGNATURE : 0) ^ (tp->t_flags & TF_SIGNATURE)) {
2244 		tcpstat_inc(tcps_rcvbadsig);
2245 		return (-1);
2246 	}
2247 
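	/* Verify the received digest against one we compute locally. */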
2248 	if (sigp) {
2249 		char sig[16];
2250 
2251 		if (tdb == NULL) {
2252 			tcpstat_inc(tcps_rcvbadsig);
2253 			return (-1);
2254 		}
2255 
2256 		if (tcp_signature(tdb, tp->pf, m, th, iphlen, 1, sig) < 0)
2257 			return (-1);
2258 
2259 		if (timingsafe_bcmp(sig, sigp, 16)) {
2260 			tcpstat_inc(tcps_rcvbadsig);
2261 			return (-1);
2262 		}
2263 
2264 		tcpstat_inc(tcps_rcvgoodsig);
2265 	}
2266 #endif /* TCP_SIGNATURE */
2267 
2268 	return (0);
2269 }
2270 
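/*
 * Sequence-number subtraction: tcp_seq arithmetic wraps modulo 2^32,
 * so plain unsigned subtraction yields the correct distance as long
 * as the two values are less than 2^31 apart.
 */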
2271 u_long
2272 tcp_seq_subtract(u_long a, u_long b)
2273 {
2274 	return ((long)(a - b));
2275 }
2276 
2277 /*
2278  * This function is called upon receipt of new valid data (while not in header
2279  * prediction mode), and it updates the ordered list of sacks.
2280  */
2281 void
2282 tcp_update_sack_list(struct tcpcb *tp, tcp_seq rcv_laststart,
2283     tcp_seq rcv_lastend)
2284 {
2285 	/*
2286 	 * First reported block MUST be the most recent one.  Subsequent
2287 	 * blocks SHOULD be in the order in which they arrived at the
2288 	 * receiver.  These two conditions make the implementation fully
2289 	 * compliant with RFC 2018.
2290 	 */
2291 	int i, j = 0, count = 0, lastpos = -1;
2292 	struct sackblk sack, firstsack, temp[MAX_SACK_BLKS];
2293 
2294 	/* First clean up current list of sacks */
2295 	for (i = 0; i < tp->rcv_numsacks; i++) {
2296 		sack = tp->sackblks[i];
2297 		if (sack.start == 0 && sack.end == 0) {
2298 			count++; /* count = number of blocks to be discarded */
2299 			continue;
2300 		}
2301 		if (SEQ_LEQ(sack.end, tp->rcv_nxt)) {
2302 			tp->sackblks[i].start = tp->sackblks[i].end = 0;
2303 			count++;
2304 		} else {
2305 			temp[j].start = tp->sackblks[i].start;
2306 			temp[j++].end = tp->sackblks[i].end;
2307 		}
2308 	}
2309 	tp->rcv_numsacks -= count;
2310 	if (tp->rcv_numsacks == 0) { /* no sack blocks currently (fast path) */
2311 		tcp_clean_sackreport(tp);
2312 		if (SEQ_LT(tp->rcv_nxt, rcv_laststart)) {
2313 			/* ==> need first sack block */
2314 			tp->sackblks[0].start = rcv_laststart;
2315 			tp->sackblks[0].end = rcv_lastend;
2316 			tp->rcv_numsacks = 1;
2317 		}
2318 		return;
2319 	}
2320 	/* Otherwise, sack blocks are already present. */
2321 	for (i = 0; i < tp->rcv_numsacks; i++)
2322 		tp->sackblks[i] = temp[i]; /* first copy back sack list */
2323 	if (SEQ_GEQ(tp->rcv_nxt, rcv_lastend))
2324 		return;     /* sack list remains unchanged */
2325 	/*
2326 	 * From here, segment just received should be (part of) the 1st sack.
2327 	 * Go through list, possibly coalescing sack block entries.
2328 	 */
2329 	firstsack.start = rcv_laststart;
2330 	firstsack.end = rcv_lastend;
2331 	for (i = 0; i < tp->rcv_numsacks; i++) {
2332 		sack = tp->sackblks[i];
2333 		if (SEQ_LT(sack.end, firstsack.start) ||
2334 		    SEQ_GT(sack.start, firstsack.end))
2335 			continue; /* no overlap */
2336 		if (sack.start == firstsack.start && sack.end == firstsack.end){
2337 			/*
2338 			 * identical block; delete it here since we will
2339 			 * move it to the front of the list.
2340 			 */
2341 			tp->sackblks[i].start = tp->sackblks[i].end = 0;
2342 			lastpos = i;    /* last posn with a zero entry */
2343 			continue;
2344 		}
2345 		if (SEQ_LEQ(sack.start, firstsack.start))
2346 			firstsack.start = sack.start; /* merge blocks */
2347 		if (SEQ_GEQ(sack.end, firstsack.end))
2348 			firstsack.end = sack.end;     /* merge blocks */
2349 		tp->sackblks[i].start = tp->sackblks[i].end = 0;
2350 		lastpos = i;    /* last posn with a zero entry */
2351 	}
2352 	if (lastpos != -1) {    /* at least one merge */
2353 		for (i = 0, j = 1; i < tp->rcv_numsacks; i++) {
2354 			sack = tp->sackblks[i];
2355 			if (sack.start == 0 && sack.end == 0)
2356 				continue;
2357 			temp[j++] = sack;
2358 		}
2359 		tp->rcv_numsacks = j; /* including first blk (added later) */
2360 		for (i = 1; i < tp->rcv_numsacks; i++) /* now copy back */
2361 			tp->sackblks[i] = temp[i];
2362 	} else {        /* no merges -- shift sacks by 1 */
2363 		if (tp->rcv_numsacks < MAX_SACK_BLKS)
2364 			tp->rcv_numsacks++;
2365 		for (i = tp->rcv_numsacks-1; i > 0; i--)
2366 			tp->sackblks[i] = tp->sackblks[i-1];
2367 	}
2368 	tp->sackblks[0] = firstsack;
2369 	return;
2370 }
2371 
2372 /*
2373  * Process the TCP SACK option.  tp->snd_holes is an ordered list
2374  * of holes (oldest to newest, in terms of the sequence space).
2375  */
2376 void
2377 tcp_sack_option(struct tcpcb *tp, struct tcphdr *th, u_char *cp, int optlen)
2378 {
2379 	int tmp_olen;
2380 	u_char *tmp_cp;
2381 	struct sackhole *cur, *p, *temp;
2382 
2383 	if (!tp->sack_enable)
2384 		return;
2385 	/* SACK without ACK doesn't make sense. */
2386 	if ((th->th_flags & TH_ACK) == 0)
2387 		return;
2388 	/* Make sure the ACK on this segment is in [snd_una, snd_max]. */
2389 	if (SEQ_LT(th->th_ack, tp->snd_una) ||
2390 	    SEQ_GT(th->th_ack, tp->snd_max))
2391 		return;
2392 	/* Note: TCPOLEN_SACK must be 2*sizeof(tcp_seq) */
2393 	if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0)
2394 		return;
2396 	tmp_cp = cp + 2;
2397 	tmp_olen = optlen - 2;
2398 	tcpstat_inc(tcps_sack_rcv_opts);
2399 	if (tp->snd_numholes < 0)
2400 		tp->snd_numholes = 0;
2401 	if (tp->t_maxseg == 0)
2402 		panic("tcp_sack_option"); /* Should never happen */
2403 	while (tmp_olen > 0) {
2404 		struct sackblk sack;
2405 
2406 		memcpy(&sack.start, tmp_cp, sizeof(tcp_seq));
2407 		sack.start = ntohl(sack.start);
2408 		memcpy(&sack.end, tmp_cp + sizeof(tcp_seq), sizeof(tcp_seq));
2409 		sack.end = ntohl(sack.end);
2410 		tmp_olen -= TCPOLEN_SACK;
2411 		tmp_cp += TCPOLEN_SACK;
2412 		if (SEQ_LEQ(sack.end, sack.start))
2413 			continue; /* bad SACK fields */
2414 		if (SEQ_LEQ(sack.end, tp->snd_una))
2415 			continue; /* old block */
2416 		if (SEQ_GT(th->th_ack, tp->snd_una)) {
2417 			if (SEQ_LT(sack.start, th->th_ack))
2418 				continue;
2419 		}
2420 		if (SEQ_GT(sack.end, tp->snd_max))
2421 			continue;
2422 		if (tp->snd_holes == NULL) { /* first hole */
2423 			tp->snd_holes = (struct sackhole *)
2424 			    pool_get(&sackhl_pool, PR_NOWAIT);
2425 			if (tp->snd_holes == NULL) {
2426 				/* ENOBUFS, so ignore SACKed block for now */
2427 				goto dropped;
2428 			}
2429 			cur = tp->snd_holes;
2430 			cur->start = th->th_ack;
2431 			cur->end = sack.start;
2432 			cur->rxmit = cur->start;
2433 			cur->next = NULL;
2434 			tp->snd_numholes = 1;
2435 			tp->rcv_lastsack = sack.end;
2436 			/*
2437 			 * dups is at least one.  If more data has been
2438 			 * SACKed, it can be greater than one.
2439 			 */
2440 			cur->dups = min(tcprexmtthresh,
2441 			    ((sack.end - cur->end)/tp->t_maxseg));
2442 			if (cur->dups < 1)
2443 				cur->dups = 1;
2444 			continue; /* with next sack block */
2445 		}
2446 		/* Go thru list of holes:  p = previous,  cur = current */
2447 		p = cur = tp->snd_holes;
2448 		while (cur) {
2449 			if (SEQ_LEQ(sack.end, cur->start))
2450 				/* SACKs data before the current hole */
2451 				break; /* no use going through more holes */
2452 			if (SEQ_GEQ(sack.start, cur->end)) {
2453 				/* SACKs data beyond the current hole */
2454 				cur->dups++;
2455 				if (((sack.end - cur->end)/tp->t_maxseg) >=
2456 				    tcprexmtthresh)
2457 					cur->dups = tcprexmtthresh;
2458 				p = cur;
2459 				cur = cur->next;
2460 				continue;
2461 			}
2462 			if (SEQ_LEQ(sack.start, cur->start)) {
2463 				/* Data acks at least the beginning of hole */
2464 				if (SEQ_GEQ(sack.end, cur->end)) {
2465 					/* Acks entire hole, so delete hole */
2466 					if (p != cur) {
2467 						p->next = cur->next;
2468 						pool_put(&sackhl_pool, cur);
2469 						cur = p->next;
2470 					} else {
2471 						cur = cur->next;
2472 						pool_put(&sackhl_pool, p);
2473 						p = cur;
2474 						tp->snd_holes = p;
2475 					}
2476 					tp->snd_numholes--;
2477 					continue;
2478 				}
2479 				/* otherwise, move start of hole forward */
2480 				cur->start = sack.end;
2481 				cur->rxmit = SEQ_MAX(cur->rxmit, cur->start);
2482 				p = cur;
2483 				cur = cur->next;
2484 				continue;
2485 			}
2486 			/* move end of hole backward */
2487 			if (SEQ_GEQ(sack.end, cur->end)) {
2488 				cur->end = sack.start;
2489 				cur->rxmit = SEQ_MIN(cur->rxmit, cur->end);
2490 				cur->dups++;
2491 				if (((sack.end - cur->end)/tp->t_maxseg) >=
2492 				    tcprexmtthresh)
2493 					cur->dups = tcprexmtthresh;
2494 				p = cur;
2495 				cur = cur->next;
2496 				continue;
2497 			}
2498 			if (SEQ_LT(cur->start, sack.start) &&
2499 			    SEQ_GT(cur->end, sack.end)) {
2500 				/*
2501 				 * ACKs some data in middle of a hole; need to
2502 				 * split current hole
2503 				 */
2504 				if (tp->snd_numholes >= TCP_SACKHOLE_LIMIT)
2505 					goto dropped;
2506 				temp = (struct sackhole *)
2507 				    pool_get(&sackhl_pool, PR_NOWAIT);
2508 				if (temp == NULL)
2509 					goto dropped; /* ENOBUFS */
2510 				temp->next = cur->next;
2511 				temp->start = sack.end;
2512 				temp->end = cur->end;
2513 				temp->dups = cur->dups;
2514 				temp->rxmit = SEQ_MAX(cur->rxmit, temp->start);
2515 				cur->end = sack.start;
2516 				cur->rxmit = SEQ_MIN(cur->rxmit, cur->end);
2517 				cur->dups++;
2518 				if (((sack.end - cur->end)/tp->t_maxseg) >=
2519 				    tcprexmtthresh)
2520 					cur->dups = tcprexmtthresh;
2521 				cur->next = temp;
2522 				p = temp;
2523 				cur = p->next;
2524 				tp->snd_numholes++;
2525 			}
2526 		}
2527 		/* At this point, p points to the last hole on the list */
2528 		if (SEQ_LT(tp->rcv_lastsack, sack.start)) {
2529 			/*
2530 			 * Need to append new hole at end.
2531 			 * Last hole is p (and it's not NULL).
2532 			 */
2533 			if (tp->snd_numholes >= TCP_SACKHOLE_LIMIT)
2534 				goto dropped;
2535 			temp = (struct sackhole *)
2536 			    pool_get(&sackhl_pool, PR_NOWAIT);
2537 			if (temp == NULL)
2538 				goto dropped; /* ENOBUFS */
2539 			temp->start = tp->rcv_lastsack;
2540 			temp->end = sack.start;
2541 			temp->dups = min(tcprexmtthresh,
2542 			    ((sack.end - sack.start)/tp->t_maxseg));
2543 			if (temp->dups < 1)
2544 				temp->dups = 1;
2545 			temp->rxmit = temp->start;
2546 			temp->next = 0;
2547 			p->next = temp;
2548 			tp->rcv_lastsack = sack.end;
2549 			tp->snd_numholes++;
2550 		}
2551 	}
2552 	return;
2553 dropped:
2554 	tcpstat_inc(tcps_sack_drop_opts);
2555 }
2556 
2557 /*
2558  * Delete stale (i.e., cumulatively ack'd) holes.  A hole is deleted only if
2559  * it is completely acked; otherwise, tcp_sack_option(), called from
2560  * tcp_dooptions(), will fix up the hole.
2561  */
2562 void
2563 tcp_del_sackholes(struct tcpcb *tp, struct tcphdr *th)
2564 {
2565 	if (tp->sack_enable && tp->t_state != TCPS_LISTEN) {
2566 		/* max because this could be an older ack just arrived */
2567 		tcp_seq lastack = SEQ_GT(th->th_ack, tp->snd_una) ?
2568 			th->th_ack : tp->snd_una;
2569 		struct sackhole *cur = tp->snd_holes;
2570 		struct sackhole *prev;
2571 		while (cur)
2572 			if (SEQ_LEQ(cur->end, lastack)) {
2573 				prev = cur;
2574 				cur = cur->next;
2575 				pool_put(&sackhl_pool, prev);
2576 				tp->snd_numholes--;
2577 			} else if (SEQ_LT(cur->start, lastack)) {
2578 				cur->start = lastack;
2579 				if (SEQ_LT(cur->rxmit, cur->start))
2580 					cur->rxmit = cur->start;
2581 				break;
2582 			} else
2583 				break;
2584 		tp->snd_holes = cur;
2585 	}
2586 }
2587 
2588 /*
2589  * Delete all receiver-side SACK information.
2590  */
2591 void
2592 tcp_clean_sackreport(struct tcpcb *tp)
2593 {
2594 	int i;
2595 
2596 	tp->rcv_numsacks = 0;
2597 	for (i = 0; i < MAX_SACK_BLKS; i++)
2598 		tp->sackblks[i].start = tp->sackblks[i].end = 0;
2600 }
2601 
2602 /*
2603  * Partial ack handling within a sack recovery episode.  When a partial ack
2604  * arrives, turn off retransmission timer, deflate the window, do not clear
2605  * tp->t_dupacks.
2606  */
2607 void
2608 tcp_sack_partialack(struct tcpcb *tp, struct tcphdr *th)
2609 {
2610 	/* Turn off retx. timer (will start again next segment) */
2611 	TCP_TIMER_DISARM(tp, TCPT_REXMT);
2612 	tp->t_rtttime = 0;
2613 	/*
2614 	 * Partial window deflation.  This statement relies on the
2615 	 * fact that tp->snd_una has not been updated yet.
2616 	 */
2617 	if (tp->snd_cwnd > (th->th_ack - tp->snd_una)) {
2618 		tp->snd_cwnd -= th->th_ack - tp->snd_una;
2619 		tp->snd_cwnd += tp->t_maxseg;
2620 	} else
2621 		tp->snd_cwnd = tp->t_maxseg;
2622 	tp->snd_cwnd += tp->t_maxseg;
2623 	tp->t_flags |= TF_NEEDOUTPUT;
2624 }
2625 
2626 /*
2627  * Pull out of band byte out of a segment so
2628  * it doesn't appear in the user's data queue.
2629  * It is still reflected in the segment length for
2630  * sequencing purposes.
2631  */
2632 void
2633 tcp_pulloutofband(struct socket *so, u_int urgent, struct mbuf *m, int off)
2634 {
2635 	int cnt = off + urgent - 1;
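	/* cnt is the offset of the out-of-band byte in the mbuf chain. */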
2636 
2637 	while (cnt >= 0) {
2638 		if (m->m_len > cnt) {
2639 			char *cp = mtod(m, caddr_t) + cnt;
2640 			struct tcpcb *tp = sototcpcb(so);
2641 
2642 			tp->t_iobc = *cp;
2643 			tp->t_oobflags |= TCPOOB_HAVEDATA;
2644 			memmove(cp, cp + 1, m->m_len - cnt - 1);
2645 			m->m_len--;
2646 			return;
2647 		}
2648 		cnt -= m->m_len;
2649 		m = m->m_next;
2650 		if (m == NULL)
2651 			break;
2652 	}
2653 	panic("tcp_pulloutofband");
2654 }
2655 
2656 /*
2657  * Collect new round-trip time estimate
2658  * and update averages and current timeout.
2659  */
2660 void
2661 tcp_xmit_timer(struct tcpcb *tp, int rtt)
2662 {
2663 	short delta;
2664 	short rttmin;
2665 
2666 	if (rtt < 0)
2667 		rtt = 0;
2668 	else if (rtt > TCP_RTT_MAX)
2669 		rtt = TCP_RTT_MAX;
2670 
2671 	tcpstat_inc(tcps_rttupdated);
2672 	if (tp->t_srtt != 0) {
2673 		/*
2674 		 * delta is fixed point with 2 (TCP_RTT_BASE_SHIFT) bits
2675 		 * after the binary point (scaled by 4), whereas
2676 		 * srtt is stored as fixed point with 5 bits after the
2677 		 * binary point (i.e., scaled by 32).  The following magic
2678 		 * is equivalent to the smoothing algorithm in rfc793 with
2679 		 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
2680 		 * point).
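		 * E.g. (illustrative values): with rtt = 10 ticks and
		 * t_srtt = 256 (8 ticks scaled by 32), delta =
		 * (10 << 2) - (256 >> 3) = 8, so t_srtt becomes 264,
		 * i.e. 8.25 ticks = 7/8 * 8 + 1/8 * 10.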
2681 		 */
2682 		delta = (rtt << TCP_RTT_BASE_SHIFT) -
2683 		    (tp->t_srtt >> TCP_RTT_SHIFT);
2684 		if ((tp->t_srtt += delta) <= 0)
2685 			tp->t_srtt = 1 << TCP_RTT_BASE_SHIFT;
2686 		/*
2687 		 * We accumulate a smoothed rtt variance (actually, a
2688 		 * smoothed mean difference), then set the retransmit
2689 		 * timer to smoothed rtt + 4 times the smoothed variance.
2690 		 * rttvar is stored as fixed point with 4 bits after the
2691 		 * binary point (scaled by 16).  The following is
2692 		 * equivalent to rfc793 smoothing with an alpha of .75
2693 		 * (rttvar = rttvar*3/4 + |delta| / 4).  This replaces
2694 		 * rfc793's wired-in beta.
2695 		 */
2696 		if (delta < 0)
2697 			delta = -delta;
2698 		delta -= (tp->t_rttvar >> TCP_RTTVAR_SHIFT);
2699 		if ((tp->t_rttvar += delta) <= 0)
2700 			tp->t_rttvar = 1 << TCP_RTT_BASE_SHIFT;
2701 	} else {
2702 		/*
2703 		 * No rtt measurement yet - use the unsmoothed rtt.
2704 		 * Set the variance to half the rtt (so our first
2705 		 * retransmit happens at 3*rtt).
2706 		 */
2707 		tp->t_srtt = (rtt + 1) << (TCP_RTT_SHIFT + TCP_RTT_BASE_SHIFT);
2708 		tp->t_rttvar = (rtt + 1) <<
2709 		    (TCP_RTTVAR_SHIFT + TCP_RTT_BASE_SHIFT - 1);
2710 	}
2711 	tp->t_rtttime = 0;
2712 	tp->t_rxtshift = 0;
2713 
2714 	/*
2715 	 * the retransmit should happen at rtt + 4 * rttvar.
2716 	 * Because of the way we do the smoothing, srtt and rttvar
2717 	 * will each average +1/2 tick of bias.  When we compute
2718 	 * the retransmit timer, we want 1/2 tick of rounding and
2719 	 * 1 extra tick because of +-1/2 tick uncertainty in the
2720 	 * firing of the timer.  The bias will give us exactly the
2721 	 * 1.5 tick we need.  But, because the bias is
2722 	 * statistical, we have to test that we don't drop below
2723 	 * the minimum feasible timer (which is 2 ticks).
2724 	 */
2725 	rttmin = min(max(rtt + 2, tp->t_rttmin), TCPTV_REXMTMAX);
2726 	TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp), rttmin, TCPTV_REXMTMAX);
2727 
2728 	/*
2729 	 * We received an ack for a packet that wasn't retransmitted;
2730 	 * it is probably safe to discard any error indications we've
2731 	 * received recently.  This isn't quite right, but close enough
2732 	 * for now (a route might have failed after we sent a segment,
2733 	 * and the return path might not be symmetrical).
2734 	 */
2735 	tp->t_softerror = 0;
2736 }
2737 
2738 /*
2739  * Determine a reasonable value for maxseg size.
2740  * If the route is known, check route for mtu.
2741  * If none, use an mss that can be handled on the outgoing
2742  * interface without forcing IP to fragment; if bigger than
2743  * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES
2744  * to utilize large mbufs.  If no route is found, route has no mtu,
2745  * or the destination isn't local, use a default, hopefully conservative
2746  * size (usually 512 or the default IP max size, but no more than the mtu
2747  * of the interface), as we can't discover anything about intervening
2748  * gateways or networks.  We also initialize the congestion/slow start
2749  * window to be a single segment if the destination isn't local.
2750  * While looking at the routing entry, we also initialize other path-dependent
2751  * parameters from pre-set or cached values in the routing entry.
2752  *
2753  * Also take into account the space needed for options that we
2754  * send regularly.  Make maxseg shorter by that amount to assure
2755  * that we can send maxseg amount of data even when the options
2756  * are present.  Store the upper limit of the length of options plus
2757  * data in maxopd.
2758  *
2759  * NOTE: offer == -1 indicates that the maxseg size changed due to
2760  * Path MTU discovery.
2761  */
2762 int
2763 tcp_mss(struct tcpcb *tp, int offer)
2764 {
2765 	struct rtentry *rt;
2766 	struct ifnet *ifp = NULL;
2767 	int mss, mssopt;
2768 	int iphlen;
2769 	struct inpcb *inp;
2770 
2771 	inp = tp->t_inpcb;
2772 
2773 	mssopt = mss = tcp_mssdflt;
2774 
2775 	rt = in_pcbrtentry(inp);
2776 
2777 	if (rt == NULL)
2778 		goto out;
2779 
2780 	ifp = if_get(rt->rt_ifidx);
2781 	if (ifp == NULL)
2782 		goto out;
2783 
2784 	switch (tp->pf) {
2785 #ifdef INET6
2786 	case AF_INET6:
2787 		iphlen = sizeof(struct ip6_hdr);
2788 		break;
2789 #endif
2790 	case AF_INET:
2791 		iphlen = sizeof(struct ip);
2792 		break;
2793 	default:
2794 		/* the family does not support path MTU discovery */
2795 		goto out;
2796 	}
2797 
2798 	/*
2799 	 * If there's an mtu associated with the route and we support
2800 	 * path MTU discovery for the underlying protocol family, use it.
2801 	 */
2802 	if (rt->rt_mtu) {
2803 		/*
2804 		 * One may wish to lower MSS to take into account options,
2805 		 * especially security-related options.
2806 		 */
2807 		if (tp->pf == AF_INET6 && rt->rt_mtu < IPV6_MMTU) {
2808 			/*
2809 			 * RFC2460 section 5, last paragraph: if path MTU is
2810 			 * smaller than 1280, use 1280 as packet size and
2811 			 * attach fragment header.
2812 			 */
2813 			mss = IPV6_MMTU - iphlen - sizeof(struct ip6_frag) -
2814 			    sizeof(struct tcphdr);
2815 		} else {
2816 			mss = rt->rt_mtu - iphlen -
2817 			    sizeof(struct tcphdr);
2818 		}
2819 	} else if (ifp->if_flags & IFF_LOOPBACK) {
2820 		mss = ifp->if_mtu - iphlen - sizeof(struct tcphdr);
2821 	} else if (tp->pf == AF_INET) {
2822 		if (ip_mtudisc)
2823 			mss = ifp->if_mtu - iphlen - sizeof(struct tcphdr);
2824 	}
2825 #ifdef INET6
2826 	else if (tp->pf == AF_INET6) {
2827 		/*
2828 		 * for IPv6, path MTU discovery is always turned on,
2829 		 * or the node must use packet size <= 1280.
2830 		 */
2831 		mss = ifp->if_mtu - iphlen - sizeof(struct tcphdr);
2832 	}
2833 #endif /* INET6 */
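	/*
	 * E.g. (illustrative): an IPv4 route over standard ethernet
	 * (mtu 1500) yields mss = 1500 - 20 - 20 = 1460 octets.
	 */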
2834 
2835 	/* Calculate the value that we offer in TCPOPT_MAXSEG */
2836 	if (offer != -1) {
2837 		mssopt = ifp->if_mtu - iphlen - sizeof(struct tcphdr);
2838 		mssopt = max(tcp_mssdflt, mssopt);
2839 	}
2840  out:
2841 	if_put(ifp);
2842 	/*
2843 	 * The current mss, t_maxseg, is initialized to the default value.
2844 	 * If we compute a smaller value, reduce the current mss.
2845 	 * If we compute a larger value, return it for use in sending
2846 	 * a max seg size option, but don't store it for use
2847 	 * unless we received an offer at least that large from the peer.
2848 	 *
2849 	 * However, do not accept offers lower than the minimum of
2850 	 * the interface MTU and 216.
2851 	 */
2852 	if (offer > 0)
2853 		tp->t_peermss = offer;
2854 	if (tp->t_peermss)
2855 		mss = min(mss, max(tp->t_peermss, 216));
2856 
2857 	/* sanity - at least max opt. space */
2858 	mss = max(mss, 64);
2859 
2860 	/*
2861 	 * maxopd stores the maximum length of data AND options
2862 	 * in a segment; maxseg is the amount of data in a normal
2863 	 * segment.  We need to store this value (maxopd) apart
2864 	 * from maxseg, because now every segment carries options
2865 	 * and thus we normally have somewhat less data in segments.
2866 	 */
2867 	tp->t_maxopd = mss;
2868 
2869 	if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
2870 	    (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
2871 		mss -= TCPOLEN_TSTAMP_APPA;
2872 #ifdef TCP_SIGNATURE
2873 	if (tp->t_flags & TF_SIGNATURE)
2874 		mss -= TCPOLEN_SIGLEN;
2875 #endif
2876 
2877 	if (offer == -1) {
2878 		/* mss changed due to Path MTU discovery */
2879 		tp->t_flags &= ~TF_PMTUD_PEND;
2880 		tp->t_pmtud_mtu_sent = 0;
2881 		tp->t_pmtud_mss_acked = 0;
2882 		if (mss < tp->t_maxseg) {
2883 			/*
2884 			 * Follow suggestion in RFC 2414 to reduce the
2885 			 * congestion window by the ratio of the old
2886 			 * segment size to the new segment size.
2887 			 */
2888 			tp->snd_cwnd = ulmax((tp->snd_cwnd / tp->t_maxseg) *
2889 					     mss, mss);
2890 		}
2891 	} else if (tcp_do_rfc3390 == 2) {
2892 		/* increase initial window, as in RFC 6928 */
2893 		tp->snd_cwnd = ulmin(10 * mss, ulmax(2 * mss, 14600));
2894 	} else if (tcp_do_rfc3390) {
2895 		/* increase initial window, as in RFC 3390 */
2896 		tp->snd_cwnd = ulmin(4 * mss, ulmax(2 * mss, 4380));
2897 	} else
2898 		tp->snd_cwnd = mss;
2899 
2900 	tp->t_maxseg = mss;
2901 
2902 	return (offer != -1 ? mssopt : mss);
2903 }
2904 
2905 u_int
2906 tcp_hdrsz(struct tcpcb *tp)
2907 {
2908 	u_int hlen;
2909 
2910 	switch (tp->pf) {
2911 #ifdef INET6
2912 	case AF_INET6:
2913 		hlen = sizeof(struct ip6_hdr);
2914 		break;
2915 #endif
2916 	case AF_INET:
2917 		hlen = sizeof(struct ip);
2918 		break;
2919 	default:
2920 		hlen = 0;
2921 		break;
2922 	}
2923 	hlen += sizeof(struct tcphdr);
2924 
2925 	if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
2926 	    (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
2927 		hlen += TCPOLEN_TSTAMP_APPA;
2928 #ifdef TCP_SIGNATURE
2929 	if (tp->t_flags & TF_SIGNATURE)
2930 		hlen += TCPOLEN_SIGLEN;
2931 #endif
2932 	return (hlen);
2933 }
2934 
2935 /*
2936  * Set connection variables based on the effective MSS.
2937  * We are passed the TCPCB for the actual connection.  If we
2938  * are the server, we are called by the compressed state engine
2939  * when the 3-way handshake is complete.  If we are the client,
2940  * we are called when we receive the SYN,ACK from the server.
2941  *
2942  * NOTE: The t_maxseg value must be initialized in the TCPCB
2943  * before this routine is called!
2944  */
2945 void
2946 tcp_mss_update(struct tcpcb *tp)
2947 {
2948 	int mss;
2949 	u_long bufsize;
2950 	struct rtentry *rt;
2951 	struct socket *so;
2952 
2953 	so = tp->t_inpcb->inp_socket;
2954 	mss = tp->t_maxseg;
2955 
2956 	rt = in_pcbrtentry(tp->t_inpcb);
2957 
2958 	if (rt == NULL)
2959 		return;
2960 
2961 	bufsize = so->so_snd.sb_hiwat;
2962 	if (bufsize < mss) {
2963 		mss = bufsize;
2964 		/* Update t_maxseg and t_maxopd */
2965 		tcp_mss(tp, mss);
2966 	} else {
2967 		bufsize = roundup(bufsize, mss);
2968 		if (bufsize > sb_max)
2969 			bufsize = sb_max;
2970 		(void)sbreserve(so, &so->so_snd, bufsize);
2971 	}
2972 
2973 	bufsize = so->so_rcv.sb_hiwat;
2974 	if (bufsize > mss) {
2975 		bufsize = roundup(bufsize, mss);
2976 		if (bufsize > sb_max)
2977 			bufsize = sb_max;
2978 		(void)sbreserve(so, &so->so_rcv, bufsize);
2979 	}
2981 }
2982 
2983 /*
2984  * When a partial ack arrives, force the retransmission of the
2985  * next unacknowledged segment.  Do not clear tp->t_dupacks.
2986  * By setting snd_nxt to ti_ack, this forces retransmission timer
2987  * to be started again.
2988  */
2989 void
2990 tcp_newreno_partialack(struct tcpcb *tp, struct tcphdr *th)
2991 {
2992 	/*
2993 	 * snd_una has not been updated and the socket send buffer
2994 	 * not yet drained of the acked data, so we have to leave
2995 	 * snd_una as it was to get the correct data offset in
2996 	 * tcp_output().
2997 	 */
2998 	tcp_seq onxt = tp->snd_nxt;
2999 	u_long  ocwnd = tp->snd_cwnd;
3000 
3001 	TCP_TIMER_DISARM(tp, TCPT_REXMT);
3002 	tp->t_rtttime = 0;
3003 	tp->snd_nxt = th->th_ack;
3004 	/*
3005 	 * Set snd_cwnd to one segment beyond acknowledged offset
3006 	 * (tp->snd_una not yet updated when this function is called)
3007 	 */
3008 	tp->snd_cwnd = tp->t_maxseg + (th->th_ack - tp->snd_una);
3009 	(void)tcp_output(tp);
3010 	tp->snd_cwnd = ocwnd;
3011 	if (SEQ_GT(onxt, tp->snd_nxt))
3012 		tp->snd_nxt = onxt;
3013 	/*
3014 	 * Partial window deflation.  Relies on fact that tp->snd_una
3015 	 * not updated yet.
3016 	 */
3017 	if (tp->snd_cwnd > th->th_ack - tp->snd_una)
3018 		tp->snd_cwnd -= th->th_ack - tp->snd_una;
3019 	else
3020 		tp->snd_cwnd = 0;
3021 	tp->snd_cwnd += tp->t_maxseg;
3022 }
3023 
3024 int
3025 tcp_mss_adv(struct mbuf *m, int af)
3026 {
3027 	int mss = 0;
3028 	int iphlen;
3029 	struct ifnet *ifp = NULL;
3030 
3031 	if (m && (m->m_flags & M_PKTHDR))
3032 		ifp = if_get(m->m_pkthdr.ph_ifidx);
3033 
3034 	switch (af) {
3035 	case AF_INET:
3036 		if (ifp != NULL)
3037 			mss = ifp->if_mtu;
3038 		iphlen = sizeof(struct ip);
3039 		break;
3040 #ifdef INET6
3041 	case AF_INET6:
3042 		if (ifp != NULL)
3043 			mss = ifp->if_mtu;
3044 		iphlen = sizeof(struct ip6_hdr);
3045 		break;
3046 #endif
3047 	default:
3048 		unhandled_af(af);
3049 	}
3050 	if_put(ifp);
3051 	mss = mss - iphlen - sizeof(struct tcphdr);
3052 	return (max(mss, tcp_mssdflt));
3053 }
3054 
3055 /*
3056  * TCP compressed state engine.  Currently used to hold compressed
3057  * state for SYN_RECEIVED.
3058  */
3059 
3060 /* syn hash parameters */
3061 int	tcp_syn_hash_size = TCP_SYN_HASH_SIZE;
3062 int	tcp_syn_cache_limit = TCP_SYN_HASH_SIZE*TCP_SYN_BUCKET_SIZE;
3063 int	tcp_syn_bucket_limit = 3*TCP_SYN_BUCKET_SIZE;
3064 int	tcp_syn_use_limit = 100000;
3065 
3066 struct syn_cache_set tcp_syn_cache[2];
3067 int tcp_syn_cache_active;
3068 
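/*
 * The hash mixes addresses and ports with per-cache random secrets
 * (scs_random) so that remote peers cannot predict bucket placement.
 */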
3069 #define SYN_HASH(sa, sp, dp, rand) \
3070 	(((sa)->s_addr ^ (rand)[0]) *				\
3071 	(((((u_int32_t)(dp))<<16) + ((u_int32_t)(sp))) ^ (rand)[4]))
3072 #ifndef INET6
3073 #define	SYN_HASHALL(hash, src, dst, rand) \
3074 do {									\
3075 	hash = SYN_HASH(&satosin(src)->sin_addr,			\
3076 		satosin(src)->sin_port,					\
3077 		satosin(dst)->sin_port, (rand));			\
3078 } while (/*CONSTCOND*/ 0)
3079 #else
3080 #define SYN_HASH6(sa, sp, dp, rand) \
3081 	(((sa)->s6_addr32[0] ^ (rand)[0]) *			\
3082 	((sa)->s6_addr32[1] ^ (rand)[1]) *			\
3083 	((sa)->s6_addr32[2] ^ (rand)[2]) *			\
3084 	((sa)->s6_addr32[3] ^ (rand)[3]) *			\
3085 	(((((u_int32_t)(dp))<<16) + ((u_int32_t)(sp))) ^ (rand)[4]))
3086 
3087 #define SYN_HASHALL(hash, src, dst, rand) \
3088 do {									\
3089 	switch ((src)->sa_family) {					\
3090 	case AF_INET:							\
3091 		hash = SYN_HASH(&satosin(src)->sin_addr,		\
3092 			satosin(src)->sin_port,				\
3093 			satosin(dst)->sin_port, (rand));		\
3094 		break;							\
3095 	case AF_INET6:							\
3096 		hash = SYN_HASH6(&satosin6(src)->sin6_addr,		\
3097 			satosin6(src)->sin6_port,			\
3098 			satosin6(dst)->sin6_port, (rand));		\
3099 		break;							\
3100 	default:							\
3101 		hash = 0;						\
3102 	}								\
3103 } while (/*CONSTCOND*/0)
3104 #endif /* INET6 */
3105 
3106 void
3107 syn_cache_rm(struct syn_cache *sc)
3108 {
3109 	sc->sc_flags |= SCF_DEAD;
3110 	TAILQ_REMOVE(&sc->sc_buckethead->sch_bucket, sc, sc_bucketq);
3111 	sc->sc_tp = NULL;
3112 	LIST_REMOVE(sc, sc_tpq);
3113 	sc->sc_buckethead->sch_length--;
3114 	timeout_del(&sc->sc_timer);
3115 	sc->sc_set->scs_count--;
3116 }
3117 
3118 void
3119 syn_cache_put(struct syn_cache *sc)
3120 {
3121 	m_free(sc->sc_ipopts);
3122 	if (sc->sc_route4.ro_rt != NULL) {
3123 		rtfree(sc->sc_route4.ro_rt);
3124 		sc->sc_route4.ro_rt = NULL;
3125 	}
3126 	timeout_set(&sc->sc_timer, syn_cache_reaper, sc);
3127 	timeout_add(&sc->sc_timer, 0);
3128 }
3129 
3130 struct pool syn_cache_pool;
3131 
3132 /*
3133  * We don't estimate RTT with SYNs, so each packet starts with the default
3134  * RTT and each timer step has a fixed timeout value.
3135  */
3136 #define	SYN_CACHE_TIMER_ARM(sc)						\
3137 do {									\
3138 	TCPT_RANGESET((sc)->sc_rxtcur,					\
3139 	    TCPTV_SRTTDFLT * tcp_backoff[(sc)->sc_rxtshift], TCPTV_MIN,	\
3140 	    TCPTV_REXMTMAX);						\
3141 	if (!timeout_initialized(&(sc)->sc_timer))			\
3142 		timeout_set_proc(&(sc)->sc_timer, syn_cache_timer, (sc)); \
3143 	timeout_add(&(sc)->sc_timer, (sc)->sc_rxtcur * (hz / PR_SLOWHZ)); \
3144 } while (/*CONSTCOND*/0)
3145 
3146 #define	SYN_CACHE_TIMESTAMP(sc)	(tcp_now + (sc)->sc_modulate)
3147 
3148 void
3149 syn_cache_init(void)
3150 {
3151 	int i;
3152 
3153 	/* Initialize the hash buckets. */
3154 	tcp_syn_cache[0].scs_buckethead = mallocarray(tcp_syn_hash_size,
3155 	    sizeof(struct syn_cache_head), M_SYNCACHE, M_WAITOK|M_ZERO);
3156 	tcp_syn_cache[1].scs_buckethead = mallocarray(tcp_syn_hash_size,
3157 	    sizeof(struct syn_cache_head), M_SYNCACHE, M_WAITOK|M_ZERO);
3158 	tcp_syn_cache[0].scs_size = tcp_syn_hash_size;
3159 	tcp_syn_cache[1].scs_size = tcp_syn_hash_size;
3160 	for (i = 0; i < tcp_syn_hash_size; i++) {
3161 		TAILQ_INIT(&tcp_syn_cache[0].scs_buckethead[i].sch_bucket);
3162 		TAILQ_INIT(&tcp_syn_cache[1].scs_buckethead[i].sch_bucket);
3163 	}
3164 
3165 	/* Initialize the syn cache pool. */
3166 	pool_init(&syn_cache_pool, sizeof(struct syn_cache), 0, IPL_SOFTNET,
3167 	    0, "syncache", NULL);
3168 }
3169 
3170 void
3171 syn_cache_insert(struct syn_cache *sc, struct tcpcb *tp)
3172 {
3173 	struct syn_cache_set *set = &tcp_syn_cache[tcp_syn_cache_active];
3174 	struct syn_cache_head *scp;
3175 	struct syn_cache *sc2;
3176 	int i;
3177 
3178 	NET_ASSERT_LOCKED();
3179 
3180 	/*
3181 	 * If there are no entries in the hash table, reinitialize
3182 	 * the hash secrets.  To avoid useless cache swaps and
3183 	 * reinitialization, use it until the limit is reached.
3184 	 * An empty cache is also an opportunity to resize the hash.
3185 	 */
3186 	if (set->scs_count == 0 && set->scs_use <= 0) {
3187 		set->scs_use = tcp_syn_use_limit;
3188 		if (set->scs_size != tcp_syn_hash_size) {
3189 			scp = mallocarray(tcp_syn_hash_size, sizeof(struct
3190 			    syn_cache_head), M_SYNCACHE, M_NOWAIT|M_ZERO);
3191 			if (scp == NULL) {
3192 				/* Try again next time. */
3193 				set->scs_use = 0;
3194 			} else {
3195 				free(set->scs_buckethead, M_SYNCACHE,
3196 				    set->scs_size *
3197 				    sizeof(struct syn_cache_head));
3198 				set->scs_buckethead = scp;
3199 				set->scs_size = tcp_syn_hash_size;
3200 				for (i = 0; i < tcp_syn_hash_size; i++)
3201 					TAILQ_INIT(&scp[i].sch_bucket);
3202 			}
3203 		}
3204 		arc4random_buf(set->scs_random, sizeof(set->scs_random));
3205 		tcpstat_inc(tcps_sc_seedrandom);
3206 	}
3207 
3208 	SYN_HASHALL(sc->sc_hash, &sc->sc_src.sa, &sc->sc_dst.sa,
3209 	    set->scs_random);
3210 	scp = &set->scs_buckethead[sc->sc_hash % set->scs_size];
3211 	sc->sc_buckethead = scp;
3212 
3213 	/*
3214 	 * Make sure that we don't overflow the per-bucket
3215 	 * limit or the total cache size limit.
3216 	 */
3217 	if (scp->sch_length >= tcp_syn_bucket_limit) {
3218 		tcpstat_inc(tcps_sc_bucketoverflow);
3219 		/*
3220 		 * Someone might attack our bucket hash function.  Reseed
3221 		 * with random as soon as the passive syn cache gets empty.
3222 		 */
3223 		set->scs_use = 0;
3224 		/*
3225 		 * The bucket is full.  Toss the oldest element in the
3226 		 * bucket.  This will be the first entry in the bucket.
3227 		 */
3228 		sc2 = TAILQ_FIRST(&scp->sch_bucket);
3229 #ifdef DIAGNOSTIC
3230 		/*
3231 		 * This should never happen; we should always find an
3232 		 * entry in our bucket.
3233 		 */
3234 		if (sc2 == NULL)
3235 			panic("%s: bucketoverflow: impossible", __func__);
3236 #endif
3237 		syn_cache_rm(sc2);
3238 		syn_cache_put(sc2);
3239 	} else if (set->scs_count >= tcp_syn_cache_limit) {
3240 		struct syn_cache_head *scp2, *sce;
3241 
3242 		tcpstat_inc(tcps_sc_overflowed);
3243 		/*
3244 		 * The cache is full.  Toss the oldest entry in the
3245 		 * first non-empty bucket we can find.
3246 		 *
3247 		 * XXX We would really like to toss the oldest
3248 		 * entry in the cache, but we hope that this
3249 		 * condition doesn't happen very often.
3250 		 */
3251 		scp2 = scp;
3252 		if (TAILQ_EMPTY(&scp2->sch_bucket)) {
3253 			sce = &set->scs_buckethead[set->scs_size];
3254 			for (++scp2; scp2 != scp; scp2++) {
3255 				if (scp2 >= sce)
3256 					scp2 = &set->scs_buckethead[0];
3257 				if (! TAILQ_EMPTY(&scp2->sch_bucket))
3258 					break;
3259 			}
3260 #ifdef DIAGNOSTIC
3261 			/*
3262 			 * This should never happen; we should always find a
3263 			 * non-empty bucket.
3264 			 */
3265 			if (scp2 == scp)
3266 				panic("%s: cacheoverflow: impossible",
3267 				    __func__);
3268 #endif
3269 		}
3270 		sc2 = TAILQ_FIRST(&scp2->sch_bucket);
3271 		syn_cache_rm(sc2);
3272 		syn_cache_put(sc2);
3273 	}
3274 
3275 	/*
3276 	 * Initialize the entry's timer.
3277 	 */
3278 	sc->sc_rxttot = 0;
3279 	sc->sc_rxtshift = 0;
3280 	SYN_CACHE_TIMER_ARM(sc);
3281 
3282 	/* Link it from tcpcb entry */
3283 	LIST_INSERT_HEAD(&tp->t_sc, sc, sc_tpq);
3284 
3285 	/* Put it into the bucket. */
3286 	TAILQ_INSERT_TAIL(&scp->sch_bucket, sc, sc_bucketq);
3287 	scp->sch_length++;
3288 	sc->sc_set = set;
3289 	set->scs_count++;
3290 	set->scs_use--;
3291 
3292 	tcpstat_inc(tcps_sc_added);
3293 
3294 	/*
3295 	 * If the active cache has exceeded its use limit and
3296 	 * the passive syn cache is empty, exchange their roles.
3297 	 */
3298 	if (set->scs_use <= 0 &&
3299 	    tcp_syn_cache[!tcp_syn_cache_active].scs_count == 0)
3300 		tcp_syn_cache_active = !tcp_syn_cache_active;
3301 }
3302 
3303 /*
3304  * Walk the timer queues, looking for SYN,ACKs that need to be retransmitted.
3305  * If we have retransmitted an entry the maximum number of times, expire
3306  * that entry.
3307  */
3308 void
3309 syn_cache_timer(void *arg)
3310 {
3311 	struct syn_cache *sc = arg;
3312 
3313 	NET_LOCK();
3314 	if (sc->sc_flags & SCF_DEAD)
3315 		goto out;
3316 
3317 	if (__predict_false(sc->sc_rxtshift == TCP_MAXRXTSHIFT)) {
3318 		/* Drop it -- too many retransmissions. */
3319 		goto dropit;
3320 	}
3321 
3322 	/*
3323 	 * Compute the total amount of time this entry has
3324 	 * been on a queue.  If this entry has been on longer
3325 	 * than the keep alive timer would allow, expire it.
3326 	 */
3327 	sc->sc_rxttot += sc->sc_rxtcur;
3328 	if (sc->sc_rxttot >= tcptv_keep_init)
3329 		goto dropit;
3330 
3331 	tcpstat_inc(tcps_sc_retransmitted);
3332 	(void) syn_cache_respond(sc, NULL);
3333 
3334 	/* Advance the timer back-off. */
3335 	sc->sc_rxtshift++;
3336 	SYN_CACHE_TIMER_ARM(sc);
3337 
3338  out:
3339 	NET_UNLOCK();
3340 	return;
3341 
3342  dropit:
3343 	tcpstat_inc(tcps_sc_timed_out);
3344 	syn_cache_rm(sc);
3345 	syn_cache_put(sc);
3346 	NET_UNLOCK();
3347 }
3348 
3349 void
3350 syn_cache_reaper(void *arg)
3351 {
3352 	struct syn_cache *sc = arg;
3353 
3354 	pool_put(&syn_cache_pool, sc);
3355 	return;
3356 }
3357 
3358 /*
3359  * Remove the syn cache entries created by the specified tcb entry,
3360  * because it makes no sense to keep them
3361  * (if there's no tcb entry, the syn cache entries will never be used).
3362  */
3363 void
3364 syn_cache_cleanup(struct tcpcb *tp)
3365 {
3366 	struct syn_cache *sc, *nsc;
3367 
3368 	NET_ASSERT_LOCKED();
3369 
3370 	LIST_FOREACH_SAFE(sc, &tp->t_sc, sc_tpq, nsc) {
3371 #ifdef DIAGNOSTIC
3372 		if (sc->sc_tp != tp)
3373 			panic("invalid sc_tp in syn_cache_cleanup");
3374 #endif
3375 		syn_cache_rm(sc);
3376 		syn_cache_put(sc);
3377 	}
3378 	/* just for safety */
3379 	LIST_INIT(&tp->t_sc);
3380 }
3381 
3382 /*
3383  * Find an entry in the syn cache.
3384  */
3385 struct syn_cache *
3386 syn_cache_lookup(struct sockaddr *src, struct sockaddr *dst,
3387     struct syn_cache_head **headp, u_int rtableid)
3388 {
3389 	struct syn_cache_set *sets[2];
3390 	struct syn_cache *sc;
3391 	struct syn_cache_head *scp;
3392 	u_int32_t hash;
3393 	int i;
3394 
3395 	NET_ASSERT_LOCKED();
3396 
3397 	/* Check the active cache first, the passive cache is likely empty. */
3398 	sets[0] = &tcp_syn_cache[tcp_syn_cache_active];
3399 	sets[1] = &tcp_syn_cache[!tcp_syn_cache_active];
3400 	for (i = 0; i < 2; i++) {
3401 		if (sets[i]->scs_count == 0)
3402 			continue;
3403 		SYN_HASHALL(hash, src, dst, sets[i]->scs_random);
3404 		scp = &sets[i]->scs_buckethead[hash % sets[i]->scs_size];
3405 		*headp = scp;
3406 		TAILQ_FOREACH(sc, &scp->sch_bucket, sc_bucketq) {
3407 			if (sc->sc_hash != hash)
3408 				continue;
3409 			if (!bcmp(&sc->sc_src, src, src->sa_len) &&
3410 			    !bcmp(&sc->sc_dst, dst, dst->sa_len) &&
3411 			    rtable_l2(rtableid) == rtable_l2(sc->sc_rtableid))
3412 				return (sc);
3413 		}
3414 	}
3415 	return (NULL);
3416 }
3417 
3418 /*
3419  * This function gets called when we receive an ACK for a
3420  * socket in the LISTEN state.  We look up the connection
3421  * in the syn cache, and if it's there, we pull it out of
3422  * the cache and turn it into a full-blown connection in
3423  * the SYN-RECEIVED state.
3424  *
3425  * The return values may not be immediately obvious, and their effects
3426  * can be subtle, so here they are:
3427  *
3428  *	NULL	SYN was not found in cache; caller should drop the
3429  *		packet and send an RST.
3430  *
3431  *	-1	We were unable to create the new connection, and are
3432  *		aborting it.  An ACK,RST is being sent to the peer
3433  *		(unless we got screwey sequence numbners; see below),
3434  *		because the 3-way handshake has been completed.  Caller
3435  *		should not free the mbuf, since we may be using it.  If
3436  *		we are not, we will free it.
3437  *
3438  *	Otherwise, the return value is a pointer to the new socket
3439  *	associated with the connection.
3440  */
3441 struct socket *
3442 syn_cache_get(struct sockaddr *src, struct sockaddr *dst, struct tcphdr *th,
3443     u_int hlen, u_int tlen, struct socket *so, struct mbuf *m)
3444 {
3445 	struct syn_cache *sc;
3446 	struct syn_cache_head *scp;
3447 	struct inpcb *inp, *oldinp;
3448 	struct tcpcb *tp = NULL;
3449 	struct mbuf *am;
3450 	struct socket *oso;
3451 
3452 	NET_ASSERT_LOCKED();
3453 
3454 	sc = syn_cache_lookup(src, dst, &scp, sotoinpcb(so)->inp_rtableid);
3455 	if (sc == NULL)
3456 		return (NULL);
3457 
3458 	/*
3459 	 * Verify the sequence and ack numbers.  Try getting the correct
3460 	 * response again.
3461 	 */
3462 	if ((th->th_ack != sc->sc_iss + 1) ||
3463 	    SEQ_LEQ(th->th_seq, sc->sc_irs) ||
3464 	    SEQ_GT(th->th_seq, sc->sc_irs + 1 + sc->sc_win)) {
3465 		(void) syn_cache_respond(sc, m);
3466 		return ((struct socket *)(-1));
3467 	}
3468 
3469 	/* Remove this cache entry */
3470 	syn_cache_rm(sc);
3471 
3472 	/*
3473 	 * Ok, create the full blown connection, and set things up
3474 	 * as they would have been set up if we had created the
3475 	 * connection when the SYN arrived.  If we can't create
3476 	 * the connection, abort it.
3477 	 */
3478 	oso = so;
3479 	so = sonewconn(so, SS_ISCONNECTED);
3480 	if (so == NULL)
3481 		goto resetandabort;
3482 
3483 	oldinp = sotoinpcb(oso);
3484 	inp = sotoinpcb(so);
3485 
3486 #ifdef IPSEC
3487 	/*
3488 	 * We need to copy the required security levels
3489 	 * from the old pcb. Ditto for any other
3490 	 * IPsec-related information.
3491 	 */
3492 	memcpy(inp->inp_seclevel, oldinp->inp_seclevel,
3493 	    sizeof(oldinp->inp_seclevel));
3494 #endif /* IPSEC */
3495 #ifdef INET6
3496 	/*
3497 	 * inp still has the OLD in_pcb stuff, set the
3498 	 * v6-related flags on the new guy, too.
3499 	 */
3500 	inp->inp_flags |= (oldinp->inp_flags & INP_IPV6);
3501 	if (inp->inp_flags & INP_IPV6) {
3502 		inp->inp_ipv6.ip6_hlim = oldinp->inp_ipv6.ip6_hlim;
3503 		inp->inp_hops = oldinp->inp_hops;
3504 	} else
3505 #endif /* INET6 */
3506 	{
3507 		inp->inp_ip.ip_ttl = oldinp->inp_ip.ip_ttl;
3508 	}
3509 
3510 #if NPF > 0
3511 	if (m->m_pkthdr.pf.flags & PF_TAG_DIVERTED) {
3512 		struct pf_divert *divert;
3513 
3514 		divert = pf_find_divert(m);
3515 		KASSERT(divert != NULL);
3516 		inp->inp_rtableid = divert->rdomain;
3517 	} else
3518 #endif
3519 	/* inherit rtable from listening socket */
3520 	inp->inp_rtableid = sc->sc_rtableid;
3521 
3522 	inp->inp_lport = th->th_dport;
3523 	switch (src->sa_family) {
3524 #ifdef INET6
3525 	case AF_INET6:
3526 		inp->inp_laddr6 = satosin6(dst)->sin6_addr;
3527 		break;
3528 #endif /* INET6 */
3529 	case AF_INET:
3530 		inp->inp_laddr = satosin(dst)->sin_addr;
3531 		inp->inp_options = ip_srcroute(m);
3532 		if (inp->inp_options == NULL) {
3533 			inp->inp_options = sc->sc_ipopts;
3534 			sc->sc_ipopts = NULL;
3535 		}
3536 		break;
3537 	}
3538 	in_pcbrehash(inp);
3539 
3540 	/*
3541 	 * Give the new socket our cached route reference.
3542 	 */
3543 	if (src->sa_family == AF_INET)
3544 		inp->inp_route = sc->sc_route4;         /* struct assignment */
3545 #ifdef INET6
3546 	else
3547 		inp->inp_route6 = sc->sc_route6;
3548 #endif
3549 	sc->sc_route4.ro_rt = NULL;
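	/*
	 * Note: sc_route4 and sc_route6 presumably share storage in the
	 * cache entry, so clearing sc_route4.ro_rt is enough to keep
	 * syn_cache_put() from releasing the rtentry reference we just
	 * handed to the new socket.
	 */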
3550 
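	/*
	 * in_pcbconnect() takes the foreign address wrapped in an mbuf,
	 * so copy the peer's sockaddr into a throwaway MT_SONAME mbuf
	 * for the duration of the call.
	 */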
3551 	am = m_get(M_DONTWAIT, MT_SONAME);	/* XXX */
3552 	if (am == NULL)
3553 		goto resetandabort;
3554 	am->m_len = src->sa_len;
3555 	memcpy(mtod(am, caddr_t), src, src->sa_len);
3556 	if (in_pcbconnect(inp, am)) {
3557 		(void) m_free(am);
3558 		goto resetandabort;
3559 	}
3560 	(void) m_free(am);
3561 
3562 	tp = intotcpcb(inp);
3563 	tp->t_flags = sototcpcb(oso)->t_flags & (TF_NOPUSH|TF_NODELAY);
3564 	if (sc->sc_request_r_scale != 15) {
3565 		tp->requested_s_scale = sc->sc_requested_s_scale;
3566 		tp->request_r_scale = sc->sc_request_r_scale;
3567 		tp->t_flags |= TF_REQ_SCALE|TF_RCVD_SCALE;
3568 	}
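	/*
	 * (A request_r_scale of 15 is the cache's sentinel for "no window
	 * scaling negotiated"; RFC 1323 only defines shifts 0 through 14.)
	 */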
3569 	if (sc->sc_flags & SCF_TIMESTAMP)
3570 		tp->t_flags |= TF_REQ_TSTMP|TF_RCVD_TSTMP;
3571 
3572 	tp->t_template = tcp_template(tp);
3573 	if (tp->t_template == NULL) {
3574 		tp = tcp_drop(tp, ENOBUFS);	/* destroys socket */
3575 		so = NULL;
3576 		goto abort;
3577 	}
3578 	tp->sack_enable = sc->sc_flags & SCF_SACK_PERMIT;
3579 	tp->ts_modulate = sc->sc_modulate;
3580 	tp->ts_recent = sc->sc_timestamp;
3581 	tp->iss = sc->sc_iss;
3582 	tp->irs = sc->sc_irs;
3583 	tcp_sendseqinit(tp);
3584 	tp->snd_last = tp->snd_una;
3585 #ifdef TCP_ECN
3586 	if (sc->sc_flags & SCF_ECN_PERMIT) {
3587 		tp->t_flags |= TF_ECN_PERMIT;
3588 		tcpstat_inc(tcps_ecn_accepts);
3589 	}
3590 #endif
3591 	if (sc->sc_flags & SCF_SACK_PERMIT)
3592 		tp->t_flags |= TF_SACK_PERMIT;
3593 #ifdef TCP_SIGNATURE
3594 	if (sc->sc_flags & SCF_SIGNATURE)
3595 		tp->t_flags |= TF_SIGNATURE;
3596 #endif
3597 	tcp_rcvseqinit(tp);
3598 	tp->t_state = TCPS_SYN_RECEIVED;
3599 	tp->t_rcvtime = tcp_now;
3600 	TCP_TIMER_ARM(tp, TCPT_KEEP, tcptv_keep_init);
3601 	tcpstat_inc(tcps_accepts);
3602 
3603 	tcp_mss(tp, sc->sc_peermaxseg);	 /* sets t_maxseg */
3604 	if (sc->sc_peermaxseg)
3605 		tcp_mss_update(tp);
3606 	/* Reset initial window to 1 segment for retransmit */
3607 	if (sc->sc_rxtshift > 0)
3608 		tp->snd_cwnd = tp->t_maxseg;
3609 	tp->snd_wl1 = sc->sc_irs;
3610 	tp->rcv_up = sc->sc_irs + 1;
3611 
3612 	/*
3613 	 * This is what would have happened in tcp_output() when
3614 	 * the SYN,ACK was sent.
3615 	 */
3616 	tp->snd_up = tp->snd_una;
3617 	tp->snd_max = tp->snd_nxt = tp->iss+1;
3618 	TCP_TIMER_ARM(tp, TCPT_REXMT, tp->t_rxtcur);
3619 	if (sc->sc_win > 0 && SEQ_GT(tp->rcv_nxt + sc->sc_win, tp->rcv_adv))
3620 		tp->rcv_adv = tp->rcv_nxt + sc->sc_win;
3621 	tp->last_ack_sent = tp->rcv_nxt;
3622 
3623 	tcpstat_inc(tcps_sc_completed);
3624 	syn_cache_put(sc);
3625 	return (so);
3626 
3627 resetandabort:
3628 	tcp_respond(NULL, mtod(m, caddr_t), th, (tcp_seq)0, th->th_ack, TH_RST,
3629 	    m->m_pkthdr.ph_rtableid);
3630 abort:
3631 	m_freem(m);
3632 	if (so != NULL)
3633 		(void) soabort(so);
3634 	syn_cache_put(sc);
3635 	tcpstat_inc(tcps_sc_aborted);
3636 	return ((struct socket *)(-1));
3637 }
3638 
3639 /*
3640  * This function is called when we get a RST for a
3641  * non-existent connection, so that we can see if the
3642  * connection is in the syn cache.  If it is, zap it.
3643  */
3644 
3645 void
3646 syn_cache_reset(struct sockaddr *src, struct sockaddr *dst, struct tcphdr *th,
3647     u_int rtableid)
3648 {
3649 	struct syn_cache *sc;
3650 	struct syn_cache_head *scp;
3651 
3652 	NET_ASSERT_LOCKED();
3653 
3654 	if ((sc = syn_cache_lookup(src, dst, &scp, rtableid)) == NULL)
3655 		return;
3656 	if (SEQ_LT(th->th_seq, sc->sc_irs) ||
3657 	    SEQ_GT(th->th_seq, sc->sc_irs + 1))
3658 		return;
3659 	syn_cache_rm(sc);
3660 	tcpstat_inc(tcps_sc_reset);
3661 	syn_cache_put(sc);
3662 }
3663 
3664 void
3665 syn_cache_unreach(struct sockaddr *src, struct sockaddr *dst, struct tcphdr *th,
3666     u_int rtableid)
3667 {
3668 	struct syn_cache *sc;
3669 	struct syn_cache_head *scp;
3670 
3671 	NET_ASSERT_LOCKED();
3672 
3673 	if ((sc = syn_cache_lookup(src, dst, &scp, rtableid)) == NULL)
3674 		return;
3675 	/* If the sequence number != sc_iss, then it's a bogus ICMP msg */
3676 	if (ntohl(th->th_seq) != sc->sc_iss) {
3677 		return;
3678 	}
3679 
3680 	/*
3681 	 * If we've retransmitted 3 times and this is our second error,
3682 	 * we remove the entry.  Otherwise, we allow it to continue on.
3683 	 * This prevents us from incorrectly nuking an entry during a
3684 	 * transient network outage.
3685 	 *
3686 	 * See tcp_notify().
3687 	 */
3688 	if ((sc->sc_flags & SCF_UNREACH) == 0 || sc->sc_rxtshift < 3) {
3689 		sc->sc_flags |= SCF_UNREACH;
3690 		return;
3691 	}
3692 
3693 	syn_cache_rm(sc);
3694 	tcpstat_inc(tcps_sc_unreach);
3695 	syn_cache_put(sc);
3696 }
3697 
3698 /*
3699  * Given a LISTEN socket and an inbound SYN request, add
3700  * this to the syn cache, and send back a segment:
3701  *	<SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
3702  * to the source.
3703  *
3704  * IMPORTANT NOTE: We do _NOT_ ACK data that might accompany the SYN.
3705  * Doing so would require that we hold onto the data and deliver it
3706  * to the application.  However, if we are the target of a SYN-flood
3707  * DoS attack, an attacker could send data which would eventually
3708  * consume all available buffer space if it were ACKed.  By not ACKing
3709  * the data, we avoid this DoS scenario.
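 *
 * Return values: -1 if TCP option parsing fails or no cache entry
 * can be allocated, so that the caller can drop the SYN; 0 otherwise,
 * including the case where a duplicate SYN merely triggers a SYN,ACK
 * resend.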
3710  */
3711 
3712 int
3713 syn_cache_add(struct sockaddr *src, struct sockaddr *dst, struct tcphdr *th,
3714     u_int iphlen, struct socket *so, struct mbuf *m, u_char *optp, int optlen,
3715     struct tcp_opt_info *oi, tcp_seq *issp)
3716 {
3717 	struct tcpcb tb, *tp;
3718 	long win;
3719 	struct syn_cache *sc;
3720 	struct syn_cache_head *scp;
3721 	struct mbuf *ipopts;
3722 
3723 	tp = sototcpcb(so);
3724 
3725 	/*
3726 	 * RFC1122 4.2.3.10, p. 104: discard bcast/mcast SYN
3727 	 *
3728 	 * Note this check is performed in tcp_input() very early on.
3729 	 */
3730 
3731 	/*
3732 	 * Initialize some local state.
3733 	 */
3734 	win = sbspace(so, &so->so_rcv);
3735 	if (win > TCP_MAXWIN)
3736 		win = TCP_MAXWIN;
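	/*
	 * The window field of a SYN or SYN,ACK segment is never scaled
	 * (RFC 1323), so TCP_MAXWIN (65535) is the most we can offer here.
	 */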
3737 
3738 	bzero(&tb, sizeof(tb));
3739 #ifdef TCP_SIGNATURE
3740 	if (optp || (tp->t_flags & TF_SIGNATURE)) {
3741 #else
3742 	if (optp) {
3743 #endif
3744 		tb.pf = tp->pf;
3745 		tb.sack_enable = tp->sack_enable;
3746 		tb.t_flags = tcp_do_rfc1323 ? (TF_REQ_SCALE|TF_REQ_TSTMP) : 0;
3747 #ifdef TCP_SIGNATURE
3748 		if (tp->t_flags & TF_SIGNATURE)
3749 			tb.t_flags |= TF_SIGNATURE;
3750 #endif
3751 		tb.t_state = TCPS_LISTEN;
3752 		if (tcp_dooptions(&tb, optp, optlen, th, m, iphlen, oi,
3753 		    sotoinpcb(so)->inp_rtableid))
3754 			return (-1);
3755 	}
3756 
3757 	switch (src->sa_family) {
3758 	case AF_INET:
3759 		/*
3760 		 * Remember the IP options, if any.
3761 		 */
3762 		ipopts = ip_srcroute(m);
3763 		break;
3764 	default:
3765 		ipopts = NULL;
3766 	}
3767 
3768 	/*
3769 	 * See if we already have an entry for this connection.
3770 	 * If we do, resend the SYN,ACK.  We do not count this
3771 	 * as a retransmission (XXX though maybe we should).
3772 	 */
3773 	sc = syn_cache_lookup(src, dst, &scp, sotoinpcb(so)->inp_rtableid);
3774 	if (sc != NULL) {
3775 		tcpstat_inc(tcps_sc_dupesyn);
3776 		if (ipopts) {
3777 			/*
3778 			 * If we were remembering a previous source route,
3779 			 * forget it and use the new one we've been given.
3780 			 */
3781 			m_free(sc->sc_ipopts);
3782 			sc->sc_ipopts = ipopts;
3783 		}
3784 		sc->sc_timestamp = tb.ts_recent;
3785 		if (syn_cache_respond(sc, m) == 0) {
3786 			tcpstat_inc(tcps_sndacks);
3787 			tcpstat_inc(tcps_sndtotal);
3788 		}
3789 		return (0);
3790 	}
3791 
3792 	sc = pool_get(&syn_cache_pool, PR_NOWAIT|PR_ZERO);
3793 	if (sc == NULL) {
3794 		m_free(ipopts);
3795 		return (-1);
3796 	}
3797 
3798 	/*
3799 	 * Fill in the cache, and put the necessary IP and TCP
3800 	 * options into the reply.
3801 	 */
3802 	memcpy(&sc->sc_src, src, src->sa_len);
3803 	memcpy(&sc->sc_dst, dst, dst->sa_len);
3804 	sc->sc_rtableid = sotoinpcb(so)->inp_rtableid;
3805 	sc->sc_flags = 0;
3806 	sc->sc_ipopts = ipopts;
3807 	sc->sc_irs = th->th_seq;
3808 
3809 	sc->sc_iss = issp ? *issp : arc4random();
3810 	sc->sc_peermaxseg = oi->maxseg;
3811 	sc->sc_ourmaxseg = tcp_mss_adv(m, sc->sc_src.sa.sa_family);
3812 	sc->sc_win = win;
3813 	sc->sc_timestamp = tb.ts_recent;
3814 	if ((tb.t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP)) ==
3815 	    (TF_REQ_TSTMP|TF_RCVD_TSTMP)) {
3816 		sc->sc_flags |= SCF_TIMESTAMP;
3817 		sc->sc_modulate = arc4random();
3818 	}
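	/*
	 * sc_modulate randomizes the timestamp base shown to this peer;
	 * SYN_CACHE_TIMESTAMP() presumably folds it into tcp_now when
	 * the SYN,ACK is built, so the raw tick count is not leaked.
	 */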
3819 	if ((tb.t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
3820 	    (TF_RCVD_SCALE|TF_REQ_SCALE)) {
3821 		sc->sc_requested_s_scale = tb.requested_s_scale;
3822 		sc->sc_request_r_scale = 0;
3823 		/*
3824 		 * Pick the smallest possible scaling factor that
3825 		 * will still allow us to scale up to sb_max.
3826 		 *
3827 		 * We do this because there are broken firewalls that
3828 		 * will corrupt the window scale option, leading to
3829 		 * the other endpoint believing that our advertised
3830 		 * window is unscaled.  At scale factors larger than
3831 		 * 5 the unscaled window will drop below 1500 bytes,
3832 		 * leading to serious problems when traversing these
3833 		 * broken firewalls.
3834 		 *
3835 	 * With the default sb_max of 256K, a scale factor
3836 	 * of 3 will be chosen by this algorithm.  Those who
3837 	 * choose a larger sb_max should watch out
3838 	 * for the compatibility problems mentioned above.
3839 		 *
3840 		 * RFC1323: The Window field in a SYN (i.e., a <SYN>
3841 		 * or <SYN,ACK>) segment itself is never scaled.
3842 		 */
3843 		while (sc->sc_request_r_scale < TCP_MAX_WINSHIFT &&
3844 		    (TCP_MAXWIN << sc->sc_request_r_scale) < sb_max)
3845 			sc->sc_request_r_scale++;
3846 	} else {
3847 		sc->sc_requested_s_scale = 15;
3848 		sc->sc_request_r_scale = 15;
3849 	}
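	/*
	 * Worked example for the scaling branch above: with TCP_MAXWIN =
	 * 65535 and the default sb_max of 256K (262144), the loop stops
	 * at 3, since 65535 << 2 = 262140 < 262144 while
	 * 65535 << 3 = 524280 >= 262144.
	 */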
3850 #ifdef TCP_ECN
3851 	/*
3852 	 * If both the ECE and CWR flag bits are set, the peer is ECN-capable.
3853 	 */
3854 	if (tcp_do_ecn &&
3855 	    (th->th_flags & (TH_ECE|TH_CWR)) == (TH_ECE|TH_CWR))
3856 		sc->sc_flags |= SCF_ECN_PERMIT;
3857 #endif
3858 	/*
3859 	 * Set SCF_SACK_PERMIT if peer did send a SACK_PERMITTED option
3860 	 * (i.e., if tcp_dooptions() did set TF_SACK_PERMIT).
3861 	 */
3862 	if (tb.sack_enable && (tb.t_flags & TF_SACK_PERMIT))
3863 		sc->sc_flags |= SCF_SACK_PERMIT;
3864 #ifdef TCP_SIGNATURE
3865 	if (tb.t_flags & TF_SIGNATURE)
3866 		sc->sc_flags |= SCF_SIGNATURE;
3867 #endif
3868 	sc->sc_tp = tp;
3869 	if (syn_cache_respond(sc, m) == 0) {
3870 		syn_cache_insert(sc, tp);
3871 		tcpstat_inc(tcps_sndacks);
3872 		tcpstat_inc(tcps_sndtotal);
3873 	} else {
3874 		syn_cache_put(sc);
3875 		tcpstat_inc(tcps_sc_dropped);
3876 	}
3877 
3878 	return (0);
3879 }
3880 
3881 int
3882 syn_cache_respond(struct syn_cache *sc, struct mbuf *m)
3883 {
3884 	u_int8_t *optp;
3885 	int optlen, error;
3886 	u_int16_t tlen;
3887 	struct ip *ip = NULL;
3888 #ifdef INET6
3889 	struct ip6_hdr *ip6 = NULL;
3890 #endif
3891 	struct tcphdr *th;
3892 	u_int hlen;
3893 	struct inpcb *inp;
3894 
3895 	switch (sc->sc_src.sa.sa_family) {
3896 	case AF_INET:
3897 		hlen = sizeof(struct ip);
3898 		break;
3899 #ifdef INET6
3900 	case AF_INET6:
3901 		hlen = sizeof(struct ip6_hdr);
3902 		break;
3903 #endif
3904 	default:
3905 		m_freem(m);
3906 		return (EAFNOSUPPORT);
3907 	}
3908 
3909 	/* Compute the size of the TCP options. */
3910 	optlen = 4 + (sc->sc_request_r_scale != 15 ? 4 : 0) +
3911 	    ((sc->sc_flags & SCF_SACK_PERMIT) ? 4 : 0) +
3912 #ifdef TCP_SIGNATURE
3913 	    ((sc->sc_flags & SCF_SIGNATURE) ? TCPOLEN_SIGLEN : 0) +
3914 #endif
3915 	    ((sc->sc_flags & SCF_TIMESTAMP) ? TCPOLEN_TSTAMP_APPA : 0);
3916 
3917 	tlen = hlen + sizeof(struct tcphdr) + optlen;
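	/*
	 * For example, if the peer offered window scaling, SACK and
	 * timestamps, this comes to 4 (MSS) + 4 (NOP + window scale) +
	 * 4 (SACK permitted) + 12 (TCPOLEN_TSTAMP_APPA, the RFC 1323
	 * appendix A layout) = 24 option bytes, TCP_SIGNATURE aside.
	 */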
3918 
3919 	/*
3920 	 * Create the IP+TCP header from scratch.
3921 	 */
3922 	m_freem(m);
3923 #ifdef DIAGNOSTIC
3924 	if (max_linkhdr + tlen > MCLBYTES)
3925 		return (ENOBUFS);
3926 #endif
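	/*
	 * The reply always fits in one mbuf: grab a packet header mbuf
	 * and attach a cluster if the link header plus our packet would
	 * overflow MHLEN.
	 */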
3927 	MGETHDR(m, M_DONTWAIT, MT_DATA);
3928 	if (m && max_linkhdr + tlen > MHLEN) {
3929 		MCLGET(m, M_DONTWAIT);
3930 		if ((m->m_flags & M_EXT) == 0) {
3931 			m_freem(m);
3932 			m = NULL;
3933 		}
3934 	}
3935 	if (m == NULL)
3936 		return (ENOBUFS);
3937 
3938 	/* Fixup the mbuf. */
3939 	m->m_data += max_linkhdr;
3940 	m->m_len = m->m_pkthdr.len = tlen;
3941 	m->m_pkthdr.ph_ifidx = 0;
3942 	m->m_pkthdr.ph_rtableid = sc->sc_rtableid;
3943 	memset(mtod(m, u_char *), 0, tlen);
3944 
3945 	switch (sc->sc_src.sa.sa_family) {
3946 	case AF_INET:
3947 		ip = mtod(m, struct ip *);
3948 		ip->ip_dst = sc->sc_src.sin.sin_addr;
3949 		ip->ip_src = sc->sc_dst.sin.sin_addr;
3950 		ip->ip_p = IPPROTO_TCP;
3951 		th = (struct tcphdr *)(ip + 1);
3952 		th->th_dport = sc->sc_src.sin.sin_port;
3953 		th->th_sport = sc->sc_dst.sin.sin_port;
3954 		break;
3955 #ifdef INET6
3956 	case AF_INET6:
3957 		ip6 = mtod(m, struct ip6_hdr *);
3958 		ip6->ip6_dst = sc->sc_src.sin6.sin6_addr;
3959 		ip6->ip6_src = sc->sc_dst.sin6.sin6_addr;
3960 		ip6->ip6_nxt = IPPROTO_TCP;
3961 		/* ip6_plen will be updated in ip6_output() */
3962 		th = (struct tcphdr *)(ip6 + 1);
3963 		th->th_dport = sc->sc_src.sin6.sin6_port;
3964 		th->th_sport = sc->sc_dst.sin6.sin6_port;
3965 		break;
3966 #endif
3967 	default:
3968 		unhandled_af(sc->sc_src.sa.sa_family);
3969 	}
3970 
3971 	th->th_seq = htonl(sc->sc_iss);
3972 	th->th_ack = htonl(sc->sc_irs + 1);
3973 	th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
3974 	th->th_flags = TH_SYN|TH_ACK;
3975 #ifdef TCP_ECN
3976 	/* Set ECE for SYN-ACK if peer supports ECN. */
3977 	if (tcp_do_ecn && (sc->sc_flags & SCF_ECN_PERMIT))
3978 		th->th_flags |= TH_ECE;
3979 #endif
3980 	th->th_win = htons(sc->sc_win);
3981 	/* th_sum already 0 */
3982 	/* th_urp already 0 */
3983 
3984 	/* Tack on the TCP options. */
3985 	optp = (u_int8_t *)(th + 1);
3986 	*optp++ = TCPOPT_MAXSEG;
3987 	*optp++ = 4;
3988 	*optp++ = (sc->sc_ourmaxseg >> 8) & 0xff;
3989 	*optp++ = sc->sc_ourmaxseg & 0xff;
3990 
3991 	/* Include the SACK_PERMITTED option if the peer also sent one. */
3992 	if (sc->sc_flags & SCF_SACK_PERMIT) {
3993 		*((u_int32_t *)optp) = htonl(TCPOPT_SACK_PERMIT_HDR);
3994 		optp += 4;
3995 	}
3996 
3997 	if (sc->sc_request_r_scale != 15) {
3998 		*((u_int32_t *)optp) = htonl(TCPOPT_NOP << 24 |
3999 		    TCPOPT_WINDOW << 16 | TCPOLEN_WINDOW << 8 |
4000 		    sc->sc_request_r_scale);
4001 		optp += 4;
4002 	}
4003 
4004 	if (sc->sc_flags & SCF_TIMESTAMP) {
4005 		u_int32_t *lp = (u_int32_t *)(optp);
4006 		/* Form timestamp option as shown in appendix A of RFC 1323. */
4007 		*lp++ = htonl(TCPOPT_TSTAMP_HDR);
4008 		*lp++ = htonl(SYN_CACHE_TIMESTAMP(sc));
4009 		*lp   = htonl(sc->sc_timestamp);
4010 		optp += TCPOLEN_TSTAMP_APPA;
4011 	}
4012 
4013 #ifdef TCP_SIGNATURE
4014 	if (sc->sc_flags & SCF_SIGNATURE) {
4015 		union sockaddr_union src, dst;
4016 		struct tdb *tdb;
4017 
4018 		bzero(&src, sizeof(union sockaddr_union));
4019 		bzero(&dst, sizeof(union sockaddr_union));
4020 		src.sa.sa_len = sc->sc_src.sa.sa_len;
4021 		src.sa.sa_family = sc->sc_src.sa.sa_family;
4022 		dst.sa.sa_len = sc->sc_dst.sa.sa_len;
4023 		dst.sa.sa_family = sc->sc_dst.sa.sa_family;
4024 
4025 		switch (sc->sc_src.sa.sa_family) {
4026 		case 0:	/* default to PF_INET */
4027 		case AF_INET:
4028 			src.sin.sin_addr = mtod(m, struct ip *)->ip_src;
4029 			dst.sin.sin_addr = mtod(m, struct ip *)->ip_dst;
4030 			break;
4031 #ifdef INET6
4032 		case AF_INET6:
4033 			src.sin6.sin6_addr = mtod(m, struct ip6_hdr *)->ip6_src;
4034 			dst.sin6.sin6_addr = mtod(m, struct ip6_hdr *)->ip6_dst;
4035 			break;
4036 #endif /* INET6 */
4037 		}
4038 
4039 		tdb = gettdbbysrcdst(rtable_l2(sc->sc_rtableid),
4040 		    0, &src, &dst, IPPROTO_TCP);
4041 		if (tdb == NULL) {
4042 			m_freem(m);
4043 			return (EPERM);
4044 		}
4045 
4046 		/* Send signature option */
4047 		*(optp++) = TCPOPT_SIGNATURE;
4048 		*(optp++) = TCPOLEN_SIGNATURE;
4049 
4050 		if (tcp_signature(tdb, sc->sc_src.sa.sa_family, m, th,
4051 		    hlen, 0, optp) < 0) {
4052 			m_freem(m);
4053 			return (EINVAL);
4054 		}
4055 		optp += 16;
4056 
4057 		/* Pad options list to the next 32 bit boundary and
4058 		 * terminate it.
4059 		 */
4060 		*optp++ = TCPOPT_NOP;
4061 		*optp++ = TCPOPT_EOL;
4062 	}
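	/*
	 * The signature option occupies kind + length + a 16-byte MD5
	 * digest (18 bytes), padded above with NOP and EOL to the 20
	 * bytes (presumably TCPOLEN_SIGLEN) accounted for in optlen.
	 */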
4063 #endif /* TCP_SIGNATURE */
4064 
4065 	/* Compute the packet's checksum. */
4066 	switch (sc->sc_src.sa.sa_family) {
4067 	case AF_INET:
4068 		ip->ip_len = htons(tlen - hlen);
4069 		th->th_sum = 0;
4070 		th->th_sum = in_cksum(m, tlen);
4071 		break;
4072 #ifdef INET6
4073 	case AF_INET6:
4074 		ip6->ip6_plen = htons(tlen - hlen);
4075 		th->th_sum = 0;
4076 		th->th_sum = in6_cksum(m, IPPROTO_TCP, hlen, tlen - hlen);
4077 		break;
4078 #endif
4079 	}
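	/*
	 * The IPv4 case relies on the rest of the IP header still being
	 * zero: with ip_len temporarily holding the TCP length, running
	 * in_cksum() over the whole packet sums exactly the pseudo-header
	 * plus the TCP segment.  ip_len is rewritten with its real value
	 * below.
	 */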
4080 
4081 	/* use IPsec policy and ttl from listening socket, on SYN ACK */
4082 	inp = sc->sc_tp ? sc->sc_tp->t_inpcb : NULL;
4083 
4084 	/*
4085 	 * Fill in some straggling IP bits.  Note ip_len is stored in
4086 	 * network byte order, as ip_output() expects.
4087 	 */
4088 	switch (sc->sc_src.sa.sa_family) {
4089 	case AF_INET:
4090 		ip->ip_len = htons(tlen);
4091 		ip->ip_ttl = inp ? inp->inp_ip.ip_ttl : ip_defttl;
4092 		if (inp != NULL)
4093 			ip->ip_tos = inp->inp_ip.ip_tos;
4094 		break;
4095 #ifdef INET6
4096 	case AF_INET6:
4097 		ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
4098 		ip6->ip6_vfc |= IPV6_VERSION;
4099 		ip6->ip6_plen = htons(tlen - hlen);
4100 		/* ip6_hlim will be initialized afterwards */
4101 		/* leave flowlabel = 0; it is legal and requires no state mgmt */
4102 		break;
4103 #endif
4104 	}
4105 
4106 	switch (sc->sc_src.sa.sa_family) {
4107 	case AF_INET:
4108 		error = ip_output(m, sc->sc_ipopts, &sc->sc_route4,
4109 		    (ip_mtudisc ? IP_MTUDISC : 0),  NULL, inp, 0);
4110 		break;
4111 #ifdef INET6
4112 	case AF_INET6:
4113 		ip6->ip6_hlim = in6_selecthlim(inp);
4114 
4115 		error = ip6_output(m, NULL /*XXX*/, &sc->sc_route6, 0,
4116 		    NULL, NULL);
4117 		break;
4118 #endif
4119 	default:
4120 		error = EAFNOSUPPORT;
4121 		break;
4122 	}
4123 	return (error);
4124 }
4125