/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_output.c	8.4 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_output.c,v 1.39.2.20 2003/01/29 22:45:36 hsu Exp $
 */

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/in_cksum.h>
#include <sys/thread.h>
#include <sys/globaldata.h>

#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#define	TCPOUTFLAGS
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_timer2.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif

#ifdef IPSEC
#include <netinet6/ipsec.h>
#endif /*IPSEC*/

#ifdef FAST_IPSEC
#include <netproto/ipsec/ipsec.h>
#define	IPSEC
#endif /*FAST_IPSEC*/

#ifdef notyet
extern struct mbuf *m_copypack();
#endif
int path_mtu_discovery = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, path_mtu_discovery, CTLFLAG_RW,
	&path_mtu_discovery, 1, "Enable Path MTU Discovery");

static int avoid_pure_win_update = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, avoid_pure_win_update, CTLFLAG_RW,
	&avoid_pure_win_update, 1, "Avoid pure window updates when possible");

int tcp_do_autosndbuf = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_auto, CTLFLAG_RW,
    &tcp_do_autosndbuf, 0, "Enable automatic send buffer sizing");

int tcp_autosndbuf_inc = 8*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_inc, CTLFLAG_RW,
    &tcp_autosndbuf_inc, 0, "Incrementor step size of automatic send buffer");

int tcp_autosndbuf_max = 2*1024*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_max, CTLFLAG_RW,
    &tcp_autosndbuf_max, 0, "Max size of automatic send buffer");

static int tcp_idle_cwv = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, idle_cwv, CTLFLAG_RW,
    &tcp_idle_cwv, 0,
    "Congestion window validation after idle period (part of RFC2861)");

static int tcp_idle_restart = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, idle_restart, CTLFLAG_RW,
    &tcp_idle_restart, 0, "Reset congestion window after idle period");

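/*
 * Editor's note (illustrative, not from the original source): the knobs
 * above are exported under net.inet.tcp and can be tuned at runtime, e.g.
 *
 *	sysctl net.inet.tcp.sendbuf_max=4194304
 *	sysctl net.inet.tcp.path_mtu_discovery=1
 *
 * The node names follow from the SYSCTL_INT() declarations; the values
 * shown are only an example.
 */
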
static void	tcp_idle_cwnd_validate(struct tcpcb *);

/*
 * Tcp output routine: figure out what should be sent and send it.
 */
int
tcp_output(struct tcpcb *tp)
{
	struct inpcb * const inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
	long len, recvwin, sendwin;
	int nsacked = 0;
	int off, flags, error = 0;
#ifdef TCP_SIGNATURE
	int sigoff = 0;
#endif
	struct mbuf *m;
	struct ip *ip;
	struct ipovly *ipov;
	struct tcphdr *th;
	u_char opt[TCP_MAXOLEN];
	unsigned int ipoptlen, optlen, hdrlen;
	int idle, idle_cwv = 0;
	boolean_t sendalot;
	struct ip6_hdr *ip6;
#ifdef INET6
	const boolean_t isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#else
	const boolean_t isipv6 = FALSE;
#endif

	KKASSERT(so->so_port == &curthread->td_msgport);

	/*
	 * Determine length of data that should be transmitted,
	 * and flags that will be used.
	 * If there is some data or critical controls (SYN, RST)
	 * to send, then transmit; otherwise, investigate further.
	 */

	/*
	 * If we have been idle for a while, the send congestion window
	 * may no longer be representative of the current state of the
	 * link; we need to validate the congestion window.  However, we
	 * should not perform congestion window validation here, since we
	 * could be asked to send a pure ACK.
	 */
	if (tp->snd_max == tp->snd_una &&
	    (ticks - tp->snd_last) >= tp->t_rxtcur && tcp_idle_restart)
		idle_cwv = 1;

	/*
	 * Calculate whether the transmit stream was previously idle
	 * and adjust TF_LASTIDLE for the next time.
	 */
	idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una);
	if (idle && (tp->t_flags & TF_MORETOCOME))
		tp->t_flags |= TF_LASTIDLE;
	else
		tp->t_flags &= ~TF_LASTIDLE;

	if (TCP_DO_SACK(tp) && tp->snd_nxt != tp->snd_max &&
	    !IN_FASTRECOVERY(tp))
		nsacked = tcp_sack_bytes_below(&tp->scb, tp->snd_nxt);

again:
	m = NULL;
	ip = NULL;
	ipov = NULL;
	th = NULL;
	ip6 = NULL;

	/* Make use of SACK information when slow-starting after a RTO. */
	if (TCP_DO_SACK(tp) && tp->snd_nxt != tp->snd_max &&
	    !IN_FASTRECOVERY(tp)) {
		tcp_seq old_snd_nxt = tp->snd_nxt;

		tcp_sack_skip_sacked(&tp->scb, &tp->snd_nxt);
		nsacked += tp->snd_nxt - old_snd_nxt;
	}
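	/*
	 * Editor's note (illustrative): if snd_nxt points into a run of
	 * data the peer has already SACKed, tcp_sack_skip_sacked() advances
	 * snd_nxt past that run and nsacked grows by the amount skipped,
	 * which inflates the usable send window computed below.
	 */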

	sendalot = FALSE;
	off = tp->snd_nxt - tp->snd_una;
	sendwin = min(tp->snd_wnd, tp->snd_cwnd + nsacked);
	sendwin = min(sendwin, tp->snd_bwnd);

	flags = tcp_outflags[tp->t_state];
	/*
	 * Get standard flags, and add SYN or FIN if requested by 'hidden'
	 * state flags.
	 */
	if (tp->t_flags & TF_NEEDFIN)
		flags |= TH_FIN;
	if (tp->t_flags & TF_NEEDSYN)
		flags |= TH_SYN;

	/*
	 * If in persist timeout with window of 0, send 1 byte.
	 * Otherwise, if window is small but nonzero
	 * and timer expired, we will send what we can
	 * and go to transmit state.
	 */
	if (tp->t_flags & TF_FORCE) {
		if (sendwin == 0) {
			/*
			 * If we still have some data to send, then
			 * clear the FIN bit.  Usually this would
			 * happen below when it realizes that we
			 * aren't sending all the data.  However,
			 * if we have exactly 1 byte of unsent data,
			 * then it won't clear the FIN bit below,
			 * and if we are in persist state, we wind
			 * up sending the packet without recording
			 * that we sent the FIN bit.
			 *
			 * We can't just blindly clear the FIN bit,
			 * because if we don't have any more data
			 * to send then the probe will be the FIN
			 * itself.
			 */
			if (off < so->so_snd.ssb_cc)
				flags &= ~TH_FIN;
			sendwin = 1;
		} else {
			tcp_callout_stop(tp, tp->tt_persist);
			tp->t_rxtshift = 0;
		}
	}

	/*
	 * If snd_nxt == snd_max and we have transmitted a FIN, the
	 * offset will be > 0 even if so_snd.ssb_cc is 0, resulting in
	 * a negative length.  This can also occur when TCP opens up
	 * its congestion window while receiving additional duplicate
	 * acks after fast-retransmit because TCP will reset snd_nxt
	 * to snd_max after the fast-retransmit.
	 *
	 * A negative length can also occur when we are in the
	 * TCPS_SYN_RECEIVED state due to a simultaneous connect where
	 * our SYN has not been acked yet.
	 *
	 * In the normal retransmit-FIN-only case, however, snd_nxt will
	 * be set to snd_una, the offset will be 0, and the length may
	 * wind up 0.
	 */
	len = (long)ulmin(so->so_snd.ssb_cc, sendwin) - off;

	/*
	 * Lop off SYN bit if it has already been sent.  However, if this
	 * is SYN-SENT state and if the segment contains data, suppress
	 * sending the segment (sending the segment would be an option if
	 * we still did TAO and the remote host supported it).
	 */
	if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una)) {
		flags &= ~TH_SYN;
		off--, len++;
		if (len > 0 && tp->t_state == TCPS_SYN_SENT) {
			tp->t_flags &= ~TF_ACKNOW;
			return 0;
		}
	}

	/*
	 * Be careful not to send data and/or FIN on SYN segments.
	 * This measure is needed to prevent interoperability problems
	 * with not fully conformant TCP implementations.
	 */
	if (flags & TH_SYN) {
		len = 0;
		flags &= ~TH_FIN;
	}

	if (len < 0) {
		/*
		 * A negative len can occur if our FIN has been sent but not
		 * acked, or if we are in a simultaneous connect in the
		 * TCPS_SYN_RECEIVED state with our SYN sent but not yet
		 * acked.
		 *
		 * If our window has contracted to 0 in the FIN case
		 * (which can only occur if we have NOT been called to
		 * retransmit as per code a few paragraphs up) then we
		 * want to shift the retransmit timer over to the
		 * persist timer.
		 *
		 * However, if we are in the TCPS_SYN_RECEIVED state
		 * (the SYN case) we will be in a simultaneous connect and
		 * the window may be zero degeneratively.  In this case we
		 * do not want to shift to the persist timer after the SYN
		 * or the SYN+ACK transmission.
		 */
		len = 0;
		if (sendwin == 0 && tp->t_state != TCPS_SYN_RECEIVED) {
			tcp_callout_stop(tp, tp->tt_rexmt);
			tp->t_rxtshift = 0;
			tp->snd_nxt = tp->snd_una;
			if (!tcp_callout_active(tp, tp->tt_persist))
				tcp_setpersist(tp);
		}
	}

	KASSERT(len >= 0, ("%s: len < 0", __func__));
	/*
	 * Automatic sizing of send socket buffer.  Often the send buffer
	 * size is not optimally adjusted to the actual network conditions
	 * at hand (delay bandwidth product).  Setting the buffer size too
	 * small limits throughput on links with high bandwidth and high
	 * delay (e.g. trans-continental/oceanic links).  Setting the
	 * buffer size too big consumes too much real kernel memory,
	 * especially with many connections on busy servers.
	 *
	 * The criteria to step up the send buffer one notch are:
	 *  1. receive window of remote host is larger than send buffer
	 *     (with a fudge factor of 5/4);
	 *  2. send buffer is filled to 7/8 with data (so we actually
	 *     have data to make use of it);
	 *  3. send buffer fill has not hit maximal automatic size;
	 *  4. our send window (slow start and congestion controlled) is
	 *     larger than sent but unacknowledged data in send buffer.
	 *
	 * The remote host receive window scaling factor may limit the
	 * growing of the send buffer before it reaches its allowed
	 * maximum.
	 *
	 * It scales directly with slow start or congestion window
	 * and does at most one step per received ACK.  This fast
	 * scaling has the drawback of growing the send buffer beyond
	 * what is strictly necessary to make full use of a given
	 * delay*bandwidth product.  However testing has shown this not
	 * to be much of a problem.  At worst we are trading wasting
	 * of available bandwidth (the non-use of it) for wasting some
	 * socket buffer memory.
	 *
	 * TODO: Shrink send buffer during idle periods together
	 * with congestion window.  Requires another timer.  Has to
	 * wait for upcoming tcp timer rewrite.
	 */
	if (tcp_do_autosndbuf && so->so_snd.ssb_flags & SSB_AUTOSIZE) {
		if ((tp->snd_wnd / 4 * 5) >= so->so_snd.ssb_hiwat &&
		    so->so_snd.ssb_cc >= (so->so_snd.ssb_hiwat / 8 * 7) &&
		    so->so_snd.ssb_cc < tcp_autosndbuf_max &&
		    sendwin >= (so->so_snd.ssb_cc - (tp->snd_nxt - tp->snd_una))) {
			u_long newsize;

			newsize = ulmin(so->so_snd.ssb_hiwat +
					 tcp_autosndbuf_inc,
					tcp_autosndbuf_max);
			if (!ssb_reserve(&so->so_snd, newsize, so, NULL))
				atomic_clear_int(&so->so_snd.ssb_flags, SSB_AUTOSIZE);
			if (newsize >= (TCP_MAXWIN << tp->snd_scale))
				atomic_clear_int(&so->so_snd.ssb_flags, SSB_AUTOSIZE);
		}
	}
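	/*
	 * Editor's note (illustrative): with ssb_hiwat = 64KB the buffer is
	 * grown by tcp_autosndbuf_inc only when the peer's advertised window
	 * scaled by 5/4 is at least ssb_hiwat, at least 7/8 of the buffer
	 * (56KB) is occupied, the automatic maximum has not been reached,
	 * and sendwin covers all currently unacknowledged data.
	 */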

	/*
	 * Truncate to the maximum segment length and ensure that FIN is
	 * removed if the length no longer contains the last data byte.
	 */
	if (len > tp->t_maxseg) {
		len = tp->t_maxseg;
		sendalot = TRUE;
	}
	if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + so->so_snd.ssb_cc))
		flags &= ~TH_FIN;

	recvwin = ssb_space(&so->so_rcv);

	/*
	 * Sender silly window avoidance.   We transmit under the following
	 * conditions when len is non-zero:
	 *
	 *	- We have a full segment
	 *	- This is the last buffer in a write()/send() and we are
	 *	  either idle or running NODELAY
	 *	- we've timed out (e.g. persist timer)
	 *	- we have more than 1/2 the maximum send window's worth of
	 *	  data (receiver may be limiting the window size)
	 *	- we need to retransmit
	 */
	if (len) {
		if (len == tp->t_maxseg)
			goto send;
		/*
		 * NOTE! on localhost connections an 'ack' from the remote
		 * end may occur synchronously with the output and cause
		 * us to flush a buffer queued with moretocome.  XXX
		 *
		 * note: the len + off check is almost certainly unnecessary.
		 */
		if (!(tp->t_flags & TF_MORETOCOME) &&	/* normal case */
		    (idle || (tp->t_flags & TF_NODELAY)) &&
		    len + off >= so->so_snd.ssb_cc &&
		    !(tp->t_flags & TF_NOPUSH)) {
			goto send;
		}
		if (tp->t_flags & TF_FORCE)		/* typ. timeout case */
			goto send;
		if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0)
			goto send;
		if (SEQ_LT(tp->snd_nxt, tp->snd_max))	/* retransmit case */
			goto send;
	}

	/*
	 * Compare available window to amount of window
	 * known to peer (as advertised window less
	 * next expected input).  If the difference is at least two
	 * max size segments, or at least 50% of the maximum possible
	 * window, then we want to send a window update to the peer.
	 */
	if (recvwin > 0) {
		/*
		 * "adv" is the amount we can increase the window,
		 * taking into account that we are limited by
		 * TCP_MAXWIN << tp->rcv_scale.
		 */
		long adv = min(recvwin, (long)TCP_MAXWIN << tp->rcv_scale) -
			(tp->rcv_adv - tp->rcv_nxt);
		long hiwat;

		/*
		 * This ack case typically occurs when the user has drained
		 * the TCP socket buffer sufficiently to warrant an ack
		 * containing a 'pure window update'... that is, an ack that
		 * ONLY updates the tcp window.
		 *
		 * It is unclear why we would need to do a pure window update
		 * past 2 segments if we are going to do one at 1/2 the high
		 * water mark anyway, especially since under normal conditions
		 * the user program will drain the socket buffer quickly.
		 * The 2-segment pure window update will often add a large
		 * number of extra, unnecessary acks to the stream.
		 *
		 * avoid_pure_win_update now defaults to 1.
		 */
		if (avoid_pure_win_update == 0 ||
		    (tp->t_flags & TF_RXRESIZED)) {
			if (adv >= (long) (2 * tp->t_maxseg)) {
				goto send;
			}
		}
		hiwat = (long)(TCP_MAXWIN << tp->rcv_scale);
		if (hiwat > (long)so->so_rcv.ssb_hiwat)
			hiwat = (long)so->so_rcv.ssb_hiwat;
		if (adv >= hiwat / 2)
			goto send;
	}
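	/*
	 * Editor's note (illustrative): with t_maxseg = 1460 the 2-segment
	 * threshold above is 2920 bytes, while the hiwat/2 threshold for a
	 * 64KB receive buffer is 32KB; by default only the latter (plus the
	 * TF_RXRESIZED case) triggers a pure window update.
	 */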

	/*
	 * Send if we owe the peer an ACK, RST, SYN, or urgent data.  ACKNOW
	 * is also a catch-all for the retransmit timer timeout case.
	 */
	if (tp->t_flags & TF_ACKNOW)
		goto send;
	if ((flags & TH_RST) ||
	    ((flags & TH_SYN) && !(tp->t_flags & TF_NEEDSYN)))
		goto send;
	if (SEQ_GT(tp->snd_up, tp->snd_una))
		goto send;
	/*
	 * If our state indicates that FIN should be sent
	 * and we have not yet done so, then we need to send.
	 */
	if ((flags & TH_FIN) &&
	    (!(tp->t_flags & TF_SENTFIN) || tp->snd_nxt == tp->snd_una))
		goto send;

	/*
	 * TCP window updates are not reliable, rather a polling protocol
	 * using ``persist'' packets is used to ensure receipt of window
	 * updates.  The three ``states'' for the output side are:
	 *	idle			not doing retransmits or persists
	 *	persisting		to move a small or zero window
	 *	(re)transmitting	and thereby not persisting
	 *
	 * tcp_callout_active(tp, tp->tt_persist)
	 *	is true when we are in persist state.
	 * The TF_FORCE flag in tp->t_flags
	 *	is set when we are called to send a persist packet.
	 * tcp_callout_active(tp, tp->tt_rexmt)
	 *	is set when we are retransmitting
	 * The output side is idle when both timers are zero.
	 *
	 * If send window is too small, there is data to transmit, and no
	 * retransmit or persist is pending, then go to persist state.
	 *
	 * If nothing happens soon, send when timer expires:
	 * if window is nonzero, transmit what we can, otherwise force out
	 * a byte.
	 *
	 * Don't try to set the persist state if we are in TCPS_SYN_RECEIVED
	 * with data pending.  This situation can occur during a
	 * simultaneous connect.
	 */
	if (so->so_snd.ssb_cc > 0 &&
	    tp->t_state != TCPS_SYN_RECEIVED &&
	    !tcp_callout_active(tp, tp->tt_rexmt) &&
	    !tcp_callout_active(tp, tp->tt_persist)) {
		tp->t_rxtshift = 0;
		tcp_setpersist(tp);
	}

	/*
	 * No reason to send a segment, just return.
	 */
	return (0);

send:
	/*
	 * Before ESTABLISHED, force sending of initial options
	 * unless TCP is set not to do any options.
	 * NOTE: we assume that the IP/TCP header plus TCP options
	 * always fit in a single mbuf, leaving room for a maximum
	 * link header, i.e.
	 *	max_linkhdr + sizeof(struct tcpiphdr) + optlen <= MCLBYTES
	 */
	optlen = 0;
	if (isipv6)
		hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	else
		hdrlen = sizeof(struct tcpiphdr);
	if (flags & TH_SYN) {
		tp->snd_nxt = tp->iss;
		if (!(tp->t_flags & TF_NOOPT)) {
			u_short mss;

			opt[0] = TCPOPT_MAXSEG;
			opt[1] = TCPOLEN_MAXSEG;
			mss = htons((u_short) tcp_mssopt(tp));
			memcpy(opt + 2, &mss, sizeof mss);
			optlen = TCPOLEN_MAXSEG;

			if ((tp->t_flags & TF_REQ_SCALE) &&
			    (!(flags & TH_ACK) ||
			     (tp->t_flags & TF_RCVD_SCALE))) {
				*((u_int32_t *)(opt + optlen)) = htonl(
					TCPOPT_NOP << 24 |
					TCPOPT_WINDOW << 16 |
					TCPOLEN_WINDOW << 8 |
					tp->request_r_scale);
				optlen += 4;
			}

			if ((tcp_do_sack && !(flags & TH_ACK)) ||
			    tp->t_flags & TF_SACK_PERMITTED) {
				uint32_t *lp = (uint32_t *)(opt + optlen);

				*lp = htonl(TCPOPT_SACK_PERMITTED_ALIGNED);
				optlen += TCPOLEN_SACK_PERMITTED_ALIGNED;
			}
		}
	}

	/*
	 * Send a timestamp and echo-reply if this is a SYN and our side
	 * wants to use timestamps (TF_REQ_TSTMP is set) or both our side
	 * and our peer have sent timestamps in our SYNs.
	 */
	if ((tp->t_flags & (TF_REQ_TSTMP | TF_NOOPT)) == TF_REQ_TSTMP &&
	    !(flags & TH_RST) &&
	    (!(flags & TH_ACK) || (tp->t_flags & TF_RCVD_TSTMP))) {
		u_int32_t *lp = (u_int32_t *)(opt + optlen);

		/* Form timestamp option as shown in appendix A of RFC 1323. */
		*lp++ = htonl(TCPOPT_TSTAMP_HDR);
		*lp++ = htonl(ticks);
		*lp   = htonl(tp->ts_recent);
		optlen += TCPOLEN_TSTAMP_APPA;
	}
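	/*
	 * Editor's note (illustrative): on a typical SYN with all options
	 * enabled the layout built above is MSS (4 bytes) + NOP/window
	 * scale (4) + SACK-permitted (4) + timestamps (12), i.e. 24 bytes,
	 * comfortably below the TCP_MAXOLEN limit asserted further down.
	 */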

	/* Set receive buffer autosizing timestamp. */
	if (tp->rfbuf_ts == 0 && (so->so_rcv.ssb_flags & SSB_AUTOSIZE))
		tp->rfbuf_ts = ticks;

	/*
	 * If this is a SACK connection and we have a block to report,
	 * fill in the SACK blocks in the TCP options.
	 */
	if ((tp->t_flags & (TF_SACK_PERMITTED | TF_NOOPT)) ==
		TF_SACK_PERMITTED &&
	    (!TAILQ_EMPTY(&tp->t_segq) ||
	     tp->reportblk.rblk_start != tp->reportblk.rblk_end))
		tcp_sack_fill_report(tp, opt, &optlen);

#ifdef TCP_SIGNATURE
	if (tp->t_flags & TF_SIGNATURE) {
		int i;
		u_char *bp;
		/*
		 * Initialize TCP-MD5 option (RFC2385)
		 */
		bp = (u_char *)opt + optlen;
		*bp++ = TCPOPT_SIGNATURE;
		*bp++ = TCPOLEN_SIGNATURE;
		sigoff = optlen + 2;
		for (i = 0; i < TCP_SIGLEN; i++)
			*bp++ = 0;
		optlen += TCPOLEN_SIGNATURE;
		/*
		 * Terminate options list and maintain 32-bit alignment.
		 */
		*bp++ = TCPOPT_NOP;
		*bp++ = TCPOPT_EOL;
		optlen += 2;
	}
#endif /* TCP_SIGNATURE */
	KASSERT(optlen <= TCP_MAXOLEN, ("too many TCP options"));
	hdrlen += optlen;

	if (isipv6) {
		ipoptlen = ip6_optlen(inp);
	} else {
		if (inp->inp_options) {
			ipoptlen = inp->inp_options->m_len -
			    offsetof(struct ipoption, ipopt_list);
		} else {
			ipoptlen = 0;
		}
	}
#ifdef IPSEC
	ipoptlen += ipsec_hdrsiz_tcp(tp);
#endif

	/*
	 * Adjust data length if insertion of options will bump the packet
	 * length beyond the t_maxopd length.  Clear FIN to prevent premature
	 * closure since there is still more data to send after this (now
	 * truncated) packet.
	 *
	 * If just the options do not fit we are in a no-win situation and
	 * we treat it as an unreachable host.
	 */
	if (len + optlen + ipoptlen > tp->t_maxopd) {
		if (tp->t_maxopd <= optlen + ipoptlen) {
			static time_t last_optlen_report;

			if (last_optlen_report != time_second) {
				last_optlen_report = time_second;
				kprintf("tcpcb %p: MSS (%d) too small to hold options!\n", tp, tp->t_maxopd);
			}
			error = EHOSTUNREACH;
			goto out;
		} else {
			flags &= ~TH_FIN;
			len = tp->t_maxopd - optlen - ipoptlen;
			sendalot = TRUE;
		}
	}

#ifdef INET6
	KASSERT(max_linkhdr + hdrlen <= MCLBYTES, ("tcphdr too big"));
#else
	KASSERT(max_linkhdr + hdrlen <= MHLEN, ("tcphdr too big"));
#endif

	/*
	 * Grab a header mbuf, attaching a copy of data to
	 * be transmitted, and initialize the header from
	 * the template for sends on this connection.
	 */
	if (len) {
		if ((tp->t_flags & TF_FORCE) && len == 1)
			tcpstat.tcps_sndprobe++;
		else if (SEQ_LT(tp->snd_nxt, tp->snd_max)) {
			if (tp->snd_nxt == tp->snd_una)
				tp->snd_max_rexmt = tp->snd_max;
			if (nsacked) {
				tcpstat.tcps_sndsackrtopack++;
				tcpstat.tcps_sndsackrtobyte += len;
			}
			tcpstat.tcps_sndrexmitpack++;
			tcpstat.tcps_sndrexmitbyte += len;
		} else {
			tcpstat.tcps_sndpack++;
			tcpstat.tcps_sndbyte += len;
		}
		if (idle_cwv) {
			idle_cwv = 0;
			tcp_idle_cwnd_validate(tp);
		}
		/* Update last send time after CWV */
		tp->snd_last = ticks;
#ifdef notyet
		if ((m = m_copypack(so->so_snd.ssb_mb, off, (int)len,
		    max_linkhdr + hdrlen)) == NULL) {
			error = ENOBUFS;
			goto after_th;
		}
		/*
		 * m_copypack left space for our hdr; use it.
		 */
		m->m_len += hdrlen;
		m->m_data -= hdrlen;
#else
#ifndef INET6
		m = m_gethdr(MB_DONTWAIT, MT_HEADER);
#else
		m = m_getl(hdrlen + max_linkhdr, MB_DONTWAIT, MT_HEADER,
			   M_PKTHDR, NULL);
#endif
		if (m == NULL) {
			error = ENOBUFS;
			goto after_th;
		}
		m->m_data += max_linkhdr;
		m->m_len = hdrlen;
		if (len <= MHLEN - hdrlen - max_linkhdr) {
			m_copydata(so->so_snd.ssb_mb, off, (int) len,
			    mtod(m, caddr_t) + hdrlen);
			m->m_len += len;
		} else {
			m->m_next = m_copy(so->so_snd.ssb_mb, off, (int) len);
			if (m->m_next == NULL) {
				m_free(m);
				m = NULL;
				error = ENOBUFS;
				goto after_th;
			}
		}
#endif
		/*
		 * If we're sending everything we've got, set PUSH.
		 * (This will keep happy those implementations which only
		 * give data to the user when a buffer fills or
		 * a PUSH comes in.)
		 */
		if (off + len == so->so_snd.ssb_cc)
			flags |= TH_PUSH;
	} else {
		if (tp->t_flags & TF_ACKNOW)
			tcpstat.tcps_sndacks++;
		else if (flags & (TH_SYN | TH_FIN | TH_RST))
			tcpstat.tcps_sndctrl++;
		else if (SEQ_GT(tp->snd_up, tp->snd_una))
			tcpstat.tcps_sndurg++;
		else
			tcpstat.tcps_sndwinup++;

		MGETHDR(m, MB_DONTWAIT, MT_HEADER);
		if (m == NULL) {
			error = ENOBUFS;
			goto after_th;
		}
		if (isipv6 &&
		    (hdrlen + max_linkhdr > MHLEN) && hdrlen <= MHLEN)
			MH_ALIGN(m, hdrlen);
		else
			m->m_data += max_linkhdr;
		m->m_len = hdrlen;
	}
	m->m_pkthdr.rcvif = NULL;
	if (isipv6) {
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(ip6 + 1);
		tcp_fillheaders(tp, ip6, th);
	} else {
		ip = mtod(m, struct ip *);
		ipov = (struct ipovly *)ip;
		th = (struct tcphdr *)(ip + 1);
		/* this picks up the pseudo header (w/o the length) */
		tcp_fillheaders(tp, ip, th);
	}
after_th:
	/*
	 * Fill in fields, remembering maximum advertised
	 * window for use in delaying messages about window sizes.
	 * If resending a FIN, be sure not to use a new sequence number.
	 */
	if (flags & TH_FIN && tp->t_flags & TF_SENTFIN &&
	    tp->snd_nxt == tp->snd_max)
		tp->snd_nxt--;

	if (th != NULL) {
		/*
		 * If we are doing retransmissions, then snd_nxt will
		 * not reflect the first unsent octet.  For ACK only
		 * packets, we do not want the sequence number of the
		 * retransmitted packet, we want the sequence number
		 * of the next unsent octet.  So, if there is no data
		 * (and no SYN or FIN), use snd_max instead of snd_nxt
		 * when filling in ti_seq.  But if we are in persist
		 * state, snd_max might reflect one byte beyond the
		 * right edge of the window, so use snd_nxt in that
		 * case, since we know we aren't doing a retransmission.
		 * (retransmit and persist are mutually exclusive...)
		 */
		if (len || (flags & (TH_SYN|TH_FIN)) ||
		    tcp_callout_active(tp, tp->tt_persist))
			th->th_seq = htonl(tp->snd_nxt);
		else
			th->th_seq = htonl(tp->snd_max);
		th->th_ack = htonl(tp->rcv_nxt);
		if (optlen) {
			bcopy(opt, th + 1, optlen);
			th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
		}
		th->th_flags = flags;
	}

	/*
	 * Calculate receive window.  Don't shrink window, but avoid
	 * silly window syndrome by sending a 0 window if the actual
	 * window is less than one segment.
	 */
	if (recvwin < (long)(so->so_rcv.ssb_hiwat / 4) &&
	    recvwin < (long)tp->t_maxseg)
		recvwin = 0;
	if (recvwin < (tcp_seq_diff_t)(tp->rcv_adv - tp->rcv_nxt))
		recvwin = (tcp_seq_diff_t)(tp->rcv_adv - tp->rcv_nxt);
	if (recvwin > (long)TCP_MAXWIN << tp->rcv_scale)
		recvwin = (long)TCP_MAXWIN << tp->rcv_scale;
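	/*
	 * Editor's note (illustrative): the clamp above caps the advertised
	 * window at TCP_MAXWIN << rcv_scale, e.g. 65535 << 3 (about 512KB)
	 * for a window-scale factor of 3; the scale factor shown is only an
	 * example.
	 */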

	/*
	 * Adjust the RXWIN0SENT flag - indicate that we have advertised
	 * a 0 window.  This may cause the remote transmitter to stall.  This
	 * flag tells soreceive() to disable delayed acknowledgements when
	 * draining the buffer.  This can occur if the receiver is attempting
	 * to read more data than can be buffered prior to transmitting on
	 * the connection.
	 */
	if (recvwin == 0)
		tp->t_flags |= TF_RXWIN0SENT;
	else
		tp->t_flags &= ~TF_RXWIN0SENT;

	if (th != NULL)
		th->th_win = htons((u_short) (recvwin>>tp->rcv_scale));

	if (SEQ_GT(tp->snd_up, tp->snd_nxt)) {
		if (th != NULL) {
			th->th_urp = htons((u_short)(tp->snd_up - tp->snd_nxt));
			th->th_flags |= TH_URG;
		}
	} else {
		/*
		 * If no urgent pointer to send, then we pull
		 * the urgent pointer to the left edge of the send window
		 * so that it doesn't drift into the send window on sequence
		 * number wraparound.
		 */
		tp->snd_up = tp->snd_una;		/* drag it along */
	}

	if (th != NULL) {
#ifdef TCP_SIGNATURE
		if (tp->t_flags & TF_SIGNATURE) {
			tcpsignature_compute(m, len, optlen,
			    (u_char *)(th + 1) + sigoff, IPSEC_DIR_OUTBOUND);
		}
#endif /* TCP_SIGNATURE */

		/*
		 * Put TCP length in extended header, and then
		 * checksum extended header and data.
		 */
		m->m_pkthdr.len = hdrlen + len; /* in6_cksum() needs this */
		if (isipv6) {
			/*
			 * ip6_plen does not need to be filled in now; it
			 * will be filled in by ip6_output().
			 */
			th->th_sum = in6_cksum(m, IPPROTO_TCP,
			    sizeof(struct ip6_hdr),
			    sizeof(struct tcphdr) + optlen + len);
		} else {
			m->m_pkthdr.csum_flags = CSUM_TCP;
			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
			if (len + optlen) {
				th->th_sum = in_addword(th->th_sum,
				    htons((u_short)(optlen + len)));
			}

			/*
			 * IP version must be set here for ipv4/ipv6 checking
			 * later
			 */
			KASSERT(ip->ip_v == IPVERSION,
			    ("%s: IP version incorrect: %d",
			     __func__, ip->ip_v));
		}
	}
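	/*
	 * Editor's note (illustrative): for IPv4 the checksum is left for
	 * hardware or the IP layer to finish (CSUM_TCP/csum_data); th_sum
	 * already holds the pseudo-header sum from tcp_fillheaders(), and
	 * in_addword() above folds in the TCP length (optlen + len).
	 */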

	/*
	 * In transmit state, time the transmission and arrange for
	 * the retransmit.  In persist state, just set snd_max.
	 */
	if (!(tp->t_flags & TF_FORCE) ||
	    !tcp_callout_active(tp, tp->tt_persist)) {
		tcp_seq startseq = tp->snd_nxt;

		/*
		 * Advance snd_nxt over sequence space of this segment.
		 */
		if (flags & (TH_SYN | TH_FIN)) {
			if (flags & TH_SYN)
				tp->snd_nxt++;
			if (flags & TH_FIN) {
				tp->snd_nxt++;
				tp->t_flags |= TF_SENTFIN;
			}
		}
		tp->snd_nxt += len;
		if (SEQ_GT(tp->snd_nxt, tp->snd_max)) {
			tp->snd_max = tp->snd_nxt;
			/*
			 * Time this transmission if not a retransmission and
			 * not currently timing anything.
			 */
			if (tp->t_rtttime == 0) {
				tp->t_rtttime = ticks;
				tp->t_rtseq = startseq;
				tcpstat.tcps_segstimed++;
			}
		}

		/*
		 * Set retransmit timer if not currently set,
		 * and not doing a pure ack or a keep-alive probe.
		 * Initial value for retransmit timer is smoothed
		 * round-trip time + 2 * round-trip time variance.
		 * Initialize shift counter which is used for backoff
		 * of retransmit time.
		 */
		if (!tcp_callout_active(tp, tp->tt_rexmt) &&
		    tp->snd_nxt != tp->snd_una) {
			if (tcp_callout_active(tp, tp->tt_persist)) {
				tcp_callout_stop(tp, tp->tt_persist);
				tp->t_rxtshift = 0;
			}
			tcp_callout_reset(tp, tp->tt_rexmt, tp->t_rxtcur,
			    tcp_timer_rexmt);
		}
	} else {
		/*
		 * Persist case, update snd_max but since we are in
		 * persist mode (no window) we do not update snd_nxt.
		 */
		int xlen = len;
		if (flags & TH_SYN)
			panic("tcp_output: persist timer to send SYN");
		if (flags & TH_FIN) {
			++xlen;
			tp->t_flags |= TF_SENTFIN;
		}
		if (SEQ_GT(tp->snd_nxt + xlen, tp->snd_max))
			tp->snd_max = tp->snd_nxt + xlen;
	}

	if (th != NULL) {
#ifdef TCPDEBUG
		/* Trace. */
		if (so->so_options & SO_DEBUG) {
			tcp_trace(TA_OUTPUT, tp->t_state, tp,
			    mtod(m, void *), th, 0);
		}
#endif

		/*
		 * Fill in IP length and desired time to live and
		 * send to IP level.  There should be a better way
		 * to handle ttl and tos; we could keep them in
		 * the template, but need a way to checksum without them.
		 */
		/*
		 * m->m_pkthdr.len should have been set before checksum
		 * calculation, because in6_cksum() needs it.
		 */
		if (isipv6) {
			/*
			 * we separately set hoplimit for every segment,
			 * since the user might want to change the value
			 * via setsockopt.  Also, desired default hop
			 * limit might be changed via Neighbor Discovery.
			 */
			ip6->ip6_hlim = in6_selecthlim(inp,
			    (inp->in6p_route.ro_rt ?
			     inp->in6p_route.ro_rt->rt_ifp : NULL));

			/* TODO: IPv6 IP6TOS_ECT bit on */
			error = ip6_output(m, inp->in6p_outputopts,
			    &inp->in6p_route, (so->so_options & SO_DONTROUTE),
			    NULL, NULL, inp);
		} else {
			struct rtentry *rt;
			ip->ip_len = m->m_pkthdr.len;
#ifdef INET6
			if (INP_CHECK_SOCKAF(so, AF_INET6))
				ip->ip_ttl = in6_selecthlim(inp,
				    (inp->in6p_route.ro_rt ?
				     inp->in6p_route.ro_rt->rt_ifp : NULL));
			else
#endif
				ip->ip_ttl = inp->inp_ip_ttl;	/* XXX */

			ip->ip_tos = inp->inp_ip_tos;	/* XXX */
			/*
			 * See if we should do MTU discovery.
			 * We do it only if the following are true:
			 *	1) we have a valid route to the destination
			 *	2) the MTU is not locked (if it is,
			 *	   then discovery has been disabled)
			 */
			if (path_mtu_discovery &&
			    (rt = inp->inp_route.ro_rt) &&
			    (rt->rt_flags & RTF_UP) &&
			    !(rt->rt_rmx.rmx_locks & RTV_MTU))
				ip->ip_off |= IP_DF;

			error = ip_output(m, inp->inp_options, &inp->inp_route,
					  (so->so_options & SO_DONTROUTE) |
					  IP_DEBUGROUTE, NULL, inp);
		}
	} else {
		KASSERT(error != 0, ("no error, but th not set"));
	}
	if (error) {
		tp->t_flags &= ~TF_ACKNOW;

		/*
		 * We know that the packet was lost, so back out the
		 * sequence number advance, if any.
		 */
		if (!(tp->t_flags & TF_FORCE) ||
		    !tcp_callout_active(tp, tp->tt_persist)) {
			/*
			 * No need to check for TH_FIN here because
			 * the TF_SENTFIN flag handles that case.
			 */
			if (!(flags & TH_SYN))
				tp->snd_nxt -= len;
		}

out:
		if (error == ENOBUFS) {
			/*
			 * If we can't send, make sure there is something
			 * to get us going again later.
			 *
			 * The persist timer isn't necessarily allowed in
			 * all states; use the rexmt timer.
			 */
			if (!tcp_callout_active(tp, tp->tt_rexmt) &&
			    !tcp_callout_active(tp, tp->tt_persist)) {
				tcp_callout_reset(tp, tp->tt_rexmt,
						  tp->t_rxtcur,
						  tcp_timer_rexmt);
#if 0
				tp->t_rxtshift = 0;
				tcp_setpersist(tp);
#endif
			}
			tcp_quench(inp, 0);
			return (0);
		}
		if (error == EMSGSIZE) {
			/*
			 * ip_output() will have already fixed the route
			 * for us.  tcp_mtudisc() will, as its last action,
			 * initiate retransmission, so it is important to
			 * not do so here.
			 */
			tcp_mtudisc(inp, 0);
			return 0;
		}
		if ((error == EHOSTUNREACH || error == ENETDOWN) &&
		    TCPS_HAVERCVDSYN(tp->t_state)) {
			tp->t_softerror = error;
			return (0);
		}
		return (error);
	}
	tcpstat.tcps_sndtotal++;

	/*
	 * Data sent (as far as we can tell).
	 *
	 * If this advertises a larger window than any other segment,
	 * then remember the size of the advertised window.
	 *
	 * Any pending ACK has now been sent.
	 */
	if (recvwin > 0 && SEQ_GT(tp->rcv_nxt + recvwin, tp->rcv_adv)) {
		tp->rcv_adv = tp->rcv_nxt + recvwin;
		tp->t_flags &= ~TF_RXRESIZED;
	}
	tp->last_ack_sent = tp->rcv_nxt;
	tp->t_flags &= ~TF_ACKNOW;
	if (tcp_delack_enabled)
		tcp_callout_stop(tp, tp->tt_delack);
	if (sendalot)
		goto again;
	return (0);
}

void
tcp_setpersist(struct tcpcb *tp)
{
	int t = ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1;
	int tt;

	if (tp->t_state == TCPS_SYN_SENT ||
	    tp->t_state == TCPS_SYN_RECEIVED) {
		panic("tcp_setpersist: not established yet, current %s",
		      tp->t_state == TCPS_SYN_SENT ?
		      "SYN_SENT" : "SYN_RECEIVED");
	}

	if (tcp_callout_active(tp, tp->tt_rexmt))
		panic("tcp_setpersist: retransmit pending");
	/*
	 * Start/restart persistence timer.
	 */
	TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift], TCPTV_PERSMIN,
		      TCPTV_PERSMAX);
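	/*
	 * Editor's note (illustrative): successive persist probes back off
	 * roughly exponentially via tcp_backoff[t_rxtshift], but the
	 * interval is always clamped to [TCPTV_PERSMIN, TCPTV_PERSMAX].
	 */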
	tcp_callout_reset(tp, tp->tt_persist, tt, tcp_timer_persist);
	if (tp->t_rxtshift < TCP_MAXRXTSHIFT)
		tp->t_rxtshift++;
}

static void
tcp_idle_cwnd_validate(struct tcpcb *tp)
{
	u_long initial_cwnd = tcp_initial_window(tp);
	u_long min_cwnd;

	tcpstat.tcps_sndidle++;

	/* According to RFC5681: RW=min(IW,cwnd) */
	min_cwnd = min(tp->snd_cwnd, initial_cwnd);

	if (tcp_idle_cwv) {
		u_long idle_time, decay_cwnd;

		/*
		 * RFC2861, but only after idle period.
		 */

		/*
		 * Before the congestion window is reduced, ssthresh
		 * is set to the maximum of its current value and 3/4
		 * cwnd.  If the sender then has more data to send
		 * than the decayed cwnd allows, the TCP will slow-
		 * start (perform exponential increase) at least
		 * half-way back up to the old value of cwnd.
		 */
		tp->snd_ssthresh = max(tp->snd_ssthresh,
		    (3 * tp->snd_cwnd) / 4);

		/*
		 * Decay the congestion window by half for every RTT
		 * that the flow remains inactive.
		 *
		 * The difference between our implementation and
		 * RFC2861 is that we don't allow cwnd to go below
		 * the value allowed by RFC5681 (min_cwnd).
		 */
		idle_time = ticks - tp->snd_last;
		decay_cwnd = tp->snd_cwnd;
		while (idle_time >= tp->t_rxtcur &&
		    decay_cwnd > min_cwnd) {
			decay_cwnd >>= 1;
			idle_time -= tp->t_rxtcur;
		}
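		/*
		 * Editor's note (illustrative): a flow idle for a little
		 * over two retransmit intervals has decay_cwnd halved
		 * twice, but the assignment below never lets cwnd drop
		 * under min_cwnd.
		 */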
		tp->snd_cwnd = max(decay_cwnd, min_cwnd);
	} else {
		/*
		 * Slow-start from scratch to re-determine the send
		 * congestion window.
		 */
		tp->snd_cwnd = min_cwnd;
	}

	/* Restart ABC counting during congestion avoidance */
	tp->snd_wacked = 0;
}
1227