/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)tcp_output.c	8.4 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_output.c,v 1.39.2.20 2003/01/29 22:45:36 hsu Exp $
 * $DragonFly: src/sys/netinet/tcp_output.c,v 1.34 2007/04/22 01:13:14 dillon Exp $
 */

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/in_cksum.h>
#include <sys/thread.h>
#include <sys/globaldata.h>

#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#define TCPOUTFLAGS
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_timer2.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif

#ifdef IPSEC
#include <netinet6/ipsec.h>
#endif /*IPSEC*/

#ifdef FAST_IPSEC
#include <netproto/ipsec/ipsec.h>
#define IPSEC
#endif /*FAST_IPSEC*/

#ifdef notyet
extern struct mbuf *m_copypack();
#endif

int path_mtu_discovery = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, path_mtu_discovery, CTLFLAG_RW,
    &path_mtu_discovery, 1, "Enable Path MTU Discovery");

static int avoid_pure_win_update = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, avoid_pure_win_update, CTLFLAG_RW,
    &avoid_pure_win_update, 1, "Avoid pure window updates when possible");

int tcp_do_autosndbuf = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_auto, CTLFLAG_RW,
    &tcp_do_autosndbuf, 0, "Enable automatic send buffer sizing");

int tcp_autosndbuf_inc = 8*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_inc, CTLFLAG_RW,
    &tcp_autosndbuf_inc, 0, "Incrementor step size of automatic send buffer");

int tcp_autosndbuf_max = 2*1024*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_max, CTLFLAG_RW,
    &tcp_autosndbuf_max, 0, "Max size of automatic send buffer");

static int tcp_idle_cwv = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, idle_cwv, CTLFLAG_RW,
    &tcp_idle_cwv, 0,
    "Congestion window validation after idle period (part of RFC2861)");

static int tcp_idle_restart = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, idle_restart, CTLFLAG_RW,
    &tcp_idle_restart, 0, "Reset congestion window after idle period");

static void tcp_idle_cwnd_validate(struct tcpcb *);

/*
 * Tcp output routine: figure out what should be sent and send it.
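 *
 * In outline (summary of the code below): compute how much unsent data
 * fits in the usable send window, decide whether a segment is worth
 * sending at all (silly window avoidance, window updates, pending
 * ACK/SYN/FIN/RST, persist probes), build the TCP/IP header plus
 * options, hand the mbuf to ip_output()/ip6_output(), and loop back
 * if more data remains to be sent.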
 */
int
tcp_output(struct tcpcb *tp)
{
	struct inpcb * const inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
	long len, recvwin, sendwin;
	int nsacked = 0;
	int off, flags, error = 0;
#ifdef TCP_SIGNATURE
	int sigoff = 0;
#endif
	struct mbuf *m;
	struct ip *ip;
	struct ipovly *ipov;
	struct tcphdr *th;
	u_char opt[TCP_MAXOLEN];
	unsigned int ipoptlen, optlen, hdrlen;
	int idle, idle_cwv = 0;
	boolean_t sendalot;
	struct ip6_hdr *ip6;
#ifdef INET6
	const boolean_t isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#else
	const boolean_t isipv6 = FALSE;
#endif

	KKASSERT(so->so_port == &curthread->td_msgport);

	/*
	 * Determine the length of data that should be transmitted and the
	 * flags that will be used.
	 * If there is some data or critical controls (SYN, RST) to send,
	 * then transmit; otherwise, investigate further.
	 */

	/*
	 * If we have been idle for a while, the send congestion window
	 * may no longer be representative of the current state of the
	 * link; we need to validate the congestion window.  However, we
	 * should not perform congestion window validation here, since we
	 * could be asked to send a pure ACK.
	 */
	if (tp->snd_max == tp->snd_una &&
	    (ticks - tp->snd_last) >= tp->t_rxtcur && tcp_idle_restart)
		idle_cwv = 1;

	/*
	 * Calculate whether the transmit stream was previously idle
	 * and adjust TF_LASTIDLE for the next time.
	 */
	idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una);
	if (idle && (tp->t_flags & TF_MORETOCOME))
		tp->t_flags |= TF_LASTIDLE;
	else
		tp->t_flags &= ~TF_LASTIDLE;

	if (TCP_DO_SACK(tp) && tp->snd_nxt != tp->snd_max &&
	    !IN_FASTRECOVERY(tp))
		nsacked = tcp_sack_bytes_below(&tp->scb, tp->snd_nxt);

again:
	m = NULL;
	ip = NULL;
	ipov = NULL;
	th = NULL;
	ip6 = NULL;

	/* Make use of SACK information when slow-starting after an RTO. */
	if (TCP_DO_SACK(tp) && tp->snd_nxt != tp->snd_max &&
	    !IN_FASTRECOVERY(tp)) {
		tcp_seq old_snd_nxt = tp->snd_nxt;

		tcp_sack_skip_sacked(&tp->scb, &tp->snd_nxt);
		nsacked += tp->snd_nxt - old_snd_nxt;
	}

	sendalot = FALSE;
	off = tp->snd_nxt - tp->snd_una;
	sendwin = min(tp->snd_wnd, tp->snd_cwnd + nsacked);
	sendwin = min(sendwin, tp->snd_bwnd);

	flags = tcp_outflags[tp->t_state];
	/*
	 * Get standard flags, and add SYN or FIN if requested by 'hidden'
	 * state flags.
	 */
	if (tp->t_flags & TF_NEEDFIN)
		flags |= TH_FIN;
	if (tp->t_flags & TF_NEEDSYN)
		flags |= TH_SYN;

	/*
	 * If in persist timeout with a window of 0, send 1 byte.
	 * Otherwise, if the window is small but nonzero and the timer
	 * expired, we will send what we can and go to transmit state.
	 */
	if (tp->t_flags & TF_FORCE) {
		if (sendwin == 0) {
			/*
			 * If we still have some data to send, then
			 * clear the FIN bit.  Usually this would
			 * happen below when it realizes that we
			 * aren't sending all the data.  However,
			 * if we have exactly 1 byte of unsent data,
			 * then it won't clear the FIN bit below,
			 * and if we are in persist state, we wind
			 * up sending the packet without recording
			 * that we sent the FIN bit.
			 *
			 * We can't just blindly clear the FIN bit,
			 * because if we don't have any more data
			 * to send then the probe will be the FIN
			 * itself.
			 */
			if (off < so->so_snd.ssb_cc)
				flags &= ~TH_FIN;
			sendwin = 1;
		} else {
			tcp_callout_stop(tp, tp->tt_persist);
			tp->t_rxtshift = 0;
		}
	}

	/*
	 * If snd_nxt == snd_max and we have transmitted a FIN, the
	 * offset will be > 0 even if so_snd.ssb_cc is 0, resulting in
	 * a negative length.  This can also occur when TCP opens up
	 * its congestion window while receiving additional duplicate
	 * acks after fast-retransmit because TCP will reset snd_nxt
	 * to snd_max after the fast-retransmit.
	 *
	 * A negative length can also occur when we are in the
	 * TCPS_SYN_RECEIVED state due to a simultaneous connect where
	 * our SYN has not been acked yet.
	 *
	 * In the normal retransmit-FIN-only case, however, snd_nxt will
	 * be set to snd_una, the offset will be 0, and the length may
	 * wind up 0.
	 */
	len = (long)ulmin(so->so_snd.ssb_cc, sendwin) - off;

	/*
	 * Lop off the SYN bit if it has already been sent.  However, if this
	 * is SYN-SENT state and the segment contains data, suppress sending
	 * the segment (sending the segment would be an option if we still
	 * did TAO and the remote host supported it).
	 */
	if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una)) {
		flags &= ~TH_SYN;
		off--, len++;
		if (len > 0 && tp->t_state == TCPS_SYN_SENT)
			return 0;
	}

	/*
	 * Be careful not to send data and/or FIN on SYN segments.
	 * This measure is needed to prevent interoperability problems
	 * with TCP implementations that are not fully conformant.
	 */
	if (flags & TH_SYN) {
		len = 0;
		flags &= ~TH_FIN;
	}

	if (len < 0) {
		/*
		 * A negative len can occur if our FIN has been sent but not
		 * acked, or if we are in a simultaneous connect in the
		 * TCPS_SYN_RECEIVED state with our SYN sent but not yet
		 * acked.
		 *
		 * If our window has contracted to 0 in the FIN case
		 * (which can only occur if we have NOT been called to
		 * retransmit as per code a few paragraphs up) then we
		 * want to shift the retransmit timer over to the
		 * persist timer.
		 *
		 * However, if we are in the TCPS_SYN_RECEIVED state
		 * (the SYN case) we will be in a simultaneous connect and
		 * the window may be zero degeneratively.  In this case we
		 * do not want to shift to the persist timer after the SYN
		 * or the SYN+ACK transmission.
		 */
		len = 0;
		if (sendwin == 0 && tp->t_state != TCPS_SYN_RECEIVED) {
			tcp_callout_stop(tp, tp->tt_rexmt);
			tp->t_rxtshift = 0;
			tp->snd_nxt = tp->snd_una;
			if (!tcp_callout_active(tp, tp->tt_persist))
				tcp_setpersist(tp);
		}
	}

	KASSERT(len >= 0, ("%s: len < 0", __func__));
	/*
	 * Automatic sizing of the send socket buffer.  Often the send buffer
	 * size is not optimally adjusted to the actual network conditions
	 * at hand (delay bandwidth product).  Setting the buffer size too
	 * small limits throughput on links with high bandwidth and high
	 * delay (e.g. trans-continental/oceanic links).  Setting the
	 * buffer size too big consumes too much real kernel memory,
	 * especially with many connections on busy servers.
	 *
	 * The criteria to step up the send buffer one notch are:
	 *  1. receive window of remote host is larger than send buffer
	 *     (with a fudge factor of 5/4th);
	 *  2. send buffer is filled to 7/8th with data (so we actually
	 *     have data to make use of it);
	 *  3. send buffer fill has not hit maximal automatic size;
	 *  4. our send window (slow start and congestion controlled) is
	 *     larger than sent but unacknowledged data in the send buffer.
	 *
	 * The remote host receive window scaling factor may limit the
	 * growing of the send buffer before it reaches its allowed
	 * maximum.
	 *
	 * It scales directly with slow start or congestion window
	 * and does at most one step per received ACK.  This fast
	 * scaling has the drawback of growing the send buffer beyond
	 * what is strictly necessary to make full use of a given
	 * delay*bandwidth product.  However, testing has shown this not
	 * to be much of a problem.  At worst we are trading the waste
	 * of available bandwidth (the non-use of it) for wasting some
	 * socket buffer memory.
	 *
	 * TODO: Shrink send buffer during idle periods together
	 * with congestion window.  Requires another timer.  Has to
	 * wait for upcoming tcp timer rewrite.
	 */
	if (tcp_do_autosndbuf && so->so_snd.ssb_flags & SSB_AUTOSIZE) {
		if ((tp->snd_wnd / 4 * 5) >= so->so_snd.ssb_hiwat &&
		    so->so_snd.ssb_cc >= (so->so_snd.ssb_hiwat / 8 * 7) &&
		    so->so_snd.ssb_cc < tcp_autosndbuf_max &&
		    sendwin >= (so->so_snd.ssb_cc - (tp->snd_nxt - tp->snd_una))) {
			u_long newsize;

			newsize = ulmin(so->so_snd.ssb_hiwat +
					tcp_autosndbuf_inc,
					tcp_autosndbuf_max);
			if (!ssb_reserve(&so->so_snd, newsize, so, NULL))
				atomic_clear_int(&so->so_snd.ssb_flags, SSB_AUTOSIZE);
			if (newsize >= (TCP_MAXWIN << tp->snd_scale))
				atomic_clear_int(&so->so_snd.ssb_flags, SSB_AUTOSIZE);
		}
	}

	/*
	 * Truncate to the maximum segment length and ensure that FIN is
	 * removed if the length no longer contains the last data byte.
	 */
	if (len > tp->t_maxseg) {
		len = tp->t_maxseg;
		sendalot = TRUE;
	}
	if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + so->so_snd.ssb_cc))
		flags &= ~TH_FIN;

	recvwin = ssb_space(&so->so_rcv);

	/*
	 * Sender silly window avoidance.  We transmit under the following
	 * conditions when len is non-zero:
	 *
	 *	- We have a full segment
	 *	- This is the last buffer in a write()/send() and we are
	 *	  either idle or running NODELAY
	 *	- we've timed out (e.g. persist timer)
	 *	- we have more than 1/2 the maximum send window's worth of
	 *	  data (receiver may be limiting the window size)
	 *	- we need to retransmit
	 */
	if (len) {
		if (len == tp->t_maxseg)
			goto send;
		/*
		 * NOTE! on localhost connections an 'ack' from the remote
		 * end may occur synchronously with the output and cause
		 * us to flush a buffer queued with moretocome.  XXX
		 *
		 * note: the len + off check is almost certainly unnecessary.
		 */
		if (!(tp->t_flags & TF_MORETOCOME) &&	/* normal case */
		    (idle || (tp->t_flags & TF_NODELAY)) &&
		    len + off >= so->so_snd.ssb_cc &&
		    !(tp->t_flags & TF_NOPUSH)) {
			goto send;
		}
		if (tp->t_flags & TF_FORCE)		/* typ. timeout case */
			goto send;
		if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0)
			goto send;
		if (SEQ_LT(tp->snd_nxt, tp->snd_max))	/* retransmit case */
			goto send;
	}

	/*
	 * Compare the available window to the amount of window known to
	 * the peer (as advertised window less next expected input).  If
	 * the difference is at least two max size segments, or at least
	 * 50% of the maximum possible window, then we want to send a
	 * window update to the peer.
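	 *
	 * For example (illustrative numbers only): with a 1460 byte
	 * maximum segment and a 64KB receive buffer, draining roughly
	 * 3KB of buffered data opens the window by two segments and can
	 * satisfy the two-segment test when that test is enabled, while
	 * the half-of-hiwat test does not fire until about 32KB of
	 * additional window can be offered.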
	 */
	if (recvwin > 0) {
		/*
		 * "adv" is the amount we can increase the window,
		 * taking into account that we are limited by
		 * TCP_MAXWIN << tp->rcv_scale.
		 */
		long adv = min(recvwin, (long)TCP_MAXWIN << tp->rcv_scale) -
			(tp->rcv_adv - tp->rcv_nxt);
		long hiwat;

		/*
		 * This ack case typically occurs when the user has drained
		 * the TCP socket buffer sufficiently to warrant an ack
		 * containing a 'pure window update'... that is, an ack that
		 * ONLY updates the tcp window.
		 *
		 * It is unclear why we would need to do a pure window update
		 * past 2 segments if we are going to do one at 1/2 the high
		 * water mark anyway, especially since under normal conditions
		 * the user program will drain the socket buffer quickly.
		 * The 2-segment pure window update will often add a large
		 * number of extra, unnecessary acks to the stream.
		 *
		 * avoid_pure_win_update now defaults to 1.
		 */
		if (avoid_pure_win_update == 0 ||
		    (tp->t_flags & TF_RXRESIZED)) {
			if (adv >= (long) (2 * tp->t_maxseg)) {
				goto send;
			}
		}
		hiwat = (long)(TCP_MAXWIN << tp->rcv_scale);
		if (hiwat > (long)so->so_rcv.ssb_hiwat)
			hiwat = (long)so->so_rcv.ssb_hiwat;
		if (adv >= hiwat / 2)
			goto send;
	}

	/*
	 * Send if we owe the peer an ACK, RST, SYN, or urgent data.  ACKNOW
	 * is also a catch-all for the retransmit timer timeout case.
	 */
	if (tp->t_flags & TF_ACKNOW)
		goto send;
	if ((flags & TH_RST) ||
	    ((flags & TH_SYN) && !(tp->t_flags & TF_NEEDSYN)))
		goto send;
	if (SEQ_GT(tp->snd_up, tp->snd_una))
		goto send;
	/*
	 * If our state indicates that FIN should be sent
	 * and we have not yet done so, then we need to send.
	 */
	if ((flags & TH_FIN) &&
	    (!(tp->t_flags & TF_SENTFIN) || tp->snd_nxt == tp->snd_una))
		goto send;

	/*
	 * TCP window updates are not reliable; rather, a polling protocol
	 * using ``persist'' packets is used to ensure receipt of window
	 * updates.  The three ``states'' for the output side are:
	 *	idle			not doing retransmits or persists
	 *	persisting		to move a small or zero window
	 *	(re)transmitting	and thereby not persisting
	 *
	 * tcp_callout_active(tp, tp->tt_persist)
	 *	is true when we are in persist state.
	 * The TF_FORCE flag in tp->t_flags
	 *	is set when we are called to send a persist packet.
	 * tcp_callout_active(tp, tp->tt_rexmt)
	 *	is set when we are retransmitting.
	 * The output side is idle when both timers are zero.
	 *
	 * If the send window is too small, there is data to transmit, and no
	 * retransmit or persist is pending, then go to persist state.
	 *
	 * If nothing happens soon, send when the timer expires:
	 * if the window is nonzero, transmit what we can, otherwise force out
	 * a byte.
	 *
	 * Don't try to set the persist state if we are in TCPS_SYN_RECEIVED
	 * with data pending.  This situation can occur during a
	 * simultaneous connect.
	 */
	if (so->so_snd.ssb_cc > 0 &&
	    tp->t_state != TCPS_SYN_RECEIVED &&
	    !tcp_callout_active(tp, tp->tt_rexmt) &&
	    !tcp_callout_active(tp, tp->tt_persist)) {
		tp->t_rxtshift = 0;
		tcp_setpersist(tp);
	}

	/*
	 * No reason to send a segment, just return.
	 */
	return (0);

send:
	/*
	 * Before ESTABLISHED, force sending of initial options
	 * unless TCP is set not to do any options.
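	 * On a typical SYN built below that means an MSS option (4
	 * bytes), a NOP-padded window scale option (4 bytes), an aligned
	 * SACK-permitted option (4 bytes) and, when timestamps are
	 * negotiated, a 12 byte aligned timestamp option, roughly 24 of
	 * the 40 available TCP option bytes (illustrative tally of the
	 * code below).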
	 * NOTE: we assume that the IP/TCP header plus TCP options
	 * always fit in a single mbuf, leaving room for a maximum
	 * link header, i.e.
	 *	max_linkhdr + sizeof(struct tcpiphdr) + optlen <= MCLBYTES
	 */
	optlen = 0;
	if (isipv6)
		hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	else
		hdrlen = sizeof(struct tcpiphdr);
	if (flags & TH_SYN) {
		tp->snd_nxt = tp->iss;
		if (!(tp->t_flags & TF_NOOPT)) {
			u_short mss;

			opt[0] = TCPOPT_MAXSEG;
			opt[1] = TCPOLEN_MAXSEG;
			mss = htons((u_short) tcp_mssopt(tp));
			memcpy(opt + 2, &mss, sizeof mss);
			optlen = TCPOLEN_MAXSEG;

			if ((tp->t_flags & TF_REQ_SCALE) &&
			    (!(flags & TH_ACK) ||
			     (tp->t_flags & TF_RCVD_SCALE))) {
				*((u_int32_t *)(opt + optlen)) = htonl(
					TCPOPT_NOP << 24 |
					TCPOPT_WINDOW << 16 |
					TCPOLEN_WINDOW << 8 |
					tp->request_r_scale);
				optlen += 4;
			}

			if ((tcp_do_sack && !(flags & TH_ACK)) ||
			    tp->t_flags & TF_SACK_PERMITTED) {
				uint32_t *lp = (uint32_t *)(opt + optlen);

				*lp = htonl(TCPOPT_SACK_PERMITTED_ALIGNED);
				optlen += TCPOLEN_SACK_PERMITTED_ALIGNED;
			}
		}
	}

	/*
	 * Send a timestamp and echo-reply if this is a SYN and our side
	 * wants to use timestamps (TF_REQ_TSTMP is set) or both our side
	 * and our peer have sent timestamps in our SYNs.
	 */
	if ((tp->t_flags & (TF_REQ_TSTMP | TF_NOOPT)) == TF_REQ_TSTMP &&
	    !(flags & TH_RST) &&
	    (!(flags & TH_ACK) || (tp->t_flags & TF_RCVD_TSTMP))) {
		u_int32_t *lp = (u_int32_t *)(opt + optlen);

		/* Form timestamp option as shown in appendix A of RFC 1323. */
		*lp++ = htonl(TCPOPT_TSTAMP_HDR);
		*lp++ = htonl(ticks);
		*lp = htonl(tp->ts_recent);
		optlen += TCPOLEN_TSTAMP_APPA;
	}

	/* Set receive buffer autosizing timestamp. */
	if (tp->rfbuf_ts == 0 && (so->so_rcv.ssb_flags & SSB_AUTOSIZE))
		tp->rfbuf_ts = ticks;

	/*
	 * If this is a SACK connection and we have a block to report,
	 * fill in the SACK blocks in the TCP options.
	 */
	if ((tp->t_flags & (TF_SACK_PERMITTED | TF_NOOPT)) ==
	    TF_SACK_PERMITTED &&
	    (!LIST_EMPTY(&tp->t_segq) ||
	     tp->reportblk.rblk_start != tp->reportblk.rblk_end))
		tcp_sack_fill_report(tp, opt, &optlen);

#ifdef TCP_SIGNATURE
	if (tp->t_flags & TF_SIGNATURE) {
		int i;
		u_char *bp;
		/*
		 * Initialize TCP-MD5 option (RFC2385)
		 */
		bp = (u_char *)opt + optlen;
		*bp++ = TCPOPT_SIGNATURE;
		*bp++ = TCPOLEN_SIGNATURE;
		sigoff = optlen + 2;
		for (i = 0; i < TCP_SIGLEN; i++)
			*bp++ = 0;
		optlen += TCPOLEN_SIGNATURE;
		/*
		 * Terminate options list and maintain 32-bit alignment.
		 */
		*bp++ = TCPOPT_NOP;
		*bp++ = TCPOPT_EOL;
		optlen += 2;
	}
#endif /* TCP_SIGNATURE */
	KASSERT(optlen <= TCP_MAXOLEN, ("too many TCP options"));
	hdrlen += optlen;

	if (isipv6) {
		ipoptlen = ip6_optlen(inp);
	} else {
		if (inp->inp_options) {
			ipoptlen = inp->inp_options->m_len -
			    offsetof(struct ipoption, ipopt_list);
		} else {
			ipoptlen = 0;
		}
	}
#ifdef IPSEC
	ipoptlen += ipsec_hdrsiz_tcp(tp);
#endif

	/*
	 * Adjust the data length if insertion of options will bump the packet
	 * length beyond the t_maxopd length.  Clear FIN to prevent premature
	 * closure since there is still more data to send after this (now
	 * truncated) packet.
	 *
	 * If just the options do not fit, we are in a no-win situation and
	 * we treat it as an unreachable host.
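	 *
	 * For example (illustrative figures): with t_maxopd of 1460, a
	 * 12 byte timestamp option and no IP options, at most 1448 data
	 * bytes ride in this segment; the remainder is deferred to the
	 * next pass through the "again" loop.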
	 */
	if (len + optlen + ipoptlen > tp->t_maxopd) {
		if (tp->t_maxopd <= optlen + ipoptlen) {
			static time_t last_optlen_report;

			if (last_optlen_report != time_second) {
				last_optlen_report = time_second;
				kprintf("tcpcb %p: MSS (%d) too small to hold options!\n", tp, tp->t_maxopd);
			}
			error = EHOSTUNREACH;
			goto out;
		} else {
			flags &= ~TH_FIN;
			len = tp->t_maxopd - optlen - ipoptlen;
			sendalot = TRUE;
		}
	}

#ifdef INET6
	KASSERT(max_linkhdr + hdrlen <= MCLBYTES, ("tcphdr too big"));
#else
	KASSERT(max_linkhdr + hdrlen <= MHLEN, ("tcphdr too big"));
#endif

	/*
	 * Grab a header mbuf, attaching a copy of data to
	 * be transmitted, and initialize the header from
	 * the template for sends on this connection.
	 */
	if (len) {
		if ((tp->t_flags & TF_FORCE) && len == 1)
			tcpstat.tcps_sndprobe++;
		else if (SEQ_LT(tp->snd_nxt, tp->snd_max)) {
			if (tp->snd_nxt == tp->snd_una)
				tp->snd_max_rexmt = tp->snd_max;
			if (nsacked) {
				tcpstat.tcps_sndsackrtopack++;
				tcpstat.tcps_sndsackrtobyte += len;
			}
			tcpstat.tcps_sndrexmitpack++;
			tcpstat.tcps_sndrexmitbyte += len;
		} else {
			tcpstat.tcps_sndpack++;
			tcpstat.tcps_sndbyte += len;
		}
		if (idle_cwv) {
			idle_cwv = 0;
			tcp_idle_cwnd_validate(tp);
		}
		/* Update last send time after CWV */
		tp->snd_last = ticks;
#ifdef notyet
		if ((m = m_copypack(so->so_snd.ssb_mb, off, (int)len,
		    max_linkhdr + hdrlen)) == NULL) {
			error = ENOBUFS;
			goto after_th;
		}
		/*
		 * m_copypack left space for our hdr; use it.
		 */
		m->m_len += hdrlen;
		m->m_data -= hdrlen;
#else
#ifndef INET6
		m = m_gethdr(MB_DONTWAIT, MT_HEADER);
#else
		m = m_getl(hdrlen + max_linkhdr, MB_DONTWAIT, MT_HEADER,
		    M_PKTHDR, NULL);
#endif
		if (m == NULL) {
			error = ENOBUFS;
			goto after_th;
		}
		m->m_data += max_linkhdr;
		m->m_len = hdrlen;
		if (len <= MHLEN - hdrlen - max_linkhdr) {
			m_copydata(so->so_snd.ssb_mb, off, (int) len,
			    mtod(m, caddr_t) + hdrlen);
			m->m_len += len;
		} else {
			m->m_next = m_copy(so->so_snd.ssb_mb, off, (int) len);
			if (m->m_next == NULL) {
				m_free(m);
				m = NULL;
				error = ENOBUFS;
				goto after_th;
			}
		}
#endif
		/*
		 * If we're sending everything we've got, set PUSH.
		 * (This will keep happy those implementations which only
		 * give data to the user when a buffer fills or
		 * a PUSH comes in.)
		 */
		if (off + len == so->so_snd.ssb_cc)
			flags |= TH_PUSH;
	} else {
		if (tp->t_flags & TF_ACKNOW)
			tcpstat.tcps_sndacks++;
		else if (flags & (TH_SYN | TH_FIN | TH_RST))
			tcpstat.tcps_sndctrl++;
		else if (SEQ_GT(tp->snd_up, tp->snd_una))
			tcpstat.tcps_sndurg++;
		else
			tcpstat.tcps_sndwinup++;

		MGETHDR(m, MB_DONTWAIT, MT_HEADER);
		if (m == NULL) {
			error = ENOBUFS;
			goto after_th;
		}
		if (isipv6 &&
		    (hdrlen + max_linkhdr > MHLEN) && hdrlen <= MHLEN)
			MH_ALIGN(m, hdrlen);
		else
			m->m_data += max_linkhdr;
		m->m_len = hdrlen;
	}
	m->m_pkthdr.rcvif = NULL;
	if (isipv6) {
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(ip6 + 1);
		tcp_fillheaders(tp, ip6, th);
	} else {
		ip = mtod(m, struct ip *);
		ipov = (struct ipovly *)ip;
		th = (struct tcphdr *)(ip + 1);
		/* this picks up the pseudo header (w/o the length) */
		tcp_fillheaders(tp, ip, th);
	}
after_th:
	/*
	 * Fill in fields, remembering the maximum advertised
	 * window for use in delaying messages about window sizes.
	 * If resending a FIN, be sure not to use a new sequence number.
	 */
	if (flags & TH_FIN && tp->t_flags & TF_SENTFIN &&
	    tp->snd_nxt == tp->snd_max)
		tp->snd_nxt--;

	if (th != NULL) {
		/*
		 * If we are doing retransmissions, then snd_nxt will
		 * not reflect the first unsent octet.  For ACK-only
		 * packets, we do not want the sequence number of the
		 * retransmitted packet; we want the sequence number
		 * of the next unsent octet.  So, if there is no data
		 * (and no SYN or FIN), use snd_max instead of snd_nxt
		 * when filling in ti_seq.  But if we are in persist
		 * state, snd_max might reflect one byte beyond the
		 * right edge of the window, so use snd_nxt in that
		 * case, since we know we aren't doing a retransmission.
		 * (retransmit and persist are mutually exclusive...)
		 */
		if (len || (flags & (TH_SYN|TH_FIN)) ||
		    tcp_callout_active(tp, tp->tt_persist))
			th->th_seq = htonl(tp->snd_nxt);
		else
			th->th_seq = htonl(tp->snd_max);
		th->th_ack = htonl(tp->rcv_nxt);
		if (optlen) {
			bcopy(opt, th + 1, optlen);
			th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
		}
		th->th_flags = flags;
	}

	/*
	 * Calculate the receive window.  Don't shrink the window, but avoid
	 * silly window syndrome by sending a 0 window if the actual
	 * window is less than one segment.
	 */
	if (recvwin < (long)(so->so_rcv.ssb_hiwat / 4) &&
	    recvwin < (long)tp->t_maxseg)
		recvwin = 0;
	if (recvwin < (tcp_seq_diff_t)(tp->rcv_adv - tp->rcv_nxt))
		recvwin = (tcp_seq_diff_t)(tp->rcv_adv - tp->rcv_nxt);
	if (recvwin > (long)TCP_MAXWIN << tp->rcv_scale)
		recvwin = (long)TCP_MAXWIN << tp->rcv_scale;

	/*
	 * Adjust the RXWIN0SENT flag - indicate that we have advertised
	 * a 0 window.  This may cause the remote transmitter to stall.  This
	 * flag tells soreceive() to disable delayed acknowledgements when
	 * draining the buffer.  This can occur if the receiver is attempting
	 * to read more data than can be buffered prior to transmitting on
	 * the connection.
	 */
	if (recvwin == 0)
		tp->t_flags |= TF_RXWIN0SENT;
	else
		tp->t_flags &= ~TF_RXWIN0SENT;

	if (th != NULL)
		th->th_win = htons((u_short) (recvwin>>tp->rcv_scale));

	if (SEQ_GT(tp->snd_up, tp->snd_nxt)) {
		if (th != NULL) {
			th->th_urp = htons((u_short)(tp->snd_up - tp->snd_nxt));
			th->th_flags |= TH_URG;
		}
	} else {
		/*
		 * If there is no urgent pointer to send, then we pull
		 * the urgent pointer to the left edge of the send window
		 * so that it doesn't drift into the send window on sequence
		 * number wraparound.
		 */
		tp->snd_up = tp->snd_una;	/* drag it along */
	}

	if (th != NULL) {
#ifdef TCP_SIGNATURE
		if (tp->t_flags & TF_SIGNATURE) {
			tcpsignature_compute(m, len, optlen,
			    (u_char *)(th + 1) + sigoff, IPSEC_DIR_OUTBOUND);
		}
#endif /* TCP_SIGNATURE */

		/*
		 * Put TCP length in extended header, and then
		 * checksum extended header and data.
		 */
		m->m_pkthdr.len = hdrlen + len;	/* in6_cksum() needs this */
		if (isipv6) {
			/*
			 * ip6_plen need not be filled in now; it will be
			 * filled in by ip6_output().
			 */
			th->th_sum = in6_cksum(m, IPPROTO_TCP,
			    sizeof(struct ip6_hdr),
			    sizeof(struct tcphdr) + optlen + len);
		} else {
			m->m_pkthdr.csum_flags = CSUM_TCP;
			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
			if (len + optlen) {
				th->th_sum = in_addword(th->th_sum,
				    htons((u_short)(optlen + len)));
			}

			/*
			 * The IP version must be set here for ipv4/ipv6
			 * checking later.
			 */
			KASSERT(ip->ip_v == IPVERSION,
			    ("%s: IP version incorrect: %d",
			     __func__, ip->ip_v));
		}
	}

	/*
	 * In transmit state, time the transmission and arrange for
	 * the retransmit.  In persist state, just set snd_max.
	 */
	if (!(tp->t_flags & TF_FORCE) ||
	    !tcp_callout_active(tp, tp->tt_persist)) {
		tcp_seq startseq = tp->snd_nxt;

		/*
		 * Advance snd_nxt over the sequence space of this segment.
		 */
		if (flags & (TH_SYN | TH_FIN)) {
			if (flags & TH_SYN)
				tp->snd_nxt++;
			if (flags & TH_FIN) {
				tp->snd_nxt++;
				tp->t_flags |= TF_SENTFIN;
			}
		}
		tp->snd_nxt += len;
		if (SEQ_GT(tp->snd_nxt, tp->snd_max)) {
			tp->snd_max = tp->snd_nxt;
			/*
			 * Time this transmission if not a retransmission and
			 * not currently timing anything.
			 */
			if (tp->t_rtttime == 0) {
				tp->t_rtttime = ticks;
				tp->t_rtseq = startseq;
				tcpstat.tcps_segstimed++;
			}
		}

		/*
		 * Set the retransmit timer if it is not currently set,
		 * and we are not doing a pure ack or a keep-alive probe.
		 * The initial value for the retransmit timer is smoothed
		 * round-trip time + 2 * round-trip time variance.
		 * Initialize the shift counter which is used for backoff
		 * of the retransmit time.
		 */
		if (!tcp_callout_active(tp, tp->tt_rexmt) &&
		    tp->snd_nxt != tp->snd_una) {
			if (tcp_callout_active(tp, tp->tt_persist)) {
				tcp_callout_stop(tp, tp->tt_persist);
				tp->t_rxtshift = 0;
			}
			tcp_callout_reset(tp, tp->tt_rexmt, tp->t_rxtcur,
			    tcp_timer_rexmt);
		}
	} else {
		/*
		 * Persist case: update snd_max, but since we are in
		 * persist mode (no window) we do not update snd_nxt.
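		 *
		 * For instance, a forced one byte probe advances snd_max
		 * by one (two if a FIN rides along) while snd_nxt stays
		 * put, so the probe data is sent again once the peer
		 * finally opens its window.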
		 */
		int xlen = len;
		if (flags & TH_SYN)
			panic("tcp_output: persist timer to send SYN\n");
		if (flags & TH_FIN) {
			++xlen;
			tp->t_flags |= TF_SENTFIN;
		}
		if (SEQ_GT(tp->snd_nxt + xlen, tp->snd_max))
			tp->snd_max = tp->snd_nxt + xlen;
	}

	if (th != NULL) {
#ifdef TCPDEBUG
		/* Trace. */
		if (so->so_options & SO_DEBUG) {
			tcp_trace(TA_OUTPUT, tp->t_state, tp,
			    mtod(m, void *), th, 0);
		}
#endif

		/*
		 * Fill in the IP length and desired time to live and
		 * send to the IP level.  There should be a better way
		 * to handle ttl and tos; we could keep them in
		 * the template, but need a way to checksum without them.
		 */
		/*
		 * m->m_pkthdr.len should have been set before the checksum
		 * calculation, because in6_cksum() needs it.
		 */
		if (isipv6) {
			/*
			 * We set the hoplimit separately for every segment,
			 * since the user might want to change the value
			 * via setsockopt.  Also, the desired default hop
			 * limit might be changed via Neighbor Discovery.
			 */
			ip6->ip6_hlim = in6_selecthlim(inp,
			    (inp->in6p_route.ro_rt ?
			     inp->in6p_route.ro_rt->rt_ifp : NULL));

			/* TODO: IPv6 IP6TOS_ECT bit on */
			error = ip6_output(m, inp->in6p_outputopts,
			    &inp->in6p_route, (so->so_options & SO_DONTROUTE),
			    NULL, NULL, inp);
		} else {
			struct rtentry *rt;
			ip->ip_len = m->m_pkthdr.len;
#ifdef INET6
			if (INP_CHECK_SOCKAF(so, AF_INET6))
				ip->ip_ttl = in6_selecthlim(inp,
				    (inp->in6p_route.ro_rt ?
				     inp->in6p_route.ro_rt->rt_ifp : NULL));
			else
#endif
				ip->ip_ttl = inp->inp_ip_ttl;	/* XXX */

			ip->ip_tos = inp->inp_ip_tos;	/* XXX */
			/*
			 * See if we should do MTU discovery.
			 * We do it only if the following are true:
			 *	1) we have a valid route to the destination
			 *	2) the MTU is not locked (if it is,
			 *	   then discovery has been disabled)
			 */
			if (path_mtu_discovery &&
			    (rt = inp->inp_route.ro_rt) &&
			    (rt->rt_flags & RTF_UP) &&
			    !(rt->rt_rmx.rmx_locks & RTV_MTU))
				ip->ip_off |= IP_DF;

			error = ip_output(m, inp->inp_options, &inp->inp_route,
			    (so->so_options & SO_DONTROUTE) |
			    IP_DEBUGROUTE, NULL, inp);
		}
	} else {
		KASSERT(error != 0, ("no error, but th not set\n"));
	}
	if (error) {

		/*
		 * We know that the packet was lost, so back out the
		 * sequence number advance, if any.
		 */
		if (!(tp->t_flags & TF_FORCE) ||
		    !tcp_callout_active(tp, tp->tt_persist)) {
			/*
			 * No need to check for TH_FIN here because
			 * the TF_SENTFIN flag handles that case.
			 */
			if (!(flags & TH_SYN))
				tp->snd_nxt -= len;
		}

out:
		if (error == ENOBUFS) {
			/*
			 * If we can't send, make sure there is something
			 * to get us going again later.
			 *
			 * The persist timer isn't necessarily allowed in all
			 * states; use the rexmt timer.
			 */
			if (!tcp_callout_active(tp, tp->tt_rexmt) &&
			    !tcp_callout_active(tp, tp->tt_persist)) {
				tcp_callout_reset(tp, tp->tt_rexmt,
				    tp->t_rxtcur,
				    tcp_timer_rexmt);
#if 0
				tp->t_rxtshift = 0;
				tcp_setpersist(tp);
#endif
			}
			tcp_quench(inp, 0);
			return (0);
		}
		if (error == EMSGSIZE) {
			/*
			 * ip_output() will have already fixed the route
			 * for us.  tcp_mtudisc() will, as its last action,
			 * initiate retransmission, so it is important to
			 * not do so here.
			 */
			tcp_mtudisc(inp, 0);
			return 0;
		}
		if ((error == EHOSTUNREACH || error == ENETDOWN) &&
		    TCPS_HAVERCVDSYN(tp->t_state)) {
			tp->t_softerror = error;
			return (0);
		}
		return (error);
	}
	tcpstat.tcps_sndtotal++;

	/*
	 * Data sent (as far as we can tell).
	 *
	 * If this advertises a larger window than any other segment,
	 * then remember the size of the advertised window.
	 *
	 * Any pending ACK has now been sent.
	 */
	if (recvwin > 0 && SEQ_GT(tp->rcv_nxt + recvwin, tp->rcv_adv)) {
		tp->rcv_adv = tp->rcv_nxt + recvwin;
		tp->t_flags &= ~TF_RXRESIZED;
	}
	tp->last_ack_sent = tp->rcv_nxt;
	tp->t_flags &= ~TF_ACKNOW;
	if (tcp_delack_enabled)
		tcp_callout_stop(tp, tp->tt_delack);
	if (sendalot)
		goto again;
	return (0);
}

void
tcp_setpersist(struct tcpcb *tp)
{
	int t = ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1;
	int tt;

	if (tp->t_state == TCPS_SYN_SENT ||
	    tp->t_state == TCPS_SYN_RECEIVED) {
		panic("tcp_setpersist: not established yet, current %s\n",
		    tp->t_state == TCPS_SYN_SENT ?
		    "SYN_SENT" : "SYN_RECEIVED");
	}

	if (tcp_callout_active(tp, tp->tt_rexmt))
		panic("tcp_setpersist: retransmit pending");
	/*
	 * Start/restart the persistence timer.
	 */
	TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift], TCPTV_PERSMIN,
	    TCPTV_PERSMAX);
	tcp_callout_reset(tp, tp->tt_persist, tt, tcp_timer_persist);
	if (tp->t_rxtshift < TCP_MAXRXTSHIFT)
		tp->t_rxtshift++;
}

static void
tcp_idle_cwnd_validate(struct tcpcb *tp)
{
	u_long initial_cwnd = tcp_initial_window(tp);
	u_long min_cwnd;

	tcpstat.tcps_sndidle++;

	/* According to RFC5681: RW=min(IW,cwnd) */
	min_cwnd = min(tp->snd_cwnd, initial_cwnd);

	if (tcp_idle_cwv) {
		u_long idle_time, decay_cwnd;

		/*
		 * RFC2861, but only after an idle period.
		 */

		/*
		 * Before the congestion window is reduced, ssthresh
		 * is set to the maximum of its current value and 3/4
		 * cwnd.  If the sender then has more data to send
		 * than the decayed cwnd allows, the TCP will slow-
		 * start (perform exponential increase) at least
		 * half-way back up to the old value of cwnd.
		 */
		tp->snd_ssthresh = max(tp->snd_ssthresh,
		    (3 * tp->snd_cwnd) / 4);

		/*
		 * Decay the congestion window by half for every RTT
		 * that the flow remains inactive.
		 *
		 * The difference between our implementation and
		 * RFC2861 is that we don't allow cwnd to go below
		 * the value allowed by RFC5681 (min_cwnd).
		 */
		idle_time = ticks - tp->snd_last;
		decay_cwnd = tp->snd_cwnd;
		while (idle_time >= tp->t_rxtcur &&
		    decay_cwnd > min_cwnd) {
			decay_cwnd >>= 1;
			idle_time -= tp->t_rxtcur;
		}
		tp->snd_cwnd = max(decay_cwnd, min_cwnd);
	} else {
		/*
		 * Slow-start from scratch to re-determine the send
		 * congestion window.
		 */
		tp->snd_cwnd = min_cwnd;
	}

	/* Restart ABC counting during congestion avoidance */
	tp->snd_wacked = 0;
}
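
/*
 * Illustrative example of the decay loop above (assumed numbers, not
 * taken from the code): with snd_cwnd at 64KB, an initial window of
 * roughly 4KB and an idle period a little over three retransmit
 * timeouts, cwnd is halved three times (64KB -> 32KB -> 16KB -> 8KB)
 * and the connection resumes with about 8KB of congestion window
 * instead of slow-starting from scratch.
 */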