1 /* 2 * Copyright (c) 2002, 2003, 2004 Jeffrey M. Hsu. All rights reserved. 3 * Copyright (c) 2002, 2003, 2004 The DragonFly Project. All rights reserved. 4 * 5 * This code is derived from software contributed to The DragonFly Project 6 * by Jeffrey M. Hsu. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the name of The DragonFly Project nor the names of its 17 * contributors may be used to endorse or promote products derived 18 * from this software without specific, prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 25 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 28 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 29 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 32 */ 33 34 /* 35 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995 36 * The Regents of the University of California. All rights reserved. 37 * 38 * Redistribution and use in source and binary forms, with or without 39 * modification, are permitted provided that the following conditions 40 * are met: 41 * 1. Redistributions of source code must retain the above copyright 42 * notice, this list of conditions and the following disclaimer. 43 * 2. Redistributions in binary form must reproduce the above copyright 44 * notice, this list of conditions and the following disclaimer in the 45 * documentation and/or other materials provided with the distribution. 46 * 3. All advertising materials mentioning features or use of this software 47 * must display the following acknowledgement: 48 * This product includes software developed by the University of 49 * California, Berkeley and its contributors. 50 * 4. Neither the name of the University nor the names of its contributors 51 * may be used to endorse or promote products derived from this software 52 * without specific prior written permission. 53 * 54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 64 * SUCH DAMAGE. 65 * 66 * @(#)tcp_input.c 8.12 (Berkeley) 5/24/95 67 * $FreeBSD: src/sys/netinet/tcp_input.c,v 1.107.2.38 2003/05/21 04:46:41 cjc Exp $ 68 */ 69 70 #include "opt_inet.h" 71 #include "opt_inet6.h" 72 #include "opt_ipsec.h" 73 #include "opt_tcpdebug.h" 74 #include "opt_tcp_input.h" 75 76 #include <sys/param.h> 77 #include <sys/systm.h> 78 #include <sys/kernel.h> 79 #include <sys/sysctl.h> 80 #include <sys/malloc.h> 81 #include <sys/mbuf.h> 82 #include <sys/proc.h> /* for proc0 declaration */ 83 #include <sys/protosw.h> 84 #include <sys/socket.h> 85 #include <sys/socketvar.h> 86 #include <sys/syslog.h> 87 #include <sys/in_cksum.h> 88 89 #include <sys/socketvar2.h> 90 91 #include <machine/cpu.h> /* before tcp_seq.h, for tcp_random18() */ 92 #include <machine/stdarg.h> 93 94 #include <net/if.h> 95 #include <net/route.h> 96 97 #include <netinet/in.h> 98 #include <netinet/in_systm.h> 99 #include <netinet/ip.h> 100 #include <netinet/ip_icmp.h> /* for ICMP_BANDLIM */ 101 #include <netinet/in_var.h> 102 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */ 103 #include <netinet/in_pcb.h> 104 #include <netinet/ip_var.h> 105 #include <netinet/ip6.h> 106 #include <netinet/icmp6.h> 107 #include <netinet6/nd6.h> 108 #include <netinet6/ip6_var.h> 109 #include <netinet6/in6_pcb.h> 110 #include <netinet/tcp.h> 111 #include <netinet/tcp_fsm.h> 112 #include <netinet/tcp_seq.h> 113 #include <netinet/tcp_timer.h> 114 #include <netinet/tcp_timer2.h> 115 #include <netinet/tcp_var.h> 116 #include <netinet6/tcp6_var.h> 117 #include <netinet/tcpip.h> 118 119 #ifdef TCPDEBUG 120 #include <netinet/tcp_debug.h> 121 122 u_char tcp_saveipgen[40]; /* the size must be of max ip header, now IPv6 */ 123 struct tcphdr tcp_savetcp; 124 #endif 125 126 #ifdef FAST_IPSEC 127 #include <netproto/ipsec/ipsec.h> 128 #include <netproto/ipsec/ipsec6.h> 129 #endif 130 131 #ifdef IPSEC 132 #include <netinet6/ipsec.h> 133 #include <netinet6/ipsec6.h> 134 #include <netproto/key/key.h> 135 #endif 136 137 MALLOC_DEFINE(M_TSEGQ, "tseg_qent", "TCP segment queue entry"); 138 139 static int log_in_vain = 0; 140 SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW, 141 &log_in_vain, 0, "Log all incoming TCP connections"); 142 143 static int blackhole = 0; 144 SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW, 145 &blackhole, 0, "Do not send RST when dropping refused connections"); 146 147 int tcp_delack_enabled = 1; 148 SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW, 149 &tcp_delack_enabled, 0, 150 "Delay ACK to try and piggyback it onto a data packet"); 151 152 #ifdef TCP_DROP_SYNFIN 153 static int drop_synfin = 0; 154 SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_RW, 155 &drop_synfin, 0, "Drop TCP packets with SYN+FIN set"); 156 #endif 157 158 static int tcp_do_limitedtransmit = 1; 159 SYSCTL_INT(_net_inet_tcp, OID_AUTO, limitedtransmit, CTLFLAG_RW, 160 &tcp_do_limitedtransmit, 0, "Enable RFC 3042 (Limited Transmit)"); 161 162 static int tcp_do_early_retransmit = 1; 163 SYSCTL_INT(_net_inet_tcp, OID_AUTO, earlyretransmit, CTLFLAG_RW, 164 &tcp_do_early_retransmit, 0, "Early retransmit"); 165 166 int tcp_aggregate_acks = 1; 167 SYSCTL_INT(_net_inet_tcp, OID_AUTO, aggregate_acks, CTLFLAG_RW, 168 &tcp_aggregate_acks, 0, "Aggregate built-up acks into one ack"); 169 170 static int tcp_do_eifel_detect = 1; 171 SYSCTL_INT(_net_inet_tcp, OID_AUTO, eifel, CTLFLAG_RW, 172 &tcp_do_eifel_detect, 0, "Eifel detection algorithm (RFC 3522)"); 173 174 static int tcp_do_abc = 1; 175 SYSCTL_INT(_net_inet_tcp, OID_AUTO, abc, CTLFLAG_RW, 176 &tcp_do_abc, 0, 177 "TCP Appropriate Byte Counting (RFC 3465)"); 178 179 /* 180 * The following value actually takes range [25ms, 250ms], 181 * given that most modern systems use 1ms ~ 10ms as the unit 182 * of timestamp option. 183 */ 184 static u_int tcp_paws_tolerance = 25; 185 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, paws_tolerance, CTLFLAG_RW, 186 &tcp_paws_tolerance, 0, "RFC1323 PAWS tolerance"); 187 188 /* 189 * Define as tunable for easy testing with SACK on and off. 190 * Warning: do not change setting in the middle of an existing active TCP flow, 191 * else strange things might happen to that flow. 192 */ 193 int tcp_do_sack = 1; 194 SYSCTL_INT(_net_inet_tcp, OID_AUTO, sack, CTLFLAG_RW, 195 &tcp_do_sack, 0, "Enable SACK Algorithms"); 196 197 int tcp_do_smartsack = 1; 198 SYSCTL_INT(_net_inet_tcp, OID_AUTO, smartsack, CTLFLAG_RW, 199 &tcp_do_smartsack, 0, "Enable Smart SACK Algorithms"); 200 201 int tcp_do_rescuesack = 1; 202 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rescuesack, CTLFLAG_RW, 203 &tcp_do_rescuesack, 0, "Rescue retransmission for SACK"); 204 205 int tcp_aggressive_rescuesack = 0; 206 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rescuesack_agg, CTLFLAG_RW, 207 &tcp_aggressive_rescuesack, 0, "Aggressive rescue retransmission for SACK"); 208 209 int tcp_do_rfc3517bis = 0; 210 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3517bis, CTLFLAG_RW, 211 &tcp_do_rfc3517bis, 0, "Enable RFC3517 update"); 212 213 int tcp_rfc3517bis_rxt = 0; 214 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3517bis_rxt, CTLFLAG_RW, 215 &tcp_rfc3517bis_rxt, 0, "Enable RFC3517 retransmit update"); 216 217 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, reass, CTLFLAG_RW, 0, 218 "TCP Segment Reassembly Queue"); 219 220 int tcp_reass_maxseg = 0; 221 SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, maxsegments, CTLFLAG_RD, 222 &tcp_reass_maxseg, 0, 223 "Global maximum number of TCP Segments in Reassembly Queue"); 224 225 int tcp_reass_qsize = 0; 226 SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, cursegments, CTLFLAG_RD, 227 &tcp_reass_qsize, 0, 228 "Global number of TCP Segments currently in Reassembly Queue"); 229 230 static int tcp_reass_overflows = 0; 231 SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, overflows, CTLFLAG_RD, 232 &tcp_reass_overflows, 0, 233 "Global number of TCP Segment Reassembly Queue Overflows"); 234 235 int tcp_do_autorcvbuf = 1; 236 SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_RW, 237 &tcp_do_autorcvbuf, 0, "Enable automatic receive buffer sizing"); 238 239 int tcp_autorcvbuf_inc = 16*1024; 240 SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_RW, 241 &tcp_autorcvbuf_inc, 0, 242 "Incrementor step size of automatic receive buffer"); 243 244 int tcp_autorcvbuf_max = 2*1024*1024; 245 SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_RW, 246 &tcp_autorcvbuf_max, 0, "Max size of automatic receive buffer"); 247 248 int tcp_sosend_agglim = 2; 249 SYSCTL_INT(_net_inet_tcp, OID_AUTO, sosend_agglim, CTLFLAG_RW, 250 &tcp_sosend_agglim, 0, "TCP sosend mbuf aggregation limit"); 251 252 int tcp_sosend_async = 1; 253 SYSCTL_INT(_net_inet_tcp, OID_AUTO, sosend_async, CTLFLAG_RW, 254 &tcp_sosend_async, 0, "TCP asynchronized pru_send"); 255 256 static int tcp_ignore_redun_dsack = 1; 257 SYSCTL_INT(_net_inet_tcp, OID_AUTO, ignore_redun_dsack, CTLFLAG_RW, 258 &tcp_ignore_redun_dsack, 0, "Ignore redundant DSACK"); 259 260 static void tcp_dooptions(struct tcpopt *, u_char *, int, boolean_t, 261 tcp_seq); 262 static void tcp_pulloutofband(struct socket *, 263 struct tcphdr *, struct mbuf *, int); 264 static int tcp_reass(struct tcpcb *, struct tcphdr *, int *, 265 struct mbuf *); 266 static void tcp_xmit_timer(struct tcpcb *, int, tcp_seq); 267 static void tcp_newreno_partial_ack(struct tcpcb *, struct tcphdr *, int); 268 static void tcp_sack_rexmt(struct tcpcb *); 269 static boolean_t tcp_sack_limitedxmit(struct tcpcb *); 270 static int tcp_rmx_msl(const struct tcpcb *); 271 static void tcp_established(struct tcpcb *); 272 static boolean_t tcp_fast_recovery(struct tcpcb *, tcp_seq, 273 const struct tcpopt *); 274 275 /* Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. */ 276 #ifdef INET6 277 #define ND6_HINT(tp) \ 278 do { \ 279 if ((tp) && (tp)->t_inpcb && \ 280 ((tp)->t_inpcb->inp_vflag & INP_IPV6) && \ 281 (tp)->t_inpcb->in6p_route.ro_rt) \ 282 nd6_nud_hint((tp)->t_inpcb->in6p_route.ro_rt, NULL, 0); \ 283 } while (0) 284 #else 285 #define ND6_HINT(tp) 286 #endif 287 288 /* 289 * Indicate whether this ack should be delayed. We can delay the ack if 290 * - delayed acks are enabled and 291 * - there is no delayed ack timer in progress and 292 * - our last ack wasn't a 0-sized window. We never want to delay 293 * the ack that opens up a 0-sized window. 294 */ 295 #define DELAY_ACK(tp) \ 296 (tcp_delack_enabled && !tcp_callout_pending(tp, tp->tt_delack) && \ 297 !(tp->t_flags & TF_RXWIN0SENT)) 298 299 #define acceptable_window_update(tp, th, tiwin) \ 300 (SEQ_LT(tp->snd_wl1, th->th_seq) || \ 301 (tp->snd_wl1 == th->th_seq && \ 302 (SEQ_LT(tp->snd_wl2, th->th_ack) || \ 303 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)))) 304 305 #define iceildiv(n, d) (((n)+(d)-1) / (d)) 306 #define need_early_retransmit(tp, ownd) \ 307 (tcp_do_early_retransmit && \ 308 (tcp_do_eifel_detect && (tp->t_flags & TF_RCVD_TSTMP)) && \ 309 ownd < ((tp->t_rxtthresh + 1) * tp->t_maxseg) && \ 310 tp->t_dupacks + 1 >= iceildiv(ownd, tp->t_maxseg) && \ 311 (!TCP_DO_SACK(tp) || ownd <= tp->t_maxseg || \ 312 tcp_sack_has_sacked(&tp->scb, ownd - tp->t_maxseg))) 313 314 /* 315 * Returns TRUE, if this segment can be merged with the last 316 * pending segment in the reassemble queue and this segment 317 * does not overlap with the pending segment immediately 318 * preceeding the last pending segment. 319 */ 320 static __inline boolean_t 321 tcp_paws_canreasslast(const struct tcpcb *tp, const struct tcphdr *th, int tlen) 322 { 323 const struct tseg_qent *last, *prev; 324 325 last = TAILQ_LAST(&tp->t_segq, tsegqe_head); 326 if (last == NULL) 327 return FALSE; 328 329 /* This segment comes immediately after the last pending segment */ 330 if (last->tqe_th->th_seq + last->tqe_len == th->th_seq) 331 return TRUE; 332 333 if (th->th_seq + tlen != last->tqe_th->th_seq) 334 return FALSE; 335 /* This segment comes immediately before the last pending segment */ 336 337 prev = TAILQ_PREV(last, tsegqe_head, tqe_q); 338 if (prev == NULL) { 339 /* 340 * No pending preceeding segment, we assume this segment 341 * could be reassembled. 342 */ 343 return TRUE; 344 } 345 346 /* This segment does not overlap with the preceeding segment */ 347 if (SEQ_GEQ(th->th_seq, prev->tqe_th->th_seq + prev->tqe_len)) 348 return TRUE; 349 350 return FALSE; 351 } 352 353 static __inline void 354 tcp_ncr_update_rxtthresh(struct tcpcb *tp) 355 { 356 int old_rxtthresh = tp->t_rxtthresh; 357 uint32_t ownd = tp->snd_max - tp->snd_una; 358 359 tp->t_rxtthresh = max(3, ((ownd / tp->t_maxseg) >> 1)); 360 if (tp->t_rxtthresh != old_rxtthresh) { 361 tcp_sack_update_lostseq(&tp->scb, tp->snd_una, 362 tp->t_maxseg, tp->t_rxtthresh); 363 } 364 } 365 366 static int 367 tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m) 368 { 369 struct tseg_qent *q; 370 struct tseg_qent *p = NULL; 371 struct tseg_qent *te; 372 struct socket *so = tp->t_inpcb->inp_socket; 373 int flags; 374 375 /* 376 * Call with th == NULL after become established to 377 * force pre-ESTABLISHED data up to user socket. 378 */ 379 if (th == NULL) 380 goto present; 381 382 /* 383 * Limit the number of segments in the reassembly queue to prevent 384 * holding on to too many segments (and thus running out of mbufs). 385 * Make sure to let the missing segment through which caused this 386 * queue. Always keep one global queue entry spare to be able to 387 * process the missing segment. 388 */ 389 if (th->th_seq != tp->rcv_nxt && 390 tcp_reass_qsize + 1 >= tcp_reass_maxseg) { 391 tcp_reass_overflows++; 392 tcpstat.tcps_rcvmemdrop++; 393 m_freem(m); 394 /* no SACK block to report */ 395 tp->reportblk.rblk_start = tp->reportblk.rblk_end; 396 return (0); 397 } 398 399 /* Allocate a new queue entry. */ 400 te = kmalloc(sizeof(struct tseg_qent), M_TSEGQ, M_INTWAIT | M_NULLOK); 401 if (te == NULL) { 402 tcpstat.tcps_rcvmemdrop++; 403 m_freem(m); 404 /* no SACK block to report */ 405 tp->reportblk.rblk_start = tp->reportblk.rblk_end; 406 return (0); 407 } 408 atomic_add_int(&tcp_reass_qsize, 1); 409 410 /* 411 * Find a segment which begins after this one does. 412 */ 413 TAILQ_FOREACH(q, &tp->t_segq, tqe_q) { 414 if (SEQ_GT(q->tqe_th->th_seq, th->th_seq)) 415 break; 416 p = q; 417 } 418 419 /* 420 * If there is a preceding segment, it may provide some of 421 * our data already. If so, drop the data from the incoming 422 * segment. If it provides all of our data, drop us. 423 */ 424 if (p != NULL) { 425 tcp_seq_diff_t i; 426 427 /* conversion to int (in i) handles seq wraparound */ 428 i = p->tqe_th->th_seq + p->tqe_len - th->th_seq; 429 if (i > 0) { /* overlaps preceding segment */ 430 tp->sack_flags |= 431 (TSACK_F_DUPSEG | TSACK_F_ENCLOSESEG); 432 /* enclosing block starts w/ preceding segment */ 433 tp->encloseblk.rblk_start = p->tqe_th->th_seq; 434 if (i >= *tlenp) { 435 /* preceding encloses incoming segment */ 436 tp->encloseblk.rblk_end = TCP_SACK_BLKEND( 437 p->tqe_th->th_seq + p->tqe_len, 438 p->tqe_th->th_flags); 439 tcpstat.tcps_rcvduppack++; 440 tcpstat.tcps_rcvdupbyte += *tlenp; 441 m_freem(m); 442 kfree(te, M_TSEGQ); 443 atomic_add_int(&tcp_reass_qsize, -1); 444 /* 445 * Try to present any queued data 446 * at the left window edge to the user. 447 * This is needed after the 3-WHS 448 * completes. 449 */ 450 goto present; /* ??? */ 451 } 452 m_adj(m, i); 453 *tlenp -= i; 454 th->th_seq += i; 455 /* incoming segment end is enclosing block end */ 456 tp->encloseblk.rblk_end = TCP_SACK_BLKEND( 457 th->th_seq + *tlenp, th->th_flags); 458 /* trim end of reported D-SACK block */ 459 tp->reportblk.rblk_end = th->th_seq; 460 } 461 } 462 tcpstat.tcps_rcvoopack++; 463 tcpstat.tcps_rcvoobyte += *tlenp; 464 465 /* 466 * While we overlap succeeding segments trim them or, 467 * if they are completely covered, dequeue them. 468 */ 469 while (q) { 470 tcp_seq_diff_t i = (th->th_seq + *tlenp) - q->tqe_th->th_seq; 471 tcp_seq qend = q->tqe_th->th_seq + q->tqe_len; 472 tcp_seq qend_sack = TCP_SACK_BLKEND(qend, q->tqe_th->th_flags); 473 struct tseg_qent *nq; 474 475 if (i <= 0) 476 break; 477 if (!(tp->sack_flags & TSACK_F_DUPSEG)) { 478 /* first time through */ 479 tp->sack_flags |= (TSACK_F_DUPSEG | TSACK_F_ENCLOSESEG); 480 tp->encloseblk = tp->reportblk; 481 /* report trailing duplicate D-SACK segment */ 482 tp->reportblk.rblk_start = q->tqe_th->th_seq; 483 } 484 if ((tp->sack_flags & TSACK_F_ENCLOSESEG) && 485 SEQ_GT(qend_sack, tp->encloseblk.rblk_end)) { 486 /* extend enclosing block if one exists */ 487 tp->encloseblk.rblk_end = qend_sack; 488 } 489 if (i < q->tqe_len) { 490 q->tqe_th->th_seq += i; 491 q->tqe_len -= i; 492 m_adj(q->tqe_m, i); 493 break; 494 } 495 496 nq = TAILQ_NEXT(q, tqe_q); 497 TAILQ_REMOVE(&tp->t_segq, q, tqe_q); 498 m_freem(q->tqe_m); 499 kfree(q, M_TSEGQ); 500 atomic_add_int(&tcp_reass_qsize, -1); 501 q = nq; 502 } 503 504 /* Insert the new segment queue entry into place. */ 505 te->tqe_m = m; 506 te->tqe_th = th; 507 te->tqe_len = *tlenp; 508 509 /* check if can coalesce with following segment */ 510 if (q != NULL && (th->th_seq + *tlenp == q->tqe_th->th_seq)) { 511 tcp_seq tend = te->tqe_th->th_seq + te->tqe_len; 512 tcp_seq tend_sack = TCP_SACK_BLKEND(tend, te->tqe_th->th_flags); 513 514 te->tqe_len += q->tqe_len; 515 if (q->tqe_th->th_flags & TH_FIN) 516 te->tqe_th->th_flags |= TH_FIN; 517 m_cat(te->tqe_m, q->tqe_m); 518 tp->encloseblk.rblk_end = tend_sack; 519 /* 520 * When not reporting a duplicate segment, use 521 * the larger enclosing block as the SACK block. 522 */ 523 if (!(tp->sack_flags & TSACK_F_DUPSEG)) 524 tp->reportblk.rblk_end = tend_sack; 525 TAILQ_REMOVE(&tp->t_segq, q, tqe_q); 526 kfree(q, M_TSEGQ); 527 atomic_add_int(&tcp_reass_qsize, -1); 528 } 529 530 if (p == NULL) { 531 TAILQ_INSERT_HEAD(&tp->t_segq, te, tqe_q); 532 } else { 533 /* check if can coalesce with preceding segment */ 534 if (p->tqe_th->th_seq + p->tqe_len == th->th_seq) { 535 p->tqe_len += te->tqe_len; 536 m_cat(p->tqe_m, te->tqe_m); 537 tp->encloseblk.rblk_start = p->tqe_th->th_seq; 538 /* 539 * When not reporting a duplicate segment, use 540 * the larger enclosing block as the SACK block. 541 */ 542 if (!(tp->sack_flags & TSACK_F_DUPSEG)) 543 tp->reportblk.rblk_start = p->tqe_th->th_seq; 544 kfree(te, M_TSEGQ); 545 atomic_add_int(&tcp_reass_qsize, -1); 546 } else { 547 TAILQ_INSERT_AFTER(&tp->t_segq, p, te, tqe_q); 548 } 549 } 550 551 present: 552 /* 553 * Present data to user, advancing rcv_nxt through 554 * completed sequence space. 555 */ 556 if (!TCPS_HAVEESTABLISHED(tp->t_state)) 557 return (0); 558 q = TAILQ_FIRST(&tp->t_segq); 559 if (q == NULL || q->tqe_th->th_seq != tp->rcv_nxt) 560 return (0); 561 tp->rcv_nxt += q->tqe_len; 562 if (!(tp->sack_flags & TSACK_F_DUPSEG)) { 563 /* no SACK block to report since ACK advanced */ 564 tp->reportblk.rblk_start = tp->reportblk.rblk_end; 565 } 566 /* no enclosing block to report since ACK advanced */ 567 tp->sack_flags &= ~TSACK_F_ENCLOSESEG; 568 flags = q->tqe_th->th_flags & TH_FIN; 569 TAILQ_REMOVE(&tp->t_segq, q, tqe_q); 570 KASSERT(TAILQ_EMPTY(&tp->t_segq) || 571 TAILQ_FIRST(&tp->t_segq)->tqe_th->th_seq != tp->rcv_nxt, 572 ("segment not coalesced")); 573 if (so->so_state & SS_CANTRCVMORE) { 574 m_freem(q->tqe_m); 575 } else { 576 lwkt_gettoken(&so->so_rcv.ssb_token); 577 ssb_appendstream(&so->so_rcv, q->tqe_m); 578 lwkt_reltoken(&so->so_rcv.ssb_token); 579 } 580 kfree(q, M_TSEGQ); 581 atomic_add_int(&tcp_reass_qsize, -1); 582 ND6_HINT(tp); 583 sorwakeup(so); 584 return (flags); 585 } 586 587 /* 588 * TCP input routine, follows pages 65-76 of the 589 * protocol specification dated September, 1981 very closely. 590 */ 591 #ifdef INET6 592 int 593 tcp6_input(struct mbuf **mp, int *offp, int proto) 594 { 595 struct mbuf *m = *mp; 596 struct in6_ifaddr *ia6; 597 598 IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE); 599 600 /* 601 * draft-itojun-ipv6-tcp-to-anycast 602 * better place to put this in? 603 */ 604 ia6 = ip6_getdstifaddr(m); 605 if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) { 606 struct ip6_hdr *ip6; 607 608 ip6 = mtod(m, struct ip6_hdr *); 609 icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR, 610 offsetof(struct ip6_hdr, ip6_dst)); 611 return (IPPROTO_DONE); 612 } 613 614 tcp_input(mp, offp, proto); 615 return (IPPROTO_DONE); 616 } 617 #endif 618 619 int 620 tcp_input(struct mbuf **mp, int *offp, int proto) 621 { 622 int off0; 623 struct tcphdr *th; 624 struct ip *ip = NULL; 625 struct ipovly *ipov; 626 struct inpcb *inp = NULL; 627 u_char *optp = NULL; 628 int optlen = 0; 629 int tlen, off; 630 int len = 0; 631 int drop_hdrlen; 632 struct tcpcb *tp = NULL; 633 int thflags; 634 struct socket *so = NULL; 635 int todrop, acked; 636 boolean_t ourfinisacked, needoutput = FALSE, delayed_dupack = FALSE; 637 tcp_seq th_dupack = 0; /* XXX gcc warning */ 638 u_long tiwin; 639 int recvwin; 640 struct tcpopt to; /* options in this segment */ 641 struct sockaddr_in *next_hop = NULL; 642 int rstreason; /* For badport_bandlim accounting purposes */ 643 int cpu; 644 struct ip6_hdr *ip6 = NULL; 645 struct mbuf *m; 646 #ifdef INET6 647 boolean_t isipv6; 648 #else 649 const boolean_t isipv6 = FALSE; 650 #endif 651 #ifdef TCPDEBUG 652 short ostate = 0; 653 #endif 654 655 off0 = *offp; 656 m = *mp; 657 *mp = NULL; 658 659 tcpstat.tcps_rcvtotal++; 660 661 if (m->m_pkthdr.fw_flags & IPFORWARD_MBUF_TAGGED) { 662 struct m_tag *mtag; 663 664 mtag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL); 665 KKASSERT(mtag != NULL); 666 next_hop = m_tag_data(mtag); 667 } 668 669 #ifdef INET6 670 isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? TRUE : FALSE; 671 #endif 672 673 if (isipv6) { 674 /* IP6_EXTHDR_CHECK() is already done at tcp6_input() */ 675 ip6 = mtod(m, struct ip6_hdr *); 676 tlen = (sizeof *ip6) + ntohs(ip6->ip6_plen) - off0; 677 if (in6_cksum(m, IPPROTO_TCP, off0, tlen)) { 678 tcpstat.tcps_rcvbadsum++; 679 goto drop; 680 } 681 th = (struct tcphdr *)((caddr_t)ip6 + off0); 682 683 /* 684 * Be proactive about unspecified IPv6 address in source. 685 * As we use all-zero to indicate unbounded/unconnected pcb, 686 * unspecified IPv6 address can be used to confuse us. 687 * 688 * Note that packets with unspecified IPv6 destination is 689 * already dropped in ip6_input. 690 */ 691 if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) { 692 /* XXX stat */ 693 goto drop; 694 } 695 } else { 696 /* 697 * Get IP and TCP header together in first mbuf. 698 * Note: IP leaves IP header in first mbuf. 699 */ 700 if (off0 > sizeof(struct ip)) { 701 ip_stripoptions(m); 702 off0 = sizeof(struct ip); 703 } 704 /* already checked and pulled up in ip_demux() */ 705 KASSERT(m->m_len >= sizeof(struct tcpiphdr), 706 ("TCP header not in one mbuf: m->m_len %d", m->m_len)); 707 ip = mtod(m, struct ip *); 708 ipov = (struct ipovly *)ip; 709 th = (struct tcphdr *)((caddr_t)ip + off0); 710 tlen = ip->ip_len; 711 712 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) { 713 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) 714 th->th_sum = m->m_pkthdr.csum_data; 715 else 716 th->th_sum = in_pseudo(ip->ip_src.s_addr, 717 ip->ip_dst.s_addr, 718 htonl(m->m_pkthdr.csum_data + 719 ip->ip_len + 720 IPPROTO_TCP)); 721 th->th_sum ^= 0xffff; 722 } else { 723 /* 724 * Checksum extended TCP header and data. 725 */ 726 len = sizeof(struct ip) + tlen; 727 bzero(ipov->ih_x1, sizeof ipov->ih_x1); 728 ipov->ih_len = (u_short)tlen; 729 ipov->ih_len = htons(ipov->ih_len); 730 th->th_sum = in_cksum(m, len); 731 } 732 if (th->th_sum) { 733 tcpstat.tcps_rcvbadsum++; 734 goto drop; 735 } 736 #ifdef INET6 737 /* Re-initialization for later version check */ 738 ip->ip_v = IPVERSION; 739 #endif 740 } 741 742 /* 743 * Check that TCP offset makes sense, 744 * pull out TCP options and adjust length. XXX 745 */ 746 off = th->th_off << 2; 747 /* already checked and pulled up in ip_demux() */ 748 KASSERT(off >= sizeof(struct tcphdr) && off <= tlen, 749 ("bad TCP data offset %d (tlen %d)", off, tlen)); 750 tlen -= off; /* tlen is used instead of ti->ti_len */ 751 if (off > sizeof(struct tcphdr)) { 752 if (isipv6) { 753 IP6_EXTHDR_CHECK(m, off0, off, IPPROTO_DONE); 754 ip6 = mtod(m, struct ip6_hdr *); 755 th = (struct tcphdr *)((caddr_t)ip6 + off0); 756 } else { 757 /* already pulled up in ip_demux() */ 758 KASSERT(m->m_len >= sizeof(struct ip) + off, 759 ("TCP header and options not in one mbuf: " 760 "m_len %d, off %d", m->m_len, off)); 761 } 762 optlen = off - sizeof(struct tcphdr); 763 optp = (u_char *)(th + 1); 764 } 765 thflags = th->th_flags; 766 767 #ifdef TCP_DROP_SYNFIN 768 /* 769 * If the drop_synfin option is enabled, drop all packets with 770 * both the SYN and FIN bits set. This prevents e.g. nmap from 771 * identifying the TCP/IP stack. 772 * 773 * This is a violation of the TCP specification. 774 */ 775 if (drop_synfin && (thflags & (TH_SYN | TH_FIN)) == (TH_SYN | TH_FIN)) 776 goto drop; 777 #endif 778 779 /* 780 * Convert TCP protocol specific fields to host format. 781 */ 782 th->th_seq = ntohl(th->th_seq); 783 th->th_ack = ntohl(th->th_ack); 784 th->th_win = ntohs(th->th_win); 785 th->th_urp = ntohs(th->th_urp); 786 787 /* 788 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options, 789 * until after ip6_savecontrol() is called and before other functions 790 * which don't want those proto headers. 791 * Because ip6_savecontrol() is going to parse the mbuf to 792 * search for data to be passed up to user-land, it wants mbuf 793 * parameters to be unchanged. 794 * XXX: the call of ip6_savecontrol() has been obsoleted based on 795 * latest version of the advanced API (20020110). 796 */ 797 drop_hdrlen = off0 + off; 798 799 /* 800 * Locate pcb for segment. 801 */ 802 findpcb: 803 /* IPFIREWALL_FORWARD section */ 804 if (next_hop != NULL && !isipv6) { /* IPv6 support is not there yet */ 805 /* 806 * Transparently forwarded. Pretend to be the destination. 807 * already got one like this? 808 */ 809 cpu = mycpu->gd_cpuid; 810 inp = in_pcblookup_hash(&tcbinfo[cpu], 811 ip->ip_src, th->th_sport, 812 ip->ip_dst, th->th_dport, 813 0, m->m_pkthdr.rcvif); 814 if (!inp) { 815 /* 816 * It's new. Try to find the ambushing socket. 817 */ 818 819 /* 820 * The rest of the ipfw code stores the port in 821 * host order. XXX 822 * (The IP address is still in network order.) 823 */ 824 in_port_t dport = next_hop->sin_port ? 825 htons(next_hop->sin_port) : 826 th->th_dport; 827 828 cpu = tcp_addrcpu(ip->ip_src.s_addr, th->th_sport, 829 next_hop->sin_addr.s_addr, dport); 830 inp = in_pcblookup_hash(&tcbinfo[cpu], 831 ip->ip_src, th->th_sport, 832 next_hop->sin_addr, dport, 833 1, m->m_pkthdr.rcvif); 834 } 835 } else { 836 if (isipv6) { 837 inp = in6_pcblookup_hash(&tcbinfo[0], 838 &ip6->ip6_src, th->th_sport, 839 &ip6->ip6_dst, th->th_dport, 840 1, m->m_pkthdr.rcvif); 841 } else { 842 cpu = mycpu->gd_cpuid; 843 inp = in_pcblookup_hash(&tcbinfo[cpu], 844 ip->ip_src, th->th_sport, 845 ip->ip_dst, th->th_dport, 846 1, m->m_pkthdr.rcvif); 847 } 848 } 849 850 /* 851 * If the state is CLOSED (i.e., TCB does not exist) then 852 * all data in the incoming segment is discarded. 853 * If the TCB exists but is in CLOSED state, it is embryonic, 854 * but should either do a listen or a connect soon. 855 */ 856 if (inp == NULL) { 857 if (log_in_vain) { 858 #ifdef INET6 859 char dbuf[INET6_ADDRSTRLEN+2], sbuf[INET6_ADDRSTRLEN+2]; 860 #else 861 char dbuf[sizeof "aaa.bbb.ccc.ddd"]; 862 char sbuf[sizeof "aaa.bbb.ccc.ddd"]; 863 #endif 864 if (isipv6) { 865 strcpy(dbuf, "["); 866 strcat(dbuf, ip6_sprintf(&ip6->ip6_dst)); 867 strcat(dbuf, "]"); 868 strcpy(sbuf, "["); 869 strcat(sbuf, ip6_sprintf(&ip6->ip6_src)); 870 strcat(sbuf, "]"); 871 } else { 872 strcpy(dbuf, inet_ntoa(ip->ip_dst)); 873 strcpy(sbuf, inet_ntoa(ip->ip_src)); 874 } 875 switch (log_in_vain) { 876 case 1: 877 if (!(thflags & TH_SYN)) 878 break; 879 case 2: 880 log(LOG_INFO, 881 "Connection attempt to TCP %s:%d " 882 "from %s:%d flags:0x%02x\n", 883 dbuf, ntohs(th->th_dport), sbuf, 884 ntohs(th->th_sport), thflags); 885 break; 886 default: 887 break; 888 } 889 } 890 if (blackhole) { 891 switch (blackhole) { 892 case 1: 893 if (thflags & TH_SYN) 894 goto drop; 895 break; 896 case 2: 897 goto drop; 898 default: 899 goto drop; 900 } 901 } 902 rstreason = BANDLIM_RST_CLOSEDPORT; 903 goto dropwithreset; 904 } 905 906 #ifdef IPSEC 907 if (isipv6) { 908 if (ipsec6_in_reject_so(m, inp->inp_socket)) { 909 ipsec6stat.in_polvio++; 910 goto drop; 911 } 912 } else { 913 if (ipsec4_in_reject_so(m, inp->inp_socket)) { 914 ipsecstat.in_polvio++; 915 goto drop; 916 } 917 } 918 #endif 919 #ifdef FAST_IPSEC 920 if (isipv6) { 921 if (ipsec6_in_reject(m, inp)) 922 goto drop; 923 } else { 924 if (ipsec4_in_reject(m, inp)) 925 goto drop; 926 } 927 #endif 928 /* Check the minimum TTL for socket. */ 929 #ifdef INET6 930 if ((isipv6 ? ip6->ip6_hlim : ip->ip_ttl) < inp->inp_ip_minttl) 931 goto drop; 932 #endif 933 934 tp = intotcpcb(inp); 935 if (tp == NULL) { 936 rstreason = BANDLIM_RST_CLOSEDPORT; 937 goto dropwithreset; 938 } 939 if (tp->t_state <= TCPS_CLOSED) 940 goto drop; 941 942 so = inp->inp_socket; 943 944 #ifdef TCPDEBUG 945 if (so->so_options & SO_DEBUG) { 946 ostate = tp->t_state; 947 if (isipv6) 948 bcopy(ip6, tcp_saveipgen, sizeof(*ip6)); 949 else 950 bcopy(ip, tcp_saveipgen, sizeof(*ip)); 951 tcp_savetcp = *th; 952 } 953 #endif 954 955 bzero(&to, sizeof to); 956 957 if (so->so_options & SO_ACCEPTCONN) { 958 struct in_conninfo inc; 959 960 #ifdef INET6 961 inc.inc_isipv6 = (isipv6 == TRUE); 962 #endif 963 if (isipv6) { 964 inc.inc6_faddr = ip6->ip6_src; 965 inc.inc6_laddr = ip6->ip6_dst; 966 inc.inc6_route.ro_rt = NULL; /* XXX */ 967 } else { 968 inc.inc_faddr = ip->ip_src; 969 inc.inc_laddr = ip->ip_dst; 970 inc.inc_route.ro_rt = NULL; /* XXX */ 971 } 972 inc.inc_fport = th->th_sport; 973 inc.inc_lport = th->th_dport; 974 975 /* 976 * If the state is LISTEN then ignore segment if it contains 977 * a RST. If the segment contains an ACK then it is bad and 978 * send a RST. If it does not contain a SYN then it is not 979 * interesting; drop it. 980 * 981 * If the state is SYN_RECEIVED (syncache) and seg contains 982 * an ACK, but not for our SYN/ACK, send a RST. If the seg 983 * contains a RST, check the sequence number to see if it 984 * is a valid reset segment. 985 */ 986 if ((thflags & (TH_RST | TH_ACK | TH_SYN)) != TH_SYN) { 987 if ((thflags & (TH_RST | TH_ACK | TH_SYN)) == TH_ACK) { 988 if (!syncache_expand(&inc, th, &so, m)) { 989 /* 990 * No syncache entry, or ACK was not 991 * for our SYN/ACK. Send a RST. 992 */ 993 tcpstat.tcps_badsyn++; 994 rstreason = BANDLIM_RST_OPENPORT; 995 goto dropwithreset; 996 } 997 998 /* 999 * Could not complete 3-way handshake, 1000 * connection is being closed down, and 1001 * syncache will free mbuf. 1002 */ 1003 if (so == NULL) 1004 return(IPPROTO_DONE); 1005 1006 /* 1007 * We must be in the correct protocol thread 1008 * for this connection. 1009 */ 1010 KKASSERT(so->so_port == &curthread->td_msgport); 1011 1012 /* 1013 * Socket is created in state SYN_RECEIVED. 1014 * Continue processing segment. 1015 */ 1016 inp = so->so_pcb; 1017 tp = intotcpcb(inp); 1018 /* 1019 * This is what would have happened in 1020 * tcp_output() when the SYN,ACK was sent. 1021 */ 1022 tp->snd_up = tp->snd_una; 1023 tp->snd_max = tp->snd_nxt = tp->iss + 1; 1024 tp->last_ack_sent = tp->rcv_nxt; 1025 1026 goto after_listen; 1027 } 1028 if (thflags & TH_RST) { 1029 syncache_chkrst(&inc, th); 1030 goto drop; 1031 } 1032 if (thflags & TH_ACK) { 1033 syncache_badack(&inc); 1034 tcpstat.tcps_badsyn++; 1035 rstreason = BANDLIM_RST_OPENPORT; 1036 goto dropwithreset; 1037 } 1038 goto drop; 1039 } 1040 1041 /* 1042 * Segment's flags are (SYN) or (SYN | FIN). 1043 */ 1044 #ifdef INET6 1045 /* 1046 * If deprecated address is forbidden, 1047 * we do not accept SYN to deprecated interface 1048 * address to prevent any new inbound connection from 1049 * getting established. 1050 * When we do not accept SYN, we send a TCP RST, 1051 * with deprecated source address (instead of dropping 1052 * it). We compromise it as it is much better for peer 1053 * to send a RST, and RST will be the final packet 1054 * for the exchange. 1055 * 1056 * If we do not forbid deprecated addresses, we accept 1057 * the SYN packet. RFC2462 does not suggest dropping 1058 * SYN in this case. 1059 * If we decipher RFC2462 5.5.4, it says like this: 1060 * 1. use of deprecated addr with existing 1061 * communication is okay - "SHOULD continue to be 1062 * used" 1063 * 2. use of it with new communication: 1064 * (2a) "SHOULD NOT be used if alternate address 1065 * with sufficient scope is available" 1066 * (2b) nothing mentioned otherwise. 1067 * Here we fall into (2b) case as we have no choice in 1068 * our source address selection - we must obey the peer. 1069 * 1070 * The wording in RFC2462 is confusing, and there are 1071 * multiple description text for deprecated address 1072 * handling - worse, they are not exactly the same. 1073 * I believe 5.5.4 is the best one, so we follow 5.5.4. 1074 */ 1075 if (isipv6 && !ip6_use_deprecated) { 1076 struct in6_ifaddr *ia6; 1077 1078 if ((ia6 = ip6_getdstifaddr(m)) && 1079 (ia6->ia6_flags & IN6_IFF_DEPRECATED)) { 1080 tp = NULL; 1081 rstreason = BANDLIM_RST_OPENPORT; 1082 goto dropwithreset; 1083 } 1084 } 1085 #endif 1086 /* 1087 * If it is from this socket, drop it, it must be forged. 1088 * Don't bother responding if the destination was a broadcast. 1089 */ 1090 if (th->th_dport == th->th_sport) { 1091 if (isipv6) { 1092 if (IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, 1093 &ip6->ip6_src)) 1094 goto drop; 1095 } else { 1096 if (ip->ip_dst.s_addr == ip->ip_src.s_addr) 1097 goto drop; 1098 } 1099 } 1100 /* 1101 * RFC1122 4.2.3.10, p. 104: discard bcast/mcast SYN 1102 * 1103 * Note that it is quite possible to receive unicast 1104 * link-layer packets with a broadcast IP address. Use 1105 * in_broadcast() to find them. 1106 */ 1107 if (m->m_flags & (M_BCAST | M_MCAST)) 1108 goto drop; 1109 if (isipv6) { 1110 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || 1111 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) 1112 goto drop; 1113 } else { 1114 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 1115 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || 1116 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || 1117 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) 1118 goto drop; 1119 } 1120 /* 1121 * SYN appears to be valid; create compressed TCP state 1122 * for syncache, or perform t/tcp connection. 1123 */ 1124 if (so->so_qlen <= so->so_qlimit) { 1125 tcp_dooptions(&to, optp, optlen, TRUE, th->th_ack); 1126 if (!syncache_add(&inc, &to, th, so, m)) 1127 goto drop; 1128 1129 /* 1130 * Entry added to syncache, mbuf used to 1131 * send SYN,ACK packet. 1132 */ 1133 return(IPPROTO_DONE); 1134 } 1135 goto drop; 1136 } 1137 1138 after_listen: 1139 /* 1140 * Should not happen - syncache should pick up these connections. 1141 * 1142 * Once we are past handling listen sockets we must be in the 1143 * correct protocol processing thread. 1144 */ 1145 KASSERT(tp->t_state != TCPS_LISTEN, ("tcp_input: TCPS_LISTEN state")); 1146 KKASSERT(so->so_port == &curthread->td_msgport); 1147 1148 /* Unscale the window into a 32-bit value. */ 1149 if (!(thflags & TH_SYN)) 1150 tiwin = th->th_win << tp->snd_scale; 1151 else 1152 tiwin = th->th_win; 1153 1154 /* 1155 * This is the second part of the MSS DoS prevention code (after 1156 * minmss on the sending side) and it deals with too many too small 1157 * tcp packets in a too short timeframe (1 second). 1158 * 1159 * XXX Removed. This code was crap. It does not scale to network 1160 * speed, and default values break NFS. Gone. 1161 */ 1162 /* REMOVED */ 1163 1164 /* 1165 * Segment received on connection. 1166 * 1167 * Reset idle time and keep-alive timer. Don't waste time if less 1168 * then a second has elapsed. 1169 */ 1170 if ((int)(ticks - tp->t_rcvtime) > hz) 1171 tcp_timer_keep_activity(tp, thflags); 1172 1173 /* 1174 * Process options. 1175 * XXX this is tradtitional behavior, may need to be cleaned up. 1176 */ 1177 tcp_dooptions(&to, optp, optlen, (thflags & TH_SYN) != 0, th->th_ack); 1178 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 1179 if ((to.to_flags & TOF_SCALE) && (tp->t_flags & TF_REQ_SCALE)) { 1180 tp->t_flags |= TF_RCVD_SCALE; 1181 tp->snd_scale = to.to_requested_s_scale; 1182 } 1183 1184 /* 1185 * Initial send window; will be updated upon next ACK 1186 */ 1187 tp->snd_wnd = th->th_win; 1188 1189 if (to.to_flags & TOF_TS) { 1190 tp->t_flags |= TF_RCVD_TSTMP; 1191 tp->ts_recent = to.to_tsval; 1192 tp->ts_recent_age = ticks; 1193 } 1194 if (!(to.to_flags & TOF_MSS)) 1195 to.to_mss = 0; 1196 tcp_mss(tp, to.to_mss); 1197 /* 1198 * Only set the TF_SACK_PERMITTED per-connection flag 1199 * if we got a SACK_PERMITTED option from the other side 1200 * and the global tcp_do_sack variable is true. 1201 */ 1202 if (tcp_do_sack && (to.to_flags & TOF_SACK_PERMITTED)) 1203 tp->t_flags |= TF_SACK_PERMITTED; 1204 } 1205 1206 /* 1207 * Header prediction: check for the two common cases 1208 * of a uni-directional data xfer. If the packet has 1209 * no control flags, is in-sequence, the window didn't 1210 * change and we're not retransmitting, it's a 1211 * candidate. If the length is zero and the ack moved 1212 * forward, we're the sender side of the xfer. Just 1213 * free the data acked & wake any higher level process 1214 * that was blocked waiting for space. If the length 1215 * is non-zero and the ack didn't move, we're the 1216 * receiver side. If we're getting packets in-order 1217 * (the reassembly queue is empty), add the data to 1218 * the socket buffer and note that we need a delayed ack. 1219 * Make sure that the hidden state-flags are also off. 1220 * Since we check for TCPS_ESTABLISHED above, it can only 1221 * be TH_NEEDSYN. 1222 */ 1223 if (tp->t_state == TCPS_ESTABLISHED && 1224 (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK && 1225 !(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)) && 1226 (!(to.to_flags & TOF_TS) || 1227 TSTMP_GEQ(to.to_tsval, tp->ts_recent)) && 1228 th->th_seq == tp->rcv_nxt && 1229 tp->snd_nxt == tp->snd_max) { 1230 1231 /* 1232 * If last ACK falls within this segment's sequence numbers, 1233 * record the timestamp. 1234 * NOTE that the test is modified according to the latest 1235 * proposal of the tcplw@cray.com list (Braden 1993/04/26). 1236 */ 1237 if ((to.to_flags & TOF_TS) && 1238 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 1239 tp->ts_recent_age = ticks; 1240 tp->ts_recent = to.to_tsval; 1241 } 1242 1243 if (tlen == 0) { 1244 if (SEQ_GT(th->th_ack, tp->snd_una) && 1245 SEQ_LEQ(th->th_ack, tp->snd_max) && 1246 tp->snd_cwnd >= tp->snd_wnd && 1247 !IN_FASTRECOVERY(tp)) { 1248 /* 1249 * This is a pure ack for outstanding data. 1250 */ 1251 ++tcpstat.tcps_predack; 1252 /* 1253 * "bad retransmit" recovery 1254 * 1255 * If Eifel detection applies, then 1256 * it is deterministic, so use it 1257 * unconditionally over the old heuristic. 1258 * Otherwise, fall back to the old heuristic. 1259 */ 1260 if (tcp_do_eifel_detect && 1261 (to.to_flags & TOF_TS) && to.to_tsecr && 1262 (tp->rxt_flags & TRXT_F_FIRSTACCACK)) { 1263 /* Eifel detection applicable. */ 1264 if (to.to_tsecr < tp->t_rexmtTS) { 1265 tcp_revert_congestion_state(tp); 1266 ++tcpstat.tcps_eifeldetected; 1267 if (tp->t_rxtshift != 1 || 1268 ticks >= tp->t_badrxtwin) 1269 ++tcpstat.tcps_rttcantdetect; 1270 } 1271 } else if (tp->t_rxtshift == 1 && 1272 ticks < tp->t_badrxtwin) { 1273 tcp_revert_congestion_state(tp); 1274 ++tcpstat.tcps_rttdetected; 1275 } 1276 tp->rxt_flags &= ~(TRXT_F_FIRSTACCACK | 1277 TRXT_F_FASTREXMT | TRXT_F_EARLYREXMT); 1278 /* 1279 * Recalculate the retransmit timer / rtt. 1280 * 1281 * Some machines (certain windows boxes) 1282 * send broken timestamp replies during the 1283 * SYN+ACK phase, ignore timestamps of 0. 1284 */ 1285 if ((to.to_flags & TOF_TS) && to.to_tsecr) { 1286 tcp_xmit_timer(tp, 1287 ticks - to.to_tsecr + 1, 1288 th->th_ack); 1289 } else if (tp->t_rtttime && 1290 SEQ_GT(th->th_ack, tp->t_rtseq)) { 1291 tcp_xmit_timer(tp, 1292 ticks - tp->t_rtttime, 1293 th->th_ack); 1294 } 1295 tcp_xmit_bandwidth_limit(tp, th->th_ack); 1296 acked = th->th_ack - tp->snd_una; 1297 tcpstat.tcps_rcvackpack++; 1298 tcpstat.tcps_rcvackbyte += acked; 1299 sbdrop(&so->so_snd.sb, acked); 1300 tp->snd_recover = th->th_ack - 1; 1301 tp->snd_una = th->th_ack; 1302 tp->t_dupacks = 0; 1303 /* 1304 * Update window information. 1305 */ 1306 if (tiwin != tp->snd_wnd && 1307 acceptable_window_update(tp, th, tiwin)) { 1308 /* keep track of pure window updates */ 1309 if (tp->snd_wl2 == th->th_ack && 1310 tiwin > tp->snd_wnd) 1311 tcpstat.tcps_rcvwinupd++; 1312 tp->snd_wnd = tiwin; 1313 tp->snd_wl1 = th->th_seq; 1314 tp->snd_wl2 = th->th_ack; 1315 if (tp->snd_wnd > tp->max_sndwnd) 1316 tp->max_sndwnd = tp->snd_wnd; 1317 } 1318 m_freem(m); 1319 ND6_HINT(tp); /* some progress has been done */ 1320 /* 1321 * If all outstanding data are acked, stop 1322 * retransmit timer, otherwise restart timer 1323 * using current (possibly backed-off) value. 1324 * If process is waiting for space, 1325 * wakeup/selwakeup/signal. If data 1326 * are ready to send, let tcp_output 1327 * decide between more output or persist. 1328 */ 1329 if (tp->snd_una == tp->snd_max) { 1330 tcp_callout_stop(tp, tp->tt_rexmt); 1331 } else if (!tcp_callout_active(tp, 1332 tp->tt_persist)) { 1333 tcp_callout_reset(tp, tp->tt_rexmt, 1334 tp->t_rxtcur, tcp_timer_rexmt); 1335 } 1336 sowwakeup(so); 1337 if (so->so_snd.ssb_cc > 0) 1338 tcp_output(tp); 1339 return(IPPROTO_DONE); 1340 } 1341 } else if (tiwin == tp->snd_wnd && 1342 th->th_ack == tp->snd_una && 1343 TAILQ_EMPTY(&tp->t_segq) && 1344 tlen <= ssb_space(&so->so_rcv)) { 1345 u_long newsize = 0; /* automatic sockbuf scaling */ 1346 /* 1347 * This is a pure, in-sequence data packet 1348 * with nothing on the reassembly queue and 1349 * we have enough buffer space to take it. 1350 */ 1351 ++tcpstat.tcps_preddat; 1352 tp->rcv_nxt += tlen; 1353 tcpstat.tcps_rcvpack++; 1354 tcpstat.tcps_rcvbyte += tlen; 1355 ND6_HINT(tp); /* some progress has been done */ 1356 /* 1357 * Automatic sizing of receive socket buffer. Often the send 1358 * buffer size is not optimally adjusted to the actual network 1359 * conditions at hand (delay bandwidth product). Setting the 1360 * buffer size too small limits throughput on links with high 1361 * bandwidth and high delay (eg. trans-continental/oceanic links). 1362 * 1363 * On the receive side the socket buffer memory is only rarely 1364 * used to any significant extent. This allows us to be much 1365 * more aggressive in scaling the receive socket buffer. For 1366 * the case that the buffer space is actually used to a large 1367 * extent and we run out of kernel memory we can simply drop 1368 * the new segments; TCP on the sender will just retransmit it 1369 * later. Setting the buffer size too big may only consume too 1370 * much kernel memory if the application doesn't read() from 1371 * the socket or packet loss or reordering makes use of the 1372 * reassembly queue. 1373 * 1374 * The criteria to step up the receive buffer one notch are: 1375 * 1. the number of bytes received during the time it takes 1376 * one timestamp to be reflected back to us (the RTT); 1377 * 2. received bytes per RTT is within seven eighth of the 1378 * current socket buffer size; 1379 * 3. receive buffer size has not hit maximal automatic size; 1380 * 1381 * This algorithm does one step per RTT at most and only if 1382 * we receive a bulk stream w/o packet losses or reorderings. 1383 * Shrinking the buffer during idle times is not necessary as 1384 * it doesn't consume any memory when idle. 1385 * 1386 * TODO: Only step up if the application is actually serving 1387 * the buffer to better manage the socket buffer resources. 1388 */ 1389 if (tcp_do_autorcvbuf && 1390 to.to_tsecr && 1391 (so->so_rcv.ssb_flags & SSB_AUTOSIZE)) { 1392 if (to.to_tsecr > tp->rfbuf_ts && 1393 to.to_tsecr - tp->rfbuf_ts < hz) { 1394 if (tp->rfbuf_cnt > 1395 (so->so_rcv.ssb_hiwat / 8 * 7) && 1396 so->so_rcv.ssb_hiwat < 1397 tcp_autorcvbuf_max) { 1398 newsize = 1399 ulmin(so->so_rcv.ssb_hiwat + 1400 tcp_autorcvbuf_inc, 1401 tcp_autorcvbuf_max); 1402 } 1403 /* Start over with next RTT. */ 1404 tp->rfbuf_ts = 0; 1405 tp->rfbuf_cnt = 0; 1406 } else 1407 tp->rfbuf_cnt += tlen; /* add up */ 1408 } 1409 /* 1410 * Add data to socket buffer. 1411 */ 1412 if (so->so_state & SS_CANTRCVMORE) { 1413 m_freem(m); 1414 } else { 1415 /* 1416 * Set new socket buffer size, give up when 1417 * limit is reached. 1418 * 1419 * Adjusting the size can mess up ACK 1420 * sequencing when pure window updates are 1421 * being avoided (which is the default), 1422 * so force an ack. 1423 */ 1424 lwkt_gettoken(&so->so_rcv.ssb_token); 1425 if (newsize) { 1426 tp->t_flags |= TF_RXRESIZED; 1427 if (!ssb_reserve(&so->so_rcv, newsize, 1428 so, NULL)) { 1429 atomic_clear_int(&so->so_rcv.ssb_flags, SSB_AUTOSIZE); 1430 } 1431 if (newsize >= 1432 (TCP_MAXWIN << tp->rcv_scale)) { 1433 atomic_clear_int(&so->so_rcv.ssb_flags, SSB_AUTOSIZE); 1434 } 1435 } 1436 m_adj(m, drop_hdrlen); /* delayed header drop */ 1437 ssb_appendstream(&so->so_rcv, m); 1438 lwkt_reltoken(&so->so_rcv.ssb_token); 1439 } 1440 sorwakeup(so); 1441 /* 1442 * This code is responsible for most of the ACKs 1443 * the TCP stack sends back after receiving a data 1444 * packet. Note that the DELAY_ACK check fails if 1445 * the delack timer is already running, which results 1446 * in an ack being sent every other packet (which is 1447 * what we want). 1448 * 1449 * We then further aggregate acks by not actually 1450 * sending one until the protocol thread has completed 1451 * processing the current backlog of packets. This 1452 * does not delay the ack any further, but allows us 1453 * to take advantage of the packet aggregation that 1454 * high speed NICs do (usually blocks of 8-10 packets) 1455 * to send a single ack rather then four or five acks, 1456 * greatly reducing the ack rate, the return channel 1457 * bandwidth, and the protocol overhead on both ends. 1458 * 1459 * Since this also has the effect of slowing down 1460 * the exponential slow-start ramp-up, systems with 1461 * very large bandwidth-delay products might want 1462 * to turn the feature off. 1463 */ 1464 if (DELAY_ACK(tp)) { 1465 tcp_callout_reset(tp, tp->tt_delack, 1466 tcp_delacktime, tcp_timer_delack); 1467 } else if (tcp_aggregate_acks) { 1468 tp->t_flags |= TF_ACKNOW; 1469 if (!(tp->t_flags & TF_ONOUTPUTQ)) { 1470 tp->t_flags |= TF_ONOUTPUTQ; 1471 tp->tt_cpu = mycpu->gd_cpuid; 1472 TAILQ_INSERT_TAIL( 1473 &tcpcbackq[tp->tt_cpu], 1474 tp, t_outputq); 1475 } 1476 } else { 1477 tp->t_flags |= TF_ACKNOW; 1478 tcp_output(tp); 1479 } 1480 return(IPPROTO_DONE); 1481 } 1482 } 1483 1484 /* 1485 * Calculate amount of space in receive window, 1486 * and then do TCP input processing. 1487 * Receive window is amount of space in rcv queue, 1488 * but not less than advertised window. 1489 */ 1490 recvwin = ssb_space(&so->so_rcv); 1491 if (recvwin < 0) 1492 recvwin = 0; 1493 tp->rcv_wnd = imax(recvwin, (int)(tp->rcv_adv - tp->rcv_nxt)); 1494 1495 /* Reset receive buffer auto scaling when not in bulk receive mode. */ 1496 tp->rfbuf_ts = 0; 1497 tp->rfbuf_cnt = 0; 1498 1499 switch (tp->t_state) { 1500 /* 1501 * If the state is SYN_RECEIVED: 1502 * if seg contains an ACK, but not for our SYN/ACK, send a RST. 1503 */ 1504 case TCPS_SYN_RECEIVED: 1505 if ((thflags & TH_ACK) && 1506 (SEQ_LEQ(th->th_ack, tp->snd_una) || 1507 SEQ_GT(th->th_ack, tp->snd_max))) { 1508 rstreason = BANDLIM_RST_OPENPORT; 1509 goto dropwithreset; 1510 } 1511 break; 1512 1513 /* 1514 * If the state is SYN_SENT: 1515 * if seg contains an ACK, but not for our SYN, drop the input. 1516 * if seg contains a RST, then drop the connection. 1517 * if seg does not contain SYN, then drop it. 1518 * Otherwise this is an acceptable SYN segment 1519 * initialize tp->rcv_nxt and tp->irs 1520 * if seg contains ack then advance tp->snd_una 1521 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state 1522 * arrange for segment to be acked (eventually) 1523 * continue processing rest of data/controls, beginning with URG 1524 */ 1525 case TCPS_SYN_SENT: 1526 if ((thflags & TH_ACK) && 1527 (SEQ_LEQ(th->th_ack, tp->iss) || 1528 SEQ_GT(th->th_ack, tp->snd_max))) { 1529 rstreason = BANDLIM_UNLIMITED; 1530 goto dropwithreset; 1531 } 1532 if (thflags & TH_RST) { 1533 if (thflags & TH_ACK) 1534 tp = tcp_drop(tp, ECONNREFUSED); 1535 goto drop; 1536 } 1537 if (!(thflags & TH_SYN)) 1538 goto drop; 1539 1540 tp->irs = th->th_seq; 1541 tcp_rcvseqinit(tp); 1542 if (thflags & TH_ACK) { 1543 /* Our SYN was acked. */ 1544 tcpstat.tcps_connects++; 1545 soisconnected(so); 1546 /* Do window scaling on this connection? */ 1547 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 1548 (TF_RCVD_SCALE | TF_REQ_SCALE)) 1549 tp->rcv_scale = tp->request_r_scale; 1550 tp->rcv_adv += tp->rcv_wnd; 1551 tp->snd_una++; /* SYN is acked */ 1552 tcp_callout_stop(tp, tp->tt_rexmt); 1553 /* 1554 * If there's data, delay ACK; if there's also a FIN 1555 * ACKNOW will be turned on later. 1556 */ 1557 if (DELAY_ACK(tp) && tlen != 0) { 1558 tcp_callout_reset(tp, tp->tt_delack, 1559 tcp_delacktime, tcp_timer_delack); 1560 } else { 1561 tp->t_flags |= TF_ACKNOW; 1562 } 1563 /* 1564 * Received <SYN,ACK> in SYN_SENT[*] state. 1565 * Transitions: 1566 * SYN_SENT --> ESTABLISHED 1567 * SYN_SENT* --> FIN_WAIT_1 1568 */ 1569 tp->t_starttime = ticks; 1570 if (tp->t_flags & TF_NEEDFIN) { 1571 tp->t_state = TCPS_FIN_WAIT_1; 1572 tp->t_flags &= ~TF_NEEDFIN; 1573 thflags &= ~TH_SYN; 1574 } else { 1575 tcp_established(tp); 1576 } 1577 } else { 1578 /* 1579 * Received initial SYN in SYN-SENT[*] state => 1580 * simultaneous open. 1581 * Do 3-way handshake: 1582 * SYN-SENT -> SYN-RECEIVED 1583 * SYN-SENT* -> SYN-RECEIVED* 1584 */ 1585 tp->t_flags |= TF_ACKNOW; 1586 tcp_callout_stop(tp, tp->tt_rexmt); 1587 tp->t_state = TCPS_SYN_RECEIVED; 1588 } 1589 1590 /* 1591 * Advance th->th_seq to correspond to first data byte. 1592 * If data, trim to stay within window, 1593 * dropping FIN if necessary. 1594 */ 1595 th->th_seq++; 1596 if (tlen > tp->rcv_wnd) { 1597 todrop = tlen - tp->rcv_wnd; 1598 m_adj(m, -todrop); 1599 tlen = tp->rcv_wnd; 1600 thflags &= ~TH_FIN; 1601 tcpstat.tcps_rcvpackafterwin++; 1602 tcpstat.tcps_rcvbyteafterwin += todrop; 1603 } 1604 tp->snd_wl1 = th->th_seq - 1; 1605 tp->rcv_up = th->th_seq; 1606 /* 1607 * Client side of transaction: already sent SYN and data. 1608 * If the remote host used T/TCP to validate the SYN, 1609 * our data will be ACK'd; if so, enter normal data segment 1610 * processing in the middle of step 5, ack processing. 1611 * Otherwise, goto step 6. 1612 */ 1613 if (thflags & TH_ACK) 1614 goto process_ACK; 1615 1616 goto step6; 1617 1618 /* 1619 * If the state is LAST_ACK or CLOSING or TIME_WAIT: 1620 * do normal processing (we no longer bother with T/TCP). 1621 */ 1622 case TCPS_LAST_ACK: 1623 case TCPS_CLOSING: 1624 case TCPS_TIME_WAIT: 1625 break; /* continue normal processing */ 1626 } 1627 1628 /* 1629 * States other than LISTEN or SYN_SENT. 1630 * First check the RST flag and sequence number since reset segments 1631 * are exempt from the timestamp and connection count tests. This 1632 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix 1633 * below which allowed reset segments in half the sequence space 1634 * to fall though and be processed (which gives forged reset 1635 * segments with a random sequence number a 50 percent chance of 1636 * killing a connection). 1637 * Then check timestamp, if present. 1638 * Then check the connection count, if present. 1639 * Then check that at least some bytes of segment are within 1640 * receive window. If segment begins before rcv_nxt, 1641 * drop leading data (and SYN); if nothing left, just ack. 1642 * 1643 * 1644 * If the RST bit is set, check the sequence number to see 1645 * if this is a valid reset segment. 1646 * RFC 793 page 37: 1647 * In all states except SYN-SENT, all reset (RST) segments 1648 * are validated by checking their SEQ-fields. A reset is 1649 * valid if its sequence number is in the window. 1650 * Note: this does not take into account delayed ACKs, so 1651 * we should test against last_ack_sent instead of rcv_nxt. 1652 * The sequence number in the reset segment is normally an 1653 * echo of our outgoing acknowledgement numbers, but some hosts 1654 * send a reset with the sequence number at the rightmost edge 1655 * of our receive window, and we have to handle this case. 1656 * If we have multiple segments in flight, the intial reset 1657 * segment sequence numbers will be to the left of last_ack_sent, 1658 * but they will eventually catch up. 1659 * In any case, it never made sense to trim reset segments to 1660 * fit the receive window since RFC 1122 says: 1661 * 4.2.2.12 RST Segment: RFC-793 Section 3.4 1662 * 1663 * A TCP SHOULD allow a received RST segment to include data. 1664 * 1665 * DISCUSSION 1666 * It has been suggested that a RST segment could contain 1667 * ASCII text that encoded and explained the cause of the 1668 * RST. No standard has yet been established for such 1669 * data. 1670 * 1671 * If the reset segment passes the sequence number test examine 1672 * the state: 1673 * SYN_RECEIVED STATE: 1674 * If passive open, return to LISTEN state. 1675 * If active open, inform user that connection was refused. 1676 * ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES: 1677 * Inform user that connection was reset, and close tcb. 1678 * CLOSING, LAST_ACK STATES: 1679 * Close the tcb. 1680 * TIME_WAIT STATE: 1681 * Drop the segment - see Stevens, vol. 2, p. 964 and 1682 * RFC 1337. 1683 */ 1684 if (thflags & TH_RST) { 1685 if (SEQ_GEQ(th->th_seq, tp->last_ack_sent) && 1686 SEQ_LEQ(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) { 1687 switch (tp->t_state) { 1688 1689 case TCPS_SYN_RECEIVED: 1690 so->so_error = ECONNREFUSED; 1691 goto close; 1692 1693 case TCPS_ESTABLISHED: 1694 case TCPS_FIN_WAIT_1: 1695 case TCPS_FIN_WAIT_2: 1696 case TCPS_CLOSE_WAIT: 1697 so->so_error = ECONNRESET; 1698 close: 1699 tp->t_state = TCPS_CLOSED; 1700 tcpstat.tcps_drops++; 1701 tp = tcp_close(tp); 1702 break; 1703 1704 case TCPS_CLOSING: 1705 case TCPS_LAST_ACK: 1706 tp = tcp_close(tp); 1707 break; 1708 1709 case TCPS_TIME_WAIT: 1710 break; 1711 } 1712 } 1713 goto drop; 1714 } 1715 1716 /* 1717 * RFC 1323 PAWS: If we have a timestamp reply on this segment 1718 * and it's less than ts_recent, drop it. 1719 */ 1720 if ((to.to_flags & TOF_TS) && tp->ts_recent != 0 && 1721 TSTMP_LT(to.to_tsval, tp->ts_recent)) { 1722 /* Check to see if ts_recent is over 24 days old. */ 1723 if ((int)(ticks - tp->ts_recent_age) > TCP_PAWS_IDLE) { 1724 /* 1725 * Invalidate ts_recent. If this segment updates 1726 * ts_recent, the age will be reset later and ts_recent 1727 * will get a valid value. If it does not, setting 1728 * ts_recent to zero will at least satisfy the 1729 * requirement that zero be placed in the timestamp 1730 * echo reply when ts_recent isn't valid. The 1731 * age isn't reset until we get a valid ts_recent 1732 * because we don't want out-of-order segments to be 1733 * dropped when ts_recent is old. 1734 */ 1735 tp->ts_recent = 0; 1736 } else if (tcp_paws_tolerance && tlen != 0 && 1737 tp->t_state == TCPS_ESTABLISHED && 1738 (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK&& 1739 !(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)) && 1740 th->th_ack == tp->snd_una && 1741 tiwin == tp->snd_wnd && 1742 TSTMP_GEQ(to.to_tsval + tcp_paws_tolerance, tp->ts_recent)&& 1743 (th->th_seq == tp->rcv_nxt || 1744 (SEQ_GT(th->th_seq, tp->rcv_nxt) && 1745 tcp_paws_canreasslast(tp, th, tlen)))) { 1746 /* 1747 * This tends to prevent valid new segments from being 1748 * dropped by the reordered segments sent by the fast 1749 * retransmission algorithm on the sending side, i.e. 1750 * the fast retransmitted segment w/ larger timestamp 1751 * arrives earlier than the previously sent new segments 1752 * w/ smaller timestamp. 1753 * 1754 * If following conditions are met, the segment is 1755 * accepted: 1756 * - The segment contains data 1757 * - The connection is established 1758 * - The header does not contain important flags 1759 * - SYN or FIN is not needed 1760 * - It does not acknowledge new data 1761 * - Receive window is not changed 1762 * - The timestamp is within "acceptable" range 1763 * - The new segment is what we are expecting or 1764 * the new segment could be merged w/ the last 1765 * pending segment on the reassemble queue 1766 */ 1767 tcpstat.tcps_pawsaccept++; 1768 tcpstat.tcps_pawsdrop++; 1769 } else { 1770 tcpstat.tcps_rcvduppack++; 1771 tcpstat.tcps_rcvdupbyte += tlen; 1772 tcpstat.tcps_pawsdrop++; 1773 if (tlen) 1774 goto dropafterack; 1775 goto drop; 1776 } 1777 } 1778 1779 /* 1780 * In the SYN-RECEIVED state, validate that the packet belongs to 1781 * this connection before trimming the data to fit the receive 1782 * window. Check the sequence number versus IRS since we know 1783 * the sequence numbers haven't wrapped. This is a partial fix 1784 * for the "LAND" DoS attack. 1785 */ 1786 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) { 1787 rstreason = BANDLIM_RST_OPENPORT; 1788 goto dropwithreset; 1789 } 1790 1791 todrop = tp->rcv_nxt - th->th_seq; 1792 if (todrop > 0) { 1793 if (TCP_DO_SACK(tp)) { 1794 /* Report duplicate segment at head of packet. */ 1795 tp->reportblk.rblk_start = th->th_seq; 1796 tp->reportblk.rblk_end = TCP_SACK_BLKEND( 1797 th->th_seq + tlen, thflags); 1798 if (SEQ_GT(tp->reportblk.rblk_end, tp->rcv_nxt)) 1799 tp->reportblk.rblk_end = tp->rcv_nxt; 1800 tp->sack_flags |= (TSACK_F_DUPSEG | TSACK_F_SACKLEFT); 1801 tp->t_flags |= TF_ACKNOW; 1802 } 1803 if (thflags & TH_SYN) { 1804 thflags &= ~TH_SYN; 1805 th->th_seq++; 1806 if (th->th_urp > 1) 1807 th->th_urp--; 1808 else 1809 thflags &= ~TH_URG; 1810 todrop--; 1811 } 1812 /* 1813 * Following if statement from Stevens, vol. 2, p. 960. 1814 */ 1815 if (todrop > tlen || 1816 (todrop == tlen && !(thflags & TH_FIN))) { 1817 /* 1818 * Any valid FIN must be to the left of the window. 1819 * At this point the FIN must be a duplicate or out 1820 * of sequence; drop it. 1821 */ 1822 thflags &= ~TH_FIN; 1823 1824 /* 1825 * Send an ACK to resynchronize and drop any data. 1826 * But keep on processing for RST or ACK. 1827 */ 1828 tp->t_flags |= TF_ACKNOW; 1829 todrop = tlen; 1830 tcpstat.tcps_rcvduppack++; 1831 tcpstat.tcps_rcvdupbyte += todrop; 1832 } else { 1833 tcpstat.tcps_rcvpartduppack++; 1834 tcpstat.tcps_rcvpartdupbyte += todrop; 1835 } 1836 drop_hdrlen += todrop; /* drop from the top afterwards */ 1837 th->th_seq += todrop; 1838 tlen -= todrop; 1839 if (th->th_urp > todrop) 1840 th->th_urp -= todrop; 1841 else { 1842 thflags &= ~TH_URG; 1843 th->th_urp = 0; 1844 } 1845 } 1846 1847 /* 1848 * If new data are received on a connection after the 1849 * user processes are gone, then RST the other end. 1850 */ 1851 if ((so->so_state & SS_NOFDREF) && 1852 tp->t_state > TCPS_CLOSE_WAIT && tlen) { 1853 tp = tcp_close(tp); 1854 tcpstat.tcps_rcvafterclose++; 1855 rstreason = BANDLIM_UNLIMITED; 1856 goto dropwithreset; 1857 } 1858 1859 /* 1860 * If segment ends after window, drop trailing data 1861 * (and PUSH and FIN); if nothing left, just ACK. 1862 */ 1863 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd); 1864 if (todrop > 0) { 1865 tcpstat.tcps_rcvpackafterwin++; 1866 if (todrop >= tlen) { 1867 tcpstat.tcps_rcvbyteafterwin += tlen; 1868 /* 1869 * If a new connection request is received 1870 * while in TIME_WAIT, drop the old connection 1871 * and start over if the sequence numbers 1872 * are above the previous ones. 1873 */ 1874 if (thflags & TH_SYN && 1875 tp->t_state == TCPS_TIME_WAIT && 1876 SEQ_GT(th->th_seq, tp->rcv_nxt)) { 1877 tp = tcp_close(tp); 1878 goto findpcb; 1879 } 1880 /* 1881 * If window is closed can only take segments at 1882 * window edge, and have to drop data and PUSH from 1883 * incoming segments. Continue processing, but 1884 * remember to ack. Otherwise, drop segment 1885 * and ack. 1886 */ 1887 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) { 1888 tp->t_flags |= TF_ACKNOW; 1889 tcpstat.tcps_rcvwinprobe++; 1890 } else 1891 goto dropafterack; 1892 } else 1893 tcpstat.tcps_rcvbyteafterwin += todrop; 1894 m_adj(m, -todrop); 1895 tlen -= todrop; 1896 thflags &= ~(TH_PUSH | TH_FIN); 1897 } 1898 1899 /* 1900 * If last ACK falls within this segment's sequence numbers, 1901 * record its timestamp. 1902 * NOTE: 1903 * 1) That the test incorporates suggestions from the latest 1904 * proposal of the tcplw@cray.com list (Braden 1993/04/26). 1905 * 2) That updating only on newer timestamps interferes with 1906 * our earlier PAWS tests, so this check should be solely 1907 * predicated on the sequence space of this segment. 1908 * 3) That we modify the segment boundary check to be 1909 * Last.ACK.Sent <= SEG.SEQ + SEG.LEN 1910 * instead of RFC1323's 1911 * Last.ACK.Sent < SEG.SEQ + SEG.LEN, 1912 * This modified check allows us to overcome RFC1323's 1913 * limitations as described in Stevens TCP/IP Illustrated 1914 * Vol. 2 p.869. In such cases, we can still calculate the 1915 * RTT correctly when RCV.NXT == Last.ACK.Sent. 1916 */ 1917 if ((to.to_flags & TOF_TS) && SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 1918 SEQ_LEQ(tp->last_ack_sent, (th->th_seq + tlen 1919 + ((thflags & TH_SYN) != 0) 1920 + ((thflags & TH_FIN) != 0)))) { 1921 tp->ts_recent_age = ticks; 1922 tp->ts_recent = to.to_tsval; 1923 } 1924 1925 /* 1926 * If a SYN is in the window, then this is an 1927 * error and we send an RST and drop the connection. 1928 */ 1929 if (thflags & TH_SYN) { 1930 tp = tcp_drop(tp, ECONNRESET); 1931 rstreason = BANDLIM_UNLIMITED; 1932 goto dropwithreset; 1933 } 1934 1935 /* 1936 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN 1937 * flag is on (half-synchronized state), then queue data for 1938 * later processing; else drop segment and return. 1939 */ 1940 if (!(thflags & TH_ACK)) { 1941 if (tp->t_state == TCPS_SYN_RECEIVED || 1942 (tp->t_flags & TF_NEEDSYN)) 1943 goto step6; 1944 else 1945 goto drop; 1946 } 1947 1948 /* 1949 * Ack processing. 1950 */ 1951 switch (tp->t_state) { 1952 /* 1953 * In SYN_RECEIVED state, the ACK acknowledges our SYN, so enter 1954 * ESTABLISHED state and continue processing. 1955 * The ACK was checked above. 1956 */ 1957 case TCPS_SYN_RECEIVED: 1958 1959 tcpstat.tcps_connects++; 1960 soisconnected(so); 1961 /* Do window scaling? */ 1962 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 1963 (TF_RCVD_SCALE | TF_REQ_SCALE)) 1964 tp->rcv_scale = tp->request_r_scale; 1965 /* 1966 * Make transitions: 1967 * SYN-RECEIVED -> ESTABLISHED 1968 * SYN-RECEIVED* -> FIN-WAIT-1 1969 */ 1970 tp->t_starttime = ticks; 1971 if (tp->t_flags & TF_NEEDFIN) { 1972 tp->t_state = TCPS_FIN_WAIT_1; 1973 tp->t_flags &= ~TF_NEEDFIN; 1974 } else { 1975 tcp_established(tp); 1976 } 1977 /* 1978 * If segment contains data or ACK, will call tcp_reass() 1979 * later; if not, do so now to pass queued data to user. 1980 */ 1981 if (tlen == 0 && !(thflags & TH_FIN)) 1982 tcp_reass(tp, NULL, NULL, NULL); 1983 /* fall into ... */ 1984 1985 /* 1986 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range 1987 * ACKs. If the ack is in the range 1988 * tp->snd_una < th->th_ack <= tp->snd_max 1989 * then advance tp->snd_una to th->th_ack and drop 1990 * data from the retransmission queue. If this ACK reflects 1991 * more up to date window information we update our window information. 1992 */ 1993 case TCPS_ESTABLISHED: 1994 case TCPS_FIN_WAIT_1: 1995 case TCPS_FIN_WAIT_2: 1996 case TCPS_CLOSE_WAIT: 1997 case TCPS_CLOSING: 1998 case TCPS_LAST_ACK: 1999 case TCPS_TIME_WAIT: 2000 2001 if (SEQ_LEQ(th->th_ack, tp->snd_una)) { 2002 if (TCP_DO_SACK(tp)) 2003 tcp_sack_update_scoreboard(tp, &to); 2004 if (!tcp_callout_active(tp, tp->tt_rexmt) || 2005 th->th_ack != tp->snd_una) { 2006 if (tlen == 0 && tiwin == tp->snd_wnd) 2007 tcpstat.tcps_rcvdupack++; 2008 tp->t_dupacks = 0; 2009 break; 2010 } 2011 if (tlen != 0 || tiwin != tp->snd_wnd) { 2012 if (!tcp_do_rfc3517bis || 2013 !TCP_DO_SACK(tp) || 2014 (to.to_flags & 2015 (TOF_SACK | TOF_SACK_REDUNDANT)) 2016 != TOF_SACK) { 2017 tp->t_dupacks = 0; 2018 } else { 2019 delayed_dupack = TRUE; 2020 th_dupack = th->th_ack; 2021 } 2022 break; 2023 } 2024 if (tcp_fast_recovery(tp, th->th_ack, &to)) 2025 goto drop; 2026 else 2027 break; 2028 } 2029 2030 KASSERT(SEQ_GT(th->th_ack, tp->snd_una), ("th_ack <= snd_una")); 2031 tp->t_dupacks = 0; 2032 if (SEQ_GT(th->th_ack, tp->snd_max)) { 2033 /* 2034 * Detected optimistic ACK attack. 2035 * Force slow-start to de-synchronize attack. 2036 */ 2037 tp->snd_cwnd = tp->t_maxseg; 2038 tp->snd_wacked = 0; 2039 2040 tcpstat.tcps_rcvacktoomuch++; 2041 goto dropafterack; 2042 } 2043 /* 2044 * If we reach this point, ACK is not a duplicate, 2045 * i.e., it ACKs something we sent. 2046 */ 2047 if (tp->t_flags & TF_NEEDSYN) { 2048 /* 2049 * T/TCP: Connection was half-synchronized, and our 2050 * SYN has been ACK'd (so connection is now fully 2051 * synchronized). Go to non-starred state, 2052 * increment snd_una for ACK of SYN, and check if 2053 * we can do window scaling. 2054 */ 2055 tp->t_flags &= ~TF_NEEDSYN; 2056 tp->snd_una++; 2057 /* Do window scaling? */ 2058 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 2059 (TF_RCVD_SCALE | TF_REQ_SCALE)) 2060 tp->rcv_scale = tp->request_r_scale; 2061 } 2062 2063 process_ACK: 2064 acked = th->th_ack - tp->snd_una; 2065 tcpstat.tcps_rcvackpack++; 2066 tcpstat.tcps_rcvackbyte += acked; 2067 2068 if (tcp_do_eifel_detect && acked > 0 && 2069 (to.to_flags & TOF_TS) && (to.to_tsecr != 0) && 2070 (tp->rxt_flags & TRXT_F_FIRSTACCACK)) { 2071 /* Eifel detection applicable. */ 2072 if (to.to_tsecr < tp->t_rexmtTS) { 2073 ++tcpstat.tcps_eifeldetected; 2074 tcp_revert_congestion_state(tp); 2075 if (tp->t_rxtshift != 1 || 2076 ticks >= tp->t_badrxtwin) 2077 ++tcpstat.tcps_rttcantdetect; 2078 } 2079 } else if (tp->t_rxtshift == 1 && ticks < tp->t_badrxtwin) { 2080 /* 2081 * If we just performed our first retransmit, 2082 * and the ACK arrives within our recovery window, 2083 * then it was a mistake to do the retransmit 2084 * in the first place. Recover our original cwnd 2085 * and ssthresh, and proceed to transmit where we 2086 * left off. 2087 */ 2088 tcp_revert_congestion_state(tp); 2089 ++tcpstat.tcps_rttdetected; 2090 } 2091 2092 /* 2093 * If we have a timestamp reply, update smoothed 2094 * round trip time. If no timestamp is present but 2095 * transmit timer is running and timed sequence 2096 * number was acked, update smoothed round trip time. 2097 * Since we now have an rtt measurement, cancel the 2098 * timer backoff (cf., Phil Karn's retransmit alg.). 2099 * Recompute the initial retransmit timer. 2100 * 2101 * Some machines (certain windows boxes) send broken 2102 * timestamp replies during the SYN+ACK phase, ignore 2103 * timestamps of 0. 2104 */ 2105 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) 2106 tcp_xmit_timer(tp, ticks - to.to_tsecr + 1, th->th_ack); 2107 else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) 2108 tcp_xmit_timer(tp, ticks - tp->t_rtttime, th->th_ack); 2109 tcp_xmit_bandwidth_limit(tp, th->th_ack); 2110 2111 /* 2112 * If no data (only SYN) was ACK'd, 2113 * skip rest of ACK processing. 2114 */ 2115 if (acked == 0) 2116 goto step6; 2117 2118 /* Stop looking for an acceptable ACK since one was received. */ 2119 tp->rxt_flags &= ~(TRXT_F_FIRSTACCACK | 2120 TRXT_F_FASTREXMT | TRXT_F_EARLYREXMT); 2121 2122 if (acked > so->so_snd.ssb_cc) { 2123 tp->snd_wnd -= so->so_snd.ssb_cc; 2124 sbdrop(&so->so_snd.sb, (int)so->so_snd.ssb_cc); 2125 ourfinisacked = TRUE; 2126 } else { 2127 sbdrop(&so->so_snd.sb, acked); 2128 tp->snd_wnd -= acked; 2129 ourfinisacked = FALSE; 2130 } 2131 sowwakeup(so); 2132 2133 /* 2134 * Update window information. 2135 */ 2136 if (acceptable_window_update(tp, th, tiwin)) { 2137 /* keep track of pure window updates */ 2138 if (tlen == 0 && tp->snd_wl2 == th->th_ack && 2139 tiwin > tp->snd_wnd) 2140 tcpstat.tcps_rcvwinupd++; 2141 tp->snd_wnd = tiwin; 2142 tp->snd_wl1 = th->th_seq; 2143 tp->snd_wl2 = th->th_ack; 2144 if (tp->snd_wnd > tp->max_sndwnd) 2145 tp->max_sndwnd = tp->snd_wnd; 2146 needoutput = TRUE; 2147 } 2148 2149 tp->snd_una = th->th_ack; 2150 if (TCP_DO_SACK(tp)) 2151 tcp_sack_update_scoreboard(tp, &to); 2152 if (IN_FASTRECOVERY(tp)) { 2153 if (SEQ_GEQ(th->th_ack, tp->snd_recover)) { 2154 EXIT_FASTRECOVERY(tp); 2155 needoutput = TRUE; 2156 /* 2157 * If the congestion window was inflated 2158 * to account for the other side's 2159 * cached packets, retract it. 2160 */ 2161 if (!TCP_DO_SACK(tp)) 2162 tp->snd_cwnd = tp->snd_ssthresh; 2163 2164 /* 2165 * Window inflation should have left us 2166 * with approximately snd_ssthresh outstanding 2167 * data. But, in case we would be inclined 2168 * to send a burst, better do it using 2169 * slow start. 2170 */ 2171 if (SEQ_GT(th->th_ack + tp->snd_cwnd, 2172 tp->snd_max + 2 * tp->t_maxseg)) 2173 tp->snd_cwnd = 2174 (tp->snd_max - tp->snd_una) + 2175 2 * tp->t_maxseg; 2176 2177 tp->snd_wacked = 0; 2178 } else { 2179 if (TCP_DO_SACK(tp)) { 2180 tp->snd_max_rexmt = tp->snd_max; 2181 tcp_sack_rexmt(tp); 2182 } else { 2183 tcp_newreno_partial_ack(tp, th, acked); 2184 } 2185 needoutput = FALSE; 2186 } 2187 } else { 2188 /* 2189 * Open the congestion window. When in slow-start, 2190 * open exponentially: maxseg per packet. Otherwise, 2191 * open linearly: maxseg per window. 2192 */ 2193 if (tp->snd_cwnd <= tp->snd_ssthresh) { 2194 u_int abc_sslimit = 2195 (SEQ_LT(tp->snd_nxt, tp->snd_max) ? 2196 tp->t_maxseg : 2 * tp->t_maxseg); 2197 2198 /* slow-start */ 2199 tp->snd_cwnd += tcp_do_abc ? 2200 min(acked, abc_sslimit) : tp->t_maxseg; 2201 } else { 2202 /* linear increase */ 2203 tp->snd_wacked += tcp_do_abc ? acked : 2204 tp->t_maxseg; 2205 if (tp->snd_wacked >= tp->snd_cwnd) { 2206 tp->snd_wacked -= tp->snd_cwnd; 2207 tp->snd_cwnd += tp->t_maxseg; 2208 } 2209 } 2210 tp->snd_cwnd = min(tp->snd_cwnd, 2211 TCP_MAXWIN << tp->snd_scale); 2212 tp->snd_recover = th->th_ack - 1; 2213 } 2214 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 2215 tp->snd_nxt = tp->snd_una; 2216 2217 /* 2218 * If all outstanding data is acked, stop retransmit 2219 * timer and remember to restart (more output or persist). 2220 * If there is more data to be acked, restart retransmit 2221 * timer, using current (possibly backed-off) value. 2222 */ 2223 if (th->th_ack == tp->snd_max) { 2224 tcp_callout_stop(tp, tp->tt_rexmt); 2225 needoutput = TRUE; 2226 } else if (!tcp_callout_active(tp, tp->tt_persist)) { 2227 tcp_callout_reset(tp, tp->tt_rexmt, tp->t_rxtcur, 2228 tcp_timer_rexmt); 2229 } 2230 2231 switch (tp->t_state) { 2232 /* 2233 * In FIN_WAIT_1 STATE in addition to the processing 2234 * for the ESTABLISHED state if our FIN is now acknowledged 2235 * then enter FIN_WAIT_2. 2236 */ 2237 case TCPS_FIN_WAIT_1: 2238 if (ourfinisacked) { 2239 /* 2240 * If we can't receive any more 2241 * data, then closing user can proceed. 2242 * Starting the timer is contrary to the 2243 * specification, but if we don't get a FIN 2244 * we'll hang forever. 2245 */ 2246 if (so->so_state & SS_CANTRCVMORE) { 2247 soisdisconnected(so); 2248 tcp_callout_reset(tp, tp->tt_2msl, 2249 tp->t_maxidle, tcp_timer_2msl); 2250 } 2251 tp->t_state = TCPS_FIN_WAIT_2; 2252 } 2253 break; 2254 2255 /* 2256 * In CLOSING STATE in addition to the processing for 2257 * the ESTABLISHED state if the ACK acknowledges our FIN 2258 * then enter the TIME-WAIT state, otherwise ignore 2259 * the segment. 2260 */ 2261 case TCPS_CLOSING: 2262 if (ourfinisacked) { 2263 tp->t_state = TCPS_TIME_WAIT; 2264 tcp_canceltimers(tp); 2265 tcp_callout_reset(tp, tp->tt_2msl, 2266 2 * tcp_rmx_msl(tp), 2267 tcp_timer_2msl); 2268 soisdisconnected(so); 2269 } 2270 break; 2271 2272 /* 2273 * In LAST_ACK, we may still be waiting for data to drain 2274 * and/or to be acked, as well as for the ack of our FIN. 2275 * If our FIN is now acknowledged, delete the TCB, 2276 * enter the closed state and return. 2277 */ 2278 case TCPS_LAST_ACK: 2279 if (ourfinisacked) { 2280 tp = tcp_close(tp); 2281 goto drop; 2282 } 2283 break; 2284 2285 /* 2286 * In TIME_WAIT state the only thing that should arrive 2287 * is a retransmission of the remote FIN. Acknowledge 2288 * it and restart the finack timer. 2289 */ 2290 case TCPS_TIME_WAIT: 2291 tcp_callout_reset(tp, tp->tt_2msl, 2 * tcp_rmx_msl(tp), 2292 tcp_timer_2msl); 2293 goto dropafterack; 2294 } 2295 } 2296 2297 step6: 2298 /* 2299 * Update window information. 2300 * Don't look at window if no ACK: TAC's send garbage on first SYN. 2301 */ 2302 if ((thflags & TH_ACK) && 2303 acceptable_window_update(tp, th, tiwin)) { 2304 /* keep track of pure window updates */ 2305 if (tlen == 0 && tp->snd_wl2 == th->th_ack && 2306 tiwin > tp->snd_wnd) 2307 tcpstat.tcps_rcvwinupd++; 2308 tp->snd_wnd = tiwin; 2309 tp->snd_wl1 = th->th_seq; 2310 tp->snd_wl2 = th->th_ack; 2311 if (tp->snd_wnd > tp->max_sndwnd) 2312 tp->max_sndwnd = tp->snd_wnd; 2313 needoutput = TRUE; 2314 } 2315 2316 /* 2317 * Process segments with URG. 2318 */ 2319 if ((thflags & TH_URG) && th->th_urp && 2320 !TCPS_HAVERCVDFIN(tp->t_state)) { 2321 /* 2322 * This is a kludge, but if we receive and accept 2323 * random urgent pointers, we'll crash in 2324 * soreceive. It's hard to imagine someone 2325 * actually wanting to send this much urgent data. 2326 */ 2327 if (th->th_urp + so->so_rcv.ssb_cc > sb_max) { 2328 th->th_urp = 0; /* XXX */ 2329 thflags &= ~TH_URG; /* XXX */ 2330 goto dodata; /* XXX */ 2331 } 2332 /* 2333 * If this segment advances the known urgent pointer, 2334 * then mark the data stream. This should not happen 2335 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since 2336 * a FIN has been received from the remote side. 2337 * In these states we ignore the URG. 2338 * 2339 * According to RFC961 (Assigned Protocols), 2340 * the urgent pointer points to the last octet 2341 * of urgent data. We continue, however, 2342 * to consider it to indicate the first octet 2343 * of data past the urgent section as the original 2344 * spec states (in one of two places). 2345 */ 2346 if (SEQ_GT(th->th_seq + th->th_urp, tp->rcv_up)) { 2347 tp->rcv_up = th->th_seq + th->th_urp; 2348 so->so_oobmark = so->so_rcv.ssb_cc + 2349 (tp->rcv_up - tp->rcv_nxt) - 1; 2350 if (so->so_oobmark == 0) 2351 sosetstate(so, SS_RCVATMARK); 2352 sohasoutofband(so); 2353 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA); 2354 } 2355 /* 2356 * Remove out of band data so doesn't get presented to user. 2357 * This can happen independent of advancing the URG pointer, 2358 * but if two URG's are pending at once, some out-of-band 2359 * data may creep in... ick. 2360 */ 2361 if (th->th_urp <= (u_long)tlen && 2362 !(so->so_options & SO_OOBINLINE)) { 2363 /* hdr drop is delayed */ 2364 tcp_pulloutofband(so, th, m, drop_hdrlen); 2365 } 2366 } else { 2367 /* 2368 * If no out of band data is expected, 2369 * pull receive urgent pointer along 2370 * with the receive window. 2371 */ 2372 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up)) 2373 tp->rcv_up = tp->rcv_nxt; 2374 } 2375 2376 dodata: /* XXX */ 2377 /* 2378 * Process the segment text, merging it into the TCP sequencing queue, 2379 * and arranging for acknowledgment of receipt if necessary. 2380 * This process logically involves adjusting tp->rcv_wnd as data 2381 * is presented to the user (this happens in tcp_usrreq.c, 2382 * case PRU_RCVD). If a FIN has already been received on this 2383 * connection then we just ignore the text. 2384 */ 2385 if ((tlen || (thflags & TH_FIN)) && !TCPS_HAVERCVDFIN(tp->t_state)) { 2386 m_adj(m, drop_hdrlen); /* delayed header drop */ 2387 /* 2388 * Insert segment which includes th into TCP reassembly queue 2389 * with control block tp. Set thflags to whether reassembly now 2390 * includes a segment with FIN. This handles the common case 2391 * inline (segment is the next to be received on an established 2392 * connection, and the queue is empty), avoiding linkage into 2393 * and removal from the queue and repetition of various 2394 * conversions. 2395 * Set DELACK for segments received in order, but ack 2396 * immediately when segments are out of order (so 2397 * fast retransmit can work). 2398 */ 2399 if (th->th_seq == tp->rcv_nxt && 2400 TAILQ_EMPTY(&tp->t_segq) && 2401 TCPS_HAVEESTABLISHED(tp->t_state)) { 2402 if (DELAY_ACK(tp)) { 2403 tcp_callout_reset(tp, tp->tt_delack, 2404 tcp_delacktime, tcp_timer_delack); 2405 } else { 2406 tp->t_flags |= TF_ACKNOW; 2407 } 2408 tp->rcv_nxt += tlen; 2409 thflags = th->th_flags & TH_FIN; 2410 tcpstat.tcps_rcvpack++; 2411 tcpstat.tcps_rcvbyte += tlen; 2412 ND6_HINT(tp); 2413 if (so->so_state & SS_CANTRCVMORE) { 2414 m_freem(m); 2415 } else { 2416 lwkt_gettoken(&so->so_rcv.ssb_token); 2417 ssb_appendstream(&so->so_rcv, m); 2418 lwkt_reltoken(&so->so_rcv.ssb_token); 2419 } 2420 sorwakeup(so); 2421 } else { 2422 if (!(tp->sack_flags & TSACK_F_DUPSEG)) { 2423 /* Initialize SACK report block. */ 2424 tp->reportblk.rblk_start = th->th_seq; 2425 tp->reportblk.rblk_end = TCP_SACK_BLKEND( 2426 th->th_seq + tlen, thflags); 2427 } 2428 thflags = tcp_reass(tp, th, &tlen, m); 2429 tp->t_flags |= TF_ACKNOW; 2430 } 2431 2432 /* 2433 * Note the amount of data that peer has sent into 2434 * our window, in order to estimate the sender's 2435 * buffer size. 2436 */ 2437 len = so->so_rcv.ssb_hiwat - (tp->rcv_adv - tp->rcv_nxt); 2438 } else { 2439 m_freem(m); 2440 thflags &= ~TH_FIN; 2441 } 2442 2443 /* 2444 * If FIN is received ACK the FIN and let the user know 2445 * that the connection is closing. 2446 */ 2447 if (thflags & TH_FIN) { 2448 if (!TCPS_HAVERCVDFIN(tp->t_state)) { 2449 socantrcvmore(so); 2450 /* 2451 * If connection is half-synchronized 2452 * (ie NEEDSYN flag on) then delay ACK, 2453 * so it may be piggybacked when SYN is sent. 2454 * Otherwise, since we received a FIN then no 2455 * more input can be expected, send ACK now. 2456 */ 2457 if (DELAY_ACK(tp) && (tp->t_flags & TF_NEEDSYN)) { 2458 tcp_callout_reset(tp, tp->tt_delack, 2459 tcp_delacktime, tcp_timer_delack); 2460 } else { 2461 tp->t_flags |= TF_ACKNOW; 2462 } 2463 tp->rcv_nxt++; 2464 } 2465 2466 switch (tp->t_state) { 2467 /* 2468 * In SYN_RECEIVED and ESTABLISHED STATES 2469 * enter the CLOSE_WAIT state. 2470 */ 2471 case TCPS_SYN_RECEIVED: 2472 tp->t_starttime = ticks; 2473 /*FALLTHROUGH*/ 2474 case TCPS_ESTABLISHED: 2475 tp->t_state = TCPS_CLOSE_WAIT; 2476 break; 2477 2478 /* 2479 * If still in FIN_WAIT_1 STATE FIN has not been acked so 2480 * enter the CLOSING state. 2481 */ 2482 case TCPS_FIN_WAIT_1: 2483 tp->t_state = TCPS_CLOSING; 2484 break; 2485 2486 /* 2487 * In FIN_WAIT_2 state enter the TIME_WAIT state, 2488 * starting the time-wait timer, turning off the other 2489 * standard timers. 2490 */ 2491 case TCPS_FIN_WAIT_2: 2492 tp->t_state = TCPS_TIME_WAIT; 2493 tcp_canceltimers(tp); 2494 tcp_callout_reset(tp, tp->tt_2msl, 2 * tcp_rmx_msl(tp), 2495 tcp_timer_2msl); 2496 soisdisconnected(so); 2497 break; 2498 2499 /* 2500 * In TIME_WAIT state restart the 2 MSL time_wait timer. 2501 */ 2502 case TCPS_TIME_WAIT: 2503 tcp_callout_reset(tp, tp->tt_2msl, 2 * tcp_rmx_msl(tp), 2504 tcp_timer_2msl); 2505 break; 2506 } 2507 } 2508 2509 #ifdef TCPDEBUG 2510 if (so->so_options & SO_DEBUG) 2511 tcp_trace(TA_INPUT, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0); 2512 #endif 2513 2514 /* 2515 * Delayed duplicated ACK processing 2516 */ 2517 if (delayed_dupack && tcp_fast_recovery(tp, th_dupack, &to)) 2518 needoutput = FALSE; 2519 2520 /* 2521 * Return any desired output. 2522 */ 2523 if (needoutput || (tp->t_flags & TF_ACKNOW)) 2524 tcp_output(tp); 2525 tcp_sack_report_cleanup(tp); 2526 return(IPPROTO_DONE); 2527 2528 dropafterack: 2529 /* 2530 * Generate an ACK dropping incoming segment if it occupies 2531 * sequence space, where the ACK reflects our state. 2532 * 2533 * We can now skip the test for the RST flag since all 2534 * paths to this code happen after packets containing 2535 * RST have been dropped. 2536 * 2537 * In the SYN-RECEIVED state, don't send an ACK unless the 2538 * segment we received passes the SYN-RECEIVED ACK test. 2539 * If it fails send a RST. This breaks the loop in the 2540 * "LAND" DoS attack, and also prevents an ACK storm 2541 * between two listening ports that have been sent forged 2542 * SYN segments, each with the source address of the other. 2543 */ 2544 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) && 2545 (SEQ_GT(tp->snd_una, th->th_ack) || 2546 SEQ_GT(th->th_ack, tp->snd_max)) ) { 2547 rstreason = BANDLIM_RST_OPENPORT; 2548 goto dropwithreset; 2549 } 2550 #ifdef TCPDEBUG 2551 if (so->so_options & SO_DEBUG) 2552 tcp_trace(TA_DROP, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0); 2553 #endif 2554 m_freem(m); 2555 tp->t_flags |= TF_ACKNOW; 2556 tcp_output(tp); 2557 tcp_sack_report_cleanup(tp); 2558 return(IPPROTO_DONE); 2559 2560 dropwithreset: 2561 /* 2562 * Generate a RST, dropping incoming segment. 2563 * Make ACK acceptable to originator of segment. 2564 * Don't bother to respond if destination was broadcast/multicast. 2565 */ 2566 if ((thflags & TH_RST) || m->m_flags & (M_BCAST | M_MCAST)) 2567 goto drop; 2568 if (isipv6) { 2569 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || 2570 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) 2571 goto drop; 2572 } else { 2573 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 2574 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || 2575 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || 2576 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) 2577 goto drop; 2578 } 2579 /* IPv6 anycast check is done at tcp6_input() */ 2580 2581 /* 2582 * Perform bandwidth limiting. 2583 */ 2584 #ifdef ICMP_BANDLIM 2585 if (badport_bandlim(rstreason) < 0) 2586 goto drop; 2587 #endif 2588 2589 #ifdef TCPDEBUG 2590 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) 2591 tcp_trace(TA_DROP, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0); 2592 #endif 2593 if (thflags & TH_ACK) 2594 /* mtod() below is safe as long as hdr dropping is delayed */ 2595 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0, th->th_ack, 2596 TH_RST); 2597 else { 2598 if (thflags & TH_SYN) 2599 tlen++; 2600 /* mtod() below is safe as long as hdr dropping is delayed */ 2601 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq + tlen, 2602 (tcp_seq)0, TH_RST | TH_ACK); 2603 } 2604 if (tp != NULL) 2605 tcp_sack_report_cleanup(tp); 2606 return(IPPROTO_DONE); 2607 2608 drop: 2609 /* 2610 * Drop space held by incoming segment and return. 2611 */ 2612 #ifdef TCPDEBUG 2613 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) 2614 tcp_trace(TA_DROP, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0); 2615 #endif 2616 m_freem(m); 2617 if (tp != NULL) 2618 tcp_sack_report_cleanup(tp); 2619 return(IPPROTO_DONE); 2620 } 2621 2622 /* 2623 * Parse TCP options and place in tcpopt. 2624 */ 2625 static void 2626 tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, boolean_t is_syn, 2627 tcp_seq ack) 2628 { 2629 int opt, optlen, i; 2630 2631 to->to_flags = 0; 2632 for (; cnt > 0; cnt -= optlen, cp += optlen) { 2633 opt = cp[0]; 2634 if (opt == TCPOPT_EOL) 2635 break; 2636 if (opt == TCPOPT_NOP) 2637 optlen = 1; 2638 else { 2639 if (cnt < 2) 2640 break; 2641 optlen = cp[1]; 2642 if (optlen < 2 || optlen > cnt) 2643 break; 2644 } 2645 switch (opt) { 2646 case TCPOPT_MAXSEG: 2647 if (optlen != TCPOLEN_MAXSEG) 2648 continue; 2649 if (!is_syn) 2650 continue; 2651 to->to_flags |= TOF_MSS; 2652 bcopy(cp + 2, &to->to_mss, sizeof to->to_mss); 2653 to->to_mss = ntohs(to->to_mss); 2654 break; 2655 case TCPOPT_WINDOW: 2656 if (optlen != TCPOLEN_WINDOW) 2657 continue; 2658 if (!is_syn) 2659 continue; 2660 to->to_flags |= TOF_SCALE; 2661 to->to_requested_s_scale = min(cp[2], TCP_MAX_WINSHIFT); 2662 break; 2663 case TCPOPT_TIMESTAMP: 2664 if (optlen != TCPOLEN_TIMESTAMP) 2665 continue; 2666 to->to_flags |= TOF_TS; 2667 bcopy(cp + 2, &to->to_tsval, sizeof to->to_tsval); 2668 to->to_tsval = ntohl(to->to_tsval); 2669 bcopy(cp + 6, &to->to_tsecr, sizeof to->to_tsecr); 2670 to->to_tsecr = ntohl(to->to_tsecr); 2671 /* 2672 * If echoed timestamp is later than the current time, 2673 * fall back to non RFC1323 RTT calculation. 2674 */ 2675 if (to->to_tsecr != 0 && TSTMP_GT(to->to_tsecr, ticks)) 2676 to->to_tsecr = 0; 2677 break; 2678 case TCPOPT_SACK_PERMITTED: 2679 if (optlen != TCPOLEN_SACK_PERMITTED) 2680 continue; 2681 if (!is_syn) 2682 continue; 2683 to->to_flags |= TOF_SACK_PERMITTED; 2684 break; 2685 case TCPOPT_SACK: 2686 if ((optlen - 2) & 0x07) /* not multiple of 8 */ 2687 continue; 2688 to->to_nsackblocks = (optlen - 2) / 8; 2689 to->to_sackblocks = (struct raw_sackblock *) (cp + 2); 2690 to->to_flags |= TOF_SACK; 2691 for (i = 0; i < to->to_nsackblocks; i++) { 2692 struct raw_sackblock *r = &to->to_sackblocks[i]; 2693 2694 r->rblk_start = ntohl(r->rblk_start); 2695 r->rblk_end = ntohl(r->rblk_end); 2696 2697 if (SEQ_LEQ(r->rblk_end, r->rblk_start)) { 2698 /* 2699 * Invalid SACK block; discard all 2700 * SACK blocks 2701 */ 2702 tcpstat.tcps_rcvbadsackopt++; 2703 to->to_nsackblocks = 0; 2704 to->to_sackblocks = NULL; 2705 to->to_flags &= ~TOF_SACK; 2706 break; 2707 } 2708 } 2709 if ((to->to_flags & TOF_SACK) && 2710 tcp_sack_ndsack_blocks(to->to_sackblocks, 2711 to->to_nsackblocks, ack)) 2712 to->to_flags |= TOF_DSACK; 2713 break; 2714 #ifdef TCP_SIGNATURE 2715 /* 2716 * XXX In order to reply to a host which has set the 2717 * TCP_SIGNATURE option in its initial SYN, we have to 2718 * record the fact that the option was observed here 2719 * for the syncache code to perform the correct response. 2720 */ 2721 case TCPOPT_SIGNATURE: 2722 if (optlen != TCPOLEN_SIGNATURE) 2723 continue; 2724 to->to_flags |= (TOF_SIGNATURE | TOF_SIGLEN); 2725 break; 2726 #endif /* TCP_SIGNATURE */ 2727 default: 2728 continue; 2729 } 2730 } 2731 } 2732 2733 /* 2734 * Pull out of band byte out of a segment so 2735 * it doesn't appear in the user's data queue. 2736 * It is still reflected in the segment length for 2737 * sequencing purposes. 2738 * "off" is the delayed to be dropped hdrlen. 2739 */ 2740 static void 2741 tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m, int off) 2742 { 2743 int cnt = off + th->th_urp - 1; 2744 2745 while (cnt >= 0) { 2746 if (m->m_len > cnt) { 2747 char *cp = mtod(m, caddr_t) + cnt; 2748 struct tcpcb *tp = sototcpcb(so); 2749 2750 tp->t_iobc = *cp; 2751 tp->t_oobflags |= TCPOOB_HAVEDATA; 2752 bcopy(cp + 1, cp, m->m_len - cnt - 1); 2753 m->m_len--; 2754 if (m->m_flags & M_PKTHDR) 2755 m->m_pkthdr.len--; 2756 return; 2757 } 2758 cnt -= m->m_len; 2759 m = m->m_next; 2760 if (m == NULL) 2761 break; 2762 } 2763 panic("tcp_pulloutofband"); 2764 } 2765 2766 /* 2767 * Collect new round-trip time estimate 2768 * and update averages and current timeout. 2769 */ 2770 static void 2771 tcp_xmit_timer(struct tcpcb *tp, int rtt, tcp_seq ack) 2772 { 2773 int rebaserto = 0; 2774 2775 tcpstat.tcps_rttupdated++; 2776 tp->t_rttupdated++; 2777 if ((tp->rxt_flags & TRXT_F_REBASERTO) && 2778 SEQ_GT(ack, tp->snd_max_prev)) { 2779 #ifdef DEBUG_EIFEL_RESPONSE 2780 kprintf("srtt/rttvar, prev %d/%d, cur %d/%d, ", 2781 tp->t_srtt_prev, tp->t_rttvar_prev, 2782 tp->t_srtt, tp->t_rttvar); 2783 #endif 2784 2785 tcpstat.tcps_eifelresponse++; 2786 rebaserto = 1; 2787 tp->rxt_flags &= ~TRXT_F_REBASERTO; 2788 tp->t_srtt = max(tp->t_srtt_prev, (rtt << TCP_RTT_SHIFT)); 2789 tp->t_rttvar = max(tp->t_rttvar_prev, 2790 (rtt << (TCP_RTTVAR_SHIFT - 1))); 2791 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar) 2792 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 2793 2794 #ifdef DEBUG_EIFEL_RESPONSE 2795 kprintf("new %d/%d ", tp->t_srtt, tp->t_rttvar); 2796 #endif 2797 } else if (tp->t_srtt != 0) { 2798 int delta; 2799 2800 /* 2801 * srtt is stored as fixed point with 5 bits after the 2802 * binary point (i.e., scaled by 8). The following magic 2803 * is equivalent to the smoothing algorithm in rfc793 with 2804 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed 2805 * point). Adjust rtt to origin 0. 2806 */ 2807 delta = ((rtt - 1) << TCP_DELTA_SHIFT) 2808 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT)); 2809 2810 if ((tp->t_srtt += delta) <= 0) 2811 tp->t_srtt = 1; 2812 2813 /* 2814 * We accumulate a smoothed rtt variance (actually, a 2815 * smoothed mean difference), then set the retransmit 2816 * timer to smoothed rtt + 4 times the smoothed variance. 2817 * rttvar is stored as fixed point with 4 bits after the 2818 * binary point (scaled by 16). The following is 2819 * equivalent to rfc793 smoothing with an alpha of .75 2820 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces 2821 * rfc793's wired-in beta. 2822 */ 2823 if (delta < 0) 2824 delta = -delta; 2825 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT); 2826 if ((tp->t_rttvar += delta) <= 0) 2827 tp->t_rttvar = 1; 2828 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar) 2829 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 2830 } else { 2831 /* 2832 * No rtt measurement yet - use the unsmoothed rtt. 2833 * Set the variance to half the rtt (so our first 2834 * retransmit happens at 3*rtt). 2835 */ 2836 tp->t_srtt = rtt << TCP_RTT_SHIFT; 2837 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1); 2838 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 2839 } 2840 tp->t_rtttime = 0; 2841 tp->t_rxtshift = 0; 2842 2843 #ifdef DEBUG_EIFEL_RESPONSE 2844 if (rebaserto) { 2845 kprintf("| rxtcur prev %d, old %d, ", 2846 tp->t_rxtcur_prev, tp->t_rxtcur); 2847 } 2848 #endif 2849 2850 /* 2851 * the retransmit should happen at rtt + 4 * rttvar. 2852 * Because of the way we do the smoothing, srtt and rttvar 2853 * will each average +1/2 tick of bias. When we compute 2854 * the retransmit timer, we want 1/2 tick of rounding and 2855 * 1 extra tick because of +-1/2 tick uncertainty in the 2856 * firing of the timer. The bias will give us exactly the 2857 * 1.5 tick we need. But, because the bias is 2858 * statistical, we have to test that we don't drop below 2859 * the minimum feasible timer (which is 2 ticks). 2860 */ 2861 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp), 2862 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX); 2863 2864 if (rebaserto) { 2865 if (tp->t_rxtcur < tp->t_rxtcur_prev + tcp_eifel_rtoinc) { 2866 /* 2867 * RFC4015 requires that the new RTO is at least 2868 * 2*G (tcp_eifel_rtoinc) greater then the RTO 2869 * (t_rxtcur_prev) when the spurious retransmit 2870 * timeout happens. 2871 * 2872 * The above condition could be true, if the SRTT 2873 * and RTTVAR used to calculate t_rxtcur_prev 2874 * resulted in a value less than t_rttmin. So 2875 * simply increasing SRTT by tcp_eifel_rtoinc when 2876 * preparing for the Eifel response in 2877 * tcp_save_congestion_state() could not ensure 2878 * that the new RTO will be tcp_eifel_rtoinc greater 2879 * t_rxtcur_prev. 2880 */ 2881 tp->t_rxtcur = tp->t_rxtcur_prev + tcp_eifel_rtoinc; 2882 } 2883 #ifdef DEBUG_EIFEL_RESPONSE 2884 kprintf("new %d\n", tp->t_rxtcur); 2885 #endif 2886 } 2887 2888 /* 2889 * We received an ack for a packet that wasn't retransmitted; 2890 * it is probably safe to discard any error indications we've 2891 * received recently. This isn't quite right, but close enough 2892 * for now (a route might have failed after we sent a segment, 2893 * and the return path might not be symmetrical). 2894 */ 2895 tp->t_softerror = 0; 2896 } 2897 2898 /* 2899 * Determine a reasonable value for maxseg size. 2900 * If the route is known, check route for mtu. 2901 * If none, use an mss that can be handled on the outgoing 2902 * interface without forcing IP to fragment; if bigger than 2903 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES 2904 * to utilize large mbufs. If no route is found, route has no mtu, 2905 * or the destination isn't local, use a default, hopefully conservative 2906 * size (usually 512 or the default IP max size, but no more than the mtu 2907 * of the interface), as we can't discover anything about intervening 2908 * gateways or networks. We also initialize the congestion/slow start 2909 * window to be a single segment if the destination isn't local. 2910 * While looking at the routing entry, we also initialize other path-dependent 2911 * parameters from pre-set or cached values in the routing entry. 2912 * 2913 * Also take into account the space needed for options that we 2914 * send regularly. Make maxseg shorter by that amount to assure 2915 * that we can send maxseg amount of data even when the options 2916 * are present. Store the upper limit of the length of options plus 2917 * data in maxopd. 2918 * 2919 * NOTE that this routine is only called when we process an incoming 2920 * segment, for outgoing segments only tcp_mssopt is called. 2921 */ 2922 void 2923 tcp_mss(struct tcpcb *tp, int offer) 2924 { 2925 struct rtentry *rt; 2926 struct ifnet *ifp; 2927 int rtt, mss; 2928 u_long bufsize; 2929 struct inpcb *inp = tp->t_inpcb; 2930 struct socket *so; 2931 #ifdef INET6 2932 boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) ? TRUE : FALSE); 2933 size_t min_protoh = isipv6 ? 2934 sizeof(struct ip6_hdr) + sizeof(struct tcphdr) : 2935 sizeof(struct tcpiphdr); 2936 #else 2937 const boolean_t isipv6 = FALSE; 2938 const size_t min_protoh = sizeof(struct tcpiphdr); 2939 #endif 2940 2941 if (isipv6) 2942 rt = tcp_rtlookup6(&inp->inp_inc); 2943 else 2944 rt = tcp_rtlookup(&inp->inp_inc); 2945 if (rt == NULL) { 2946 tp->t_maxopd = tp->t_maxseg = 2947 (isipv6 ? tcp_v6mssdflt : tcp_mssdflt); 2948 return; 2949 } 2950 ifp = rt->rt_ifp; 2951 so = inp->inp_socket; 2952 2953 /* 2954 * Offer == 0 means that there was no MSS on the SYN segment, 2955 * in this case we use either the interface mtu or tcp_mssdflt. 2956 * 2957 * An offer which is too large will be cut down later. 2958 */ 2959 if (offer == 0) { 2960 if (isipv6) { 2961 if (in6_localaddr(&inp->in6p_faddr)) { 2962 offer = ND_IFINFO(rt->rt_ifp)->linkmtu - 2963 min_protoh; 2964 } else { 2965 offer = tcp_v6mssdflt; 2966 } 2967 } else { 2968 if (in_localaddr(inp->inp_faddr)) 2969 offer = ifp->if_mtu - min_protoh; 2970 else 2971 offer = tcp_mssdflt; 2972 } 2973 } 2974 2975 /* 2976 * Prevent DoS attack with too small MSS. Round up 2977 * to at least minmss. 2978 * 2979 * Sanity check: make sure that maxopd will be large 2980 * enough to allow some data on segments even is the 2981 * all the option space is used (40bytes). Otherwise 2982 * funny things may happen in tcp_output. 2983 */ 2984 offer = max(offer, tcp_minmss); 2985 offer = max(offer, 64); 2986 2987 rt->rt_rmx.rmx_mssopt = offer; 2988 2989 /* 2990 * While we're here, check if there's an initial rtt 2991 * or rttvar. Convert from the route-table units 2992 * to scaled multiples of the slow timeout timer. 2993 */ 2994 if (tp->t_srtt == 0 && (rtt = rt->rt_rmx.rmx_rtt)) { 2995 /* 2996 * XXX the lock bit for RTT indicates that the value 2997 * is also a minimum value; this is subject to time. 2998 */ 2999 if (rt->rt_rmx.rmx_locks & RTV_RTT) 3000 tp->t_rttmin = rtt / (RTM_RTTUNIT / hz); 3001 tp->t_srtt = rtt / (RTM_RTTUNIT / (hz * TCP_RTT_SCALE)); 3002 tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE; 3003 tcpstat.tcps_usedrtt++; 3004 if (rt->rt_rmx.rmx_rttvar) { 3005 tp->t_rttvar = rt->rt_rmx.rmx_rttvar / 3006 (RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE)); 3007 tcpstat.tcps_usedrttvar++; 3008 } else { 3009 /* default variation is +- 1 rtt */ 3010 tp->t_rttvar = 3011 tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE; 3012 } 3013 TCPT_RANGESET(tp->t_rxtcur, 3014 ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1, 3015 tp->t_rttmin, TCPTV_REXMTMAX); 3016 } 3017 3018 /* 3019 * if there's an mtu associated with the route, use it 3020 * else, use the link mtu. Take the smaller of mss or offer 3021 * as our final mss. 3022 */ 3023 if (rt->rt_rmx.rmx_mtu) { 3024 mss = rt->rt_rmx.rmx_mtu - min_protoh; 3025 } else { 3026 if (isipv6) 3027 mss = ND_IFINFO(rt->rt_ifp)->linkmtu - min_protoh; 3028 else 3029 mss = ifp->if_mtu - min_protoh; 3030 } 3031 mss = min(mss, offer); 3032 3033 /* 3034 * maxopd stores the maximum length of data AND options 3035 * in a segment; maxseg is the amount of data in a normal 3036 * segment. We need to store this value (maxopd) apart 3037 * from maxseg, because now every segment carries options 3038 * and thus we normally have somewhat less data in segments. 3039 */ 3040 tp->t_maxopd = mss; 3041 3042 if ((tp->t_flags & (TF_REQ_TSTMP | TF_NOOPT)) == TF_REQ_TSTMP && 3043 ((tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)) 3044 mss -= TCPOLEN_TSTAMP_APPA; 3045 3046 #if (MCLBYTES & (MCLBYTES - 1)) == 0 3047 if (mss > MCLBYTES) 3048 mss &= ~(MCLBYTES-1); 3049 #else 3050 if (mss > MCLBYTES) 3051 mss = mss / MCLBYTES * MCLBYTES; 3052 #endif 3053 /* 3054 * If there's a pipesize, change the socket buffer 3055 * to that size. Make the socket buffers an integral 3056 * number of mss units; if the mss is larger than 3057 * the socket buffer, decrease the mss. 3058 */ 3059 #ifdef RTV_SPIPE 3060 if ((bufsize = rt->rt_rmx.rmx_sendpipe) == 0) 3061 #endif 3062 bufsize = so->so_snd.ssb_hiwat; 3063 if (bufsize < mss) 3064 mss = bufsize; 3065 else { 3066 bufsize = roundup(bufsize, mss); 3067 if (bufsize > sb_max) 3068 bufsize = sb_max; 3069 if (bufsize > so->so_snd.ssb_hiwat) 3070 ssb_reserve(&so->so_snd, bufsize, so, NULL); 3071 } 3072 tp->t_maxseg = mss; 3073 3074 #ifdef RTV_RPIPE 3075 if ((bufsize = rt->rt_rmx.rmx_recvpipe) == 0) 3076 #endif 3077 bufsize = so->so_rcv.ssb_hiwat; 3078 if (bufsize > mss) { 3079 bufsize = roundup(bufsize, mss); 3080 if (bufsize > sb_max) 3081 bufsize = sb_max; 3082 if (bufsize > so->so_rcv.ssb_hiwat) { 3083 lwkt_gettoken(&so->so_rcv.ssb_token); 3084 ssb_reserve(&so->so_rcv, bufsize, so, NULL); 3085 lwkt_reltoken(&so->so_rcv.ssb_token); 3086 } 3087 } 3088 3089 /* 3090 * Set the slow-start flight size 3091 * 3092 * NOTE: t_maxseg must have been configured! 3093 */ 3094 tp->snd_cwnd = tcp_initial_window(tp); 3095 3096 if (rt->rt_rmx.rmx_ssthresh) { 3097 /* 3098 * There's some sort of gateway or interface 3099 * buffer limit on the path. Use this to set 3100 * the slow start threshhold, but set the 3101 * threshold to no less than 2*mss. 3102 */ 3103 tp->snd_ssthresh = max(2 * mss, rt->rt_rmx.rmx_ssthresh); 3104 tcpstat.tcps_usedssthresh++; 3105 } 3106 } 3107 3108 /* 3109 * Determine the MSS option to send on an outgoing SYN. 3110 */ 3111 int 3112 tcp_mssopt(struct tcpcb *tp) 3113 { 3114 struct rtentry *rt; 3115 #ifdef INET6 3116 boolean_t isipv6 = 3117 ((tp->t_inpcb->inp_vflag & INP_IPV6) ? TRUE : FALSE); 3118 int min_protoh = isipv6 ? 3119 sizeof(struct ip6_hdr) + sizeof(struct tcphdr) : 3120 sizeof(struct tcpiphdr); 3121 #else 3122 const boolean_t isipv6 = FALSE; 3123 const size_t min_protoh = sizeof(struct tcpiphdr); 3124 #endif 3125 3126 if (isipv6) 3127 rt = tcp_rtlookup6(&tp->t_inpcb->inp_inc); 3128 else 3129 rt = tcp_rtlookup(&tp->t_inpcb->inp_inc); 3130 if (rt == NULL) 3131 return (isipv6 ? tcp_v6mssdflt : tcp_mssdflt); 3132 3133 return (rt->rt_ifp->if_mtu - min_protoh); 3134 } 3135 3136 /* 3137 * When a partial ack arrives, force the retransmission of the 3138 * next unacknowledged segment. Do not exit Fast Recovery. 3139 * 3140 * Implement the Slow-but-Steady variant of NewReno by restarting the 3141 * the retransmission timer. Turn it off here so it can be restarted 3142 * later in tcp_output(). 3143 */ 3144 static void 3145 tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th, int acked) 3146 { 3147 tcp_seq old_snd_nxt = tp->snd_nxt; 3148 u_long ocwnd = tp->snd_cwnd; 3149 3150 tcp_callout_stop(tp, tp->tt_rexmt); 3151 tp->t_rtttime = 0; 3152 tp->snd_nxt = th->th_ack; 3153 /* Set snd_cwnd to one segment beyond acknowledged offset. */ 3154 tp->snd_cwnd = tp->t_maxseg; 3155 tp->t_flags |= TF_ACKNOW; 3156 tcp_output(tp); 3157 if (SEQ_GT(old_snd_nxt, tp->snd_nxt)) 3158 tp->snd_nxt = old_snd_nxt; 3159 /* partial window deflation */ 3160 if (ocwnd > acked) 3161 tp->snd_cwnd = ocwnd - acked + tp->t_maxseg; 3162 else 3163 tp->snd_cwnd = tp->t_maxseg; 3164 } 3165 3166 /* 3167 * In contrast to the Slow-but-Steady NewReno variant, 3168 * we do not reset the retransmission timer for SACK retransmissions, 3169 * except when retransmitting snd_una. 3170 */ 3171 static void 3172 tcp_sack_rexmt(struct tcpcb *tp) 3173 { 3174 tcp_seq old_snd_nxt = tp->snd_nxt; 3175 u_long ocwnd = tp->snd_cwnd; 3176 uint32_t pipe; 3177 int nseg = 0; /* consecutive new segments */ 3178 int nseg_rexmt = 0; /* retransmitted segments */ 3179 #define MAXBURST 4 /* limit burst of new packets on partial ack */ 3180 3181 tp->t_rtttime = 0; 3182 pipe = tcp_sack_compute_pipe(tp); 3183 while ((tcp_seq_diff_t)(ocwnd - pipe) >= (tcp_seq_diff_t)tp->t_maxseg && 3184 (!tcp_do_smartsack || nseg < MAXBURST)) { 3185 tcp_seq old_snd_max, old_rexmt_high, nextrexmt; 3186 uint32_t sent, seglen; 3187 boolean_t rescue; 3188 int error; 3189 3190 old_rexmt_high = tp->rexmt_high; 3191 if (!tcp_sack_nextseg(tp, &nextrexmt, &seglen, &rescue)) { 3192 tp->rexmt_high = old_rexmt_high; 3193 break; 3194 } 3195 3196 /* 3197 * If the next tranmission is a rescue retranmission, 3198 * we check whether we have already sent some data 3199 * (either new segments or retransmitted segments) 3200 * into the the network or not. Since the idea of rescue 3201 * retransmission is to sustain ACK clock, as long as 3202 * some segments are in the network, ACK clock will be 3203 * kept ticking. 3204 */ 3205 if (rescue && (nseg_rexmt > 0 || nseg > 0)) { 3206 tp->rexmt_high = old_rexmt_high; 3207 break; 3208 } 3209 3210 if (nextrexmt == tp->snd_max) 3211 ++nseg; 3212 else 3213 ++nseg_rexmt; 3214 tp->snd_nxt = nextrexmt; 3215 tp->snd_cwnd = nextrexmt - tp->snd_una + seglen; 3216 old_snd_max = tp->snd_max; 3217 if (nextrexmt == tp->snd_una) 3218 tcp_callout_stop(tp, tp->tt_rexmt); 3219 error = tcp_output(tp); 3220 if (error != 0) { 3221 tp->rexmt_high = old_rexmt_high; 3222 break; 3223 } 3224 sent = tp->snd_nxt - nextrexmt; 3225 if (sent <= 0) { 3226 tp->rexmt_high = old_rexmt_high; 3227 break; 3228 } 3229 pipe += sent; 3230 tcpstat.tcps_sndsackpack++; 3231 tcpstat.tcps_sndsackbyte += sent; 3232 3233 if (rescue) { 3234 tcpstat.tcps_sackrescue++; 3235 tp->rexmt_rescue = tp->snd_nxt; 3236 tp->sack_flags |= TSACK_F_SACKRESCUED; 3237 break; 3238 } 3239 if (SEQ_LT(nextrexmt, old_snd_max) && 3240 SEQ_LT(tp->rexmt_high, tp->snd_nxt)) { 3241 tp->rexmt_high = seq_min(tp->snd_nxt, old_snd_max); 3242 if (tcp_aggressive_rescuesack && 3243 (tp->sack_flags & TSACK_F_SACKRESCUED) && 3244 SEQ_LT(tp->rexmt_rescue, tp->rexmt_high)) { 3245 /* Drag RescueRxt along with HighRxt */ 3246 tp->rexmt_rescue = tp->rexmt_high; 3247 } 3248 } 3249 } 3250 if (SEQ_GT(old_snd_nxt, tp->snd_nxt)) 3251 tp->snd_nxt = old_snd_nxt; 3252 tp->snd_cwnd = ocwnd; 3253 } 3254 3255 /* 3256 * Return TRUE, if some new segments are sent 3257 */ 3258 static boolean_t 3259 tcp_sack_limitedxmit(struct tcpcb *tp) 3260 { 3261 tcp_seq oldsndnxt = tp->snd_nxt; 3262 tcp_seq oldsndmax = tp->snd_max; 3263 u_long ocwnd = tp->snd_cwnd; 3264 uint32_t pipe; 3265 boolean_t ret = FALSE; 3266 3267 tp->rexmt_high = tp->snd_una - 1; 3268 pipe = tcp_sack_compute_pipe(tp); 3269 while ((tcp_seq_diff_t)(ocwnd - pipe) >= (tcp_seq_diff_t)tp->t_maxseg) { 3270 uint32_t sent; 3271 tcp_seq next; 3272 int error; 3273 3274 next = tp->snd_nxt = tp->snd_max; 3275 tp->snd_cwnd = tp->snd_nxt - tp->snd_una + tp->t_maxseg; 3276 3277 error = tcp_output(tp); 3278 if (error) 3279 break; 3280 3281 sent = tp->snd_nxt - next; 3282 if (sent <= 0) 3283 break; 3284 pipe += sent; 3285 ++tcpstat.tcps_sndlimited; 3286 ret = TRUE; 3287 } 3288 3289 if (SEQ_LT(oldsndnxt, oldsndmax)) { 3290 KASSERT(SEQ_GEQ(oldsndnxt, tp->snd_una), 3291 ("snd_una moved in other threads")); 3292 tp->snd_nxt = oldsndnxt; 3293 } 3294 tp->snd_cwnd = ocwnd; 3295 3296 if (ret && TCP_DO_NCR(tp)) 3297 tcp_ncr_update_rxtthresh(tp); 3298 3299 return ret; 3300 } 3301 3302 /* 3303 * Reset idle time and keep-alive timer, typically called when a valid 3304 * tcp packet is received but may also be called when FASTKEEP is set 3305 * to prevent the previous long-timeout from calculating to a drop. 3306 * 3307 * Only update t_rcvtime for non-SYN packets. 3308 * 3309 * Handle the case where one side thinks the connection is established 3310 * but the other side has, say, rebooted without cleaning out the 3311 * connection. The SYNs could be construed as an attack and wind 3312 * up ignored, but in case it isn't an attack we can validate the 3313 * connection by forcing a keepalive. 3314 */ 3315 void 3316 tcp_timer_keep_activity(struct tcpcb *tp, int thflags) 3317 { 3318 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 3319 if ((thflags & (TH_SYN | TH_ACK)) == TH_SYN) { 3320 tp->t_flags |= TF_KEEPALIVE; 3321 tcp_callout_reset(tp, tp->tt_keep, hz / 2, 3322 tcp_timer_keep); 3323 } else { 3324 tp->t_rcvtime = ticks; 3325 tp->t_flags &= ~TF_KEEPALIVE; 3326 tcp_callout_reset(tp, tp->tt_keep, 3327 tp->t_keepidle, 3328 tcp_timer_keep); 3329 } 3330 } 3331 } 3332 3333 static int 3334 tcp_rmx_msl(const struct tcpcb *tp) 3335 { 3336 struct rtentry *rt; 3337 struct inpcb *inp = tp->t_inpcb; 3338 int msl; 3339 #ifdef INET6 3340 boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) ? TRUE : FALSE); 3341 #else 3342 const boolean_t isipv6 = FALSE; 3343 #endif 3344 3345 if (isipv6) 3346 rt = tcp_rtlookup6(&inp->inp_inc); 3347 else 3348 rt = tcp_rtlookup(&inp->inp_inc); 3349 if (rt == NULL || rt->rt_rmx.rmx_msl == 0) 3350 return tcp_msl; 3351 3352 msl = (rt->rt_rmx.rmx_msl * hz) / 1000; 3353 if (msl == 0) 3354 msl = 1; 3355 3356 return msl; 3357 } 3358 3359 static void 3360 tcp_established(struct tcpcb *tp) 3361 { 3362 tp->t_state = TCPS_ESTABLISHED; 3363 tcp_callout_reset(tp, tp->tt_keep, tp->t_keepidle, tcp_timer_keep); 3364 3365 if (tp->t_rxtsyn > 0) { 3366 /* 3367 * RFC6298: 3368 * "If the timer expires awaiting the ACK of a SYN segment 3369 * and the TCP implementation is using an RTO less than 3 3370 * seconds, the RTO MUST be re-initialized to 3 seconds 3371 * when data transmission begins" 3372 */ 3373 if (tp->t_rxtcur < TCPTV_RTOBASE3) 3374 tp->t_rxtcur = TCPTV_RTOBASE3; 3375 } 3376 } 3377 3378 /* 3379 * Returns TRUE, if the ACK should be dropped 3380 */ 3381 static boolean_t 3382 tcp_fast_recovery(struct tcpcb *tp, tcp_seq th_ack, const struct tcpopt *to) 3383 { 3384 tcpstat.tcps_rcvdupack++; 3385 3386 /* 3387 * We have outstanding data (other than a window probe), 3388 * this is a completely duplicate ack (ie, window info 3389 * didn't change), the ack is the biggest we've seen and 3390 * we've seen exactly our rexmt threshhold of them, so 3391 * assume a packet has been dropped and retransmit it. 3392 * Kludge snd_nxt & the congestion window so we send only 3393 * this one packet. 3394 */ 3395 if (IN_FASTRECOVERY(tp)) { 3396 if (TCP_DO_SACK(tp)) { 3397 /* No artifical cwnd inflation. */ 3398 tcp_sack_rexmt(tp); 3399 } else { 3400 /* 3401 * Dup acks mean that packets have left 3402 * the network (they're now cached at the 3403 * receiver) so bump cwnd by the amount in 3404 * the receiver to keep a constant cwnd 3405 * packets in the network. 3406 */ 3407 tp->snd_cwnd += tp->t_maxseg; 3408 tcp_output(tp); 3409 } 3410 return TRUE; 3411 } else if (SEQ_LT(th_ack, tp->snd_recover)) { 3412 tp->t_dupacks = 0; 3413 return FALSE; 3414 } else if (tcp_ignore_redun_dsack && TCP_DO_SACK(tp) && 3415 (to->to_flags & (TOF_DSACK | TOF_SACK_REDUNDANT)) == 3416 (TOF_DSACK | TOF_SACK_REDUNDANT)) { 3417 /* 3418 * If the ACK carries DSACK and other SACK blocks 3419 * carry information that we have already known, 3420 * don't count this ACK as duplicate ACK. This 3421 * prevents spurious early retransmit and fast 3422 * retransmit. This also meets the requirement of 3423 * RFC3042 that new segments should not be sent if 3424 * the SACK blocks do not contain new information 3425 * (XXX we actually loosen the requirment that only 3426 * DSACK is checked here). 3427 * 3428 * This kind of ACKs are usually sent after spurious 3429 * retransmit. 3430 */ 3431 /* Do nothing; don't change t_dupacks */ 3432 return TRUE; 3433 } else if (tp->t_dupacks == 0 && TCP_DO_NCR(tp)) { 3434 tcp_ncr_update_rxtthresh(tp); 3435 } 3436 3437 if (++tp->t_dupacks == tp->t_rxtthresh) { 3438 tcp_seq old_snd_nxt; 3439 u_int win; 3440 3441 fastretransmit: 3442 if (tcp_do_eifel_detect && (tp->t_flags & TF_RCVD_TSTMP)) { 3443 tcp_save_congestion_state(tp); 3444 tp->rxt_flags |= TRXT_F_FASTREXMT; 3445 } 3446 /* 3447 * We know we're losing at the current window size, 3448 * so do congestion avoidance: set ssthresh to half 3449 * the current window and pull our congestion window 3450 * back to the new ssthresh. 3451 */ 3452 win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg; 3453 if (win < 2) 3454 win = 2; 3455 tp->snd_ssthresh = win * tp->t_maxseg; 3456 ENTER_FASTRECOVERY(tp); 3457 tp->snd_recover = tp->snd_max; 3458 tcp_callout_stop(tp, tp->tt_rexmt); 3459 tp->t_rtttime = 0; 3460 old_snd_nxt = tp->snd_nxt; 3461 tp->snd_nxt = th_ack; 3462 tp->snd_cwnd = tp->t_maxseg; 3463 tcp_output(tp); 3464 ++tcpstat.tcps_sndfastrexmit; 3465 tp->snd_cwnd = tp->snd_ssthresh; 3466 tp->rexmt_high = tp->snd_nxt; 3467 tp->sack_flags &= ~TSACK_F_SACKRESCUED; 3468 if (SEQ_GT(old_snd_nxt, tp->snd_nxt)) 3469 tp->snd_nxt = old_snd_nxt; 3470 KASSERT(tp->snd_limited <= 2, ("tp->snd_limited too big")); 3471 if (TCP_DO_SACK(tp)) { 3472 tcp_sack_rexmt(tp); 3473 } else { 3474 tp->snd_cwnd += tp->t_maxseg * 3475 (tp->t_dupacks - tp->snd_limited); 3476 } 3477 } else if ((tcp_do_rfc3517bis && TCP_DO_SACK(tp)) || TCP_DO_NCR(tp)) { 3478 if (tcp_rfc3517bis_rxt && tcp_do_rfc3517bis && 3479 tcp_sack_islost(&tp->scb, tp->snd_una)) 3480 goto fastretransmit; 3481 if (tcp_do_limitedtransmit || TCP_DO_NCR(tp)) { 3482 /* outstanding data */ 3483 uint32_t ownd = tp->snd_max - tp->snd_una; 3484 3485 if (!tcp_sack_limitedxmit(tp) && 3486 need_early_retransmit(tp, ownd)) { 3487 ++tcpstat.tcps_sndearlyrexmit; 3488 tp->rxt_flags |= TRXT_F_EARLYREXMT; 3489 goto fastretransmit; 3490 } 3491 } 3492 } else if (tcp_do_limitedtransmit) { 3493 u_long oldcwnd = tp->snd_cwnd; 3494 tcp_seq oldsndmax = tp->snd_max; 3495 tcp_seq oldsndnxt = tp->snd_nxt; 3496 /* outstanding data */ 3497 uint32_t ownd = tp->snd_max - tp->snd_una; 3498 u_int sent; 3499 3500 KASSERT(tp->t_dupacks == 1 || tp->t_dupacks == 2, 3501 ("dupacks not 1 or 2")); 3502 if (tp->t_dupacks == 1) 3503 tp->snd_limited = 0; 3504 tp->snd_nxt = tp->snd_max; 3505 tp->snd_cwnd = ownd + 3506 (tp->t_dupacks - tp->snd_limited) * tp->t_maxseg; 3507 tcp_output(tp); 3508 3509 if (SEQ_LT(oldsndnxt, oldsndmax)) { 3510 KASSERT(SEQ_GEQ(oldsndnxt, tp->snd_una), 3511 ("snd_una moved in other threads")); 3512 tp->snd_nxt = oldsndnxt; 3513 } 3514 tp->snd_cwnd = oldcwnd; 3515 sent = tp->snd_max - oldsndmax; 3516 if (sent > tp->t_maxseg) { 3517 KASSERT((tp->t_dupacks == 2 && tp->snd_limited == 0) || 3518 (sent == tp->t_maxseg + 1 && 3519 (tp->t_flags & TF_SENTFIN)), 3520 ("sent too much")); 3521 KASSERT(sent <= tp->t_maxseg * 2, 3522 ("sent too many segments")); 3523 tp->snd_limited = 2; 3524 tcpstat.tcps_sndlimited += 2; 3525 } else if (sent > 0) { 3526 ++tp->snd_limited; 3527 ++tcpstat.tcps_sndlimited; 3528 } else if (need_early_retransmit(tp, ownd)) { 3529 ++tcpstat.tcps_sndearlyrexmit; 3530 tp->rxt_flags |= TRXT_F_EARLYREXMT; 3531 goto fastretransmit; 3532 } 3533 } 3534 return TRUE; 3535 } 3536