1 /* 2 * Copyright (c) 2002, 2003, 2004 Jeffrey M. Hsu. All rights reserved. 3 * Copyright (c) 2002, 2003, 2004 The DragonFly Project. All rights reserved. 4 * 5 * This code is derived from software contributed to The DragonFly Project 6 * by Jeffrey M. Hsu. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the name of The DragonFly Project nor the names of its 17 * contributors may be used to endorse or promote products derived 18 * from this software without specific, prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 25 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 28 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 29 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 32 */ 33 34 /* 35 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995 36 * The Regents of the University of California. All rights reserved. 37 * 38 * Redistribution and use in source and binary forms, with or without 39 * modification, are permitted provided that the following conditions 40 * are met: 41 * 1. Redistributions of source code must retain the above copyright 42 * notice, this list of conditions and the following disclaimer. 43 * 2. Redistributions in binary form must reproduce the above copyright 44 * notice, this list of conditions and the following disclaimer in the 45 * documentation and/or other materials provided with the distribution. 46 * 3. All advertising materials mentioning features or use of this software 47 * must display the following acknowledgement: 48 * This product includes software developed by the University of 49 * California, Berkeley and its contributors. 50 * 4. Neither the name of the University nor the names of its contributors 51 * may be used to endorse or promote products derived from this software 52 * without specific prior written permission. 53 * 54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 64 * SUCH DAMAGE. 65 * 66 * @(#)tcp_input.c 8.12 (Berkeley) 5/24/95 67 * $FreeBSD: src/sys/netinet/tcp_input.c,v 1.107.2.38 2003/05/21 04:46:41 cjc Exp $ 68 */ 69 70 #include "opt_inet.h" 71 #include "opt_inet6.h" 72 #include "opt_ipsec.h" 73 #include "opt_tcpdebug.h" 74 #include "opt_tcp_input.h" 75 76 #include <sys/param.h> 77 #include <sys/systm.h> 78 #include <sys/kernel.h> 79 #include <sys/sysctl.h> 80 #include <sys/malloc.h> 81 #include <sys/mbuf.h> 82 #include <sys/proc.h> /* for proc0 declaration */ 83 #include <sys/protosw.h> 84 #include <sys/socket.h> 85 #include <sys/socketvar.h> 86 #include <sys/syslog.h> 87 #include <sys/in_cksum.h> 88 89 #include <sys/socketvar2.h> 90 91 #include <machine/cpu.h> /* before tcp_seq.h, for tcp_random18() */ 92 #include <machine/stdarg.h> 93 94 #include <net/if.h> 95 #include <net/route.h> 96 97 #include <netinet/in.h> 98 #include <netinet/in_systm.h> 99 #include <netinet/ip.h> 100 #include <netinet/ip_icmp.h> /* for ICMP_BANDLIM */ 101 #include <netinet/in_var.h> 102 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */ 103 #include <netinet/in_pcb.h> 104 #include <netinet/ip_var.h> 105 #include <netinet/ip6.h> 106 #include <netinet/icmp6.h> 107 #include <netinet6/nd6.h> 108 #include <netinet6/ip6_var.h> 109 #include <netinet6/in6_pcb.h> 110 #include <netinet/tcp.h> 111 #include <netinet/tcp_fsm.h> 112 #include <netinet/tcp_seq.h> 113 #include <netinet/tcp_timer.h> 114 #include <netinet/tcp_timer2.h> 115 #include <netinet/tcp_var.h> 116 #include <netinet6/tcp6_var.h> 117 #include <netinet/tcpip.h> 118 119 #ifdef TCPDEBUG 120 #include <netinet/tcp_debug.h> 121 122 u_char tcp_saveipgen[40]; /* the size must be of max ip header, now IPv6 */ 123 struct tcphdr tcp_savetcp; 124 #endif 125 126 #ifdef FAST_IPSEC 127 #include <netproto/ipsec/ipsec.h> 128 #include <netproto/ipsec/ipsec6.h> 129 #endif 130 131 #ifdef IPSEC 132 #include <netinet6/ipsec.h> 133 #include <netinet6/ipsec6.h> 134 #include <netproto/key/key.h> 135 #endif 136 137 MALLOC_DEFINE(M_TSEGQ, "tseg_qent", "TCP segment queue entry"); 138 139 static int log_in_vain = 0; 140 SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW, 141 &log_in_vain, 0, "Log all incoming TCP connections"); 142 143 static int blackhole = 0; 144 SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW, 145 &blackhole, 0, "Do not send RST when dropping refused connections"); 146 147 int tcp_delack_enabled = 1; 148 SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW, 149 &tcp_delack_enabled, 0, 150 "Delay ACK to try and piggyback it onto a data packet"); 151 152 #ifdef TCP_DROP_SYNFIN 153 static int drop_synfin = 0; 154 SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_RW, 155 &drop_synfin, 0, "Drop TCP packets with SYN+FIN set"); 156 #endif 157 158 static int tcp_do_limitedtransmit = 1; 159 SYSCTL_INT(_net_inet_tcp, OID_AUTO, limitedtransmit, CTLFLAG_RW, 160 &tcp_do_limitedtransmit, 0, "Enable RFC 3042 (Limited Transmit)"); 161 162 static int tcp_do_early_retransmit = 1; 163 SYSCTL_INT(_net_inet_tcp, OID_AUTO, earlyretransmit, CTLFLAG_RW, 164 &tcp_do_early_retransmit, 0, "Early retransmit"); 165 166 int tcp_aggregate_acks = 1; 167 SYSCTL_INT(_net_inet_tcp, OID_AUTO, aggregate_acks, CTLFLAG_RW, 168 &tcp_aggregate_acks, 0, "Aggregate built-up acks into one ack"); 169 170 static int tcp_do_eifel_detect = 1; 171 SYSCTL_INT(_net_inet_tcp, OID_AUTO, eifel, CTLFLAG_RW, 172 &tcp_do_eifel_detect, 0, "Eifel detection algorithm (RFC 3522)"); 173 174 static int tcp_do_abc = 1; 175 SYSCTL_INT(_net_inet_tcp, OID_AUTO, abc, CTLFLAG_RW, 176 &tcp_do_abc, 0, 177 "TCP Appropriate Byte Counting (RFC 3465)"); 178 179 /* 180 * The following value actually takes range [25ms, 250ms], 181 * given that most modern systems use 1ms ~ 10ms as the unit 182 * of timestamp option. 183 */ 184 static u_int tcp_paws_tolerance = 25; 185 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, paws_tolerance, CTLFLAG_RW, 186 &tcp_paws_tolerance, 0, "RFC1323 PAWS tolerance"); 187 188 /* 189 * Define as tunable for easy testing with SACK on and off. 190 * Warning: do not change setting in the middle of an existing active TCP flow, 191 * else strange things might happen to that flow. 192 */ 193 int tcp_do_sack = 1; 194 SYSCTL_INT(_net_inet_tcp, OID_AUTO, sack, CTLFLAG_RW, 195 &tcp_do_sack, 0, "Enable SACK Algorithms"); 196 197 int tcp_do_smartsack = 1; 198 SYSCTL_INT(_net_inet_tcp, OID_AUTO, smartsack, CTLFLAG_RW, 199 &tcp_do_smartsack, 0, "Enable Smart SACK Algorithms"); 200 201 int tcp_do_rescuesack = 1; 202 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rescuesack, CTLFLAG_RW, 203 &tcp_do_rescuesack, 0, "Rescue retransmission for SACK"); 204 205 int tcp_aggressive_rescuesack = 0; 206 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rescuesack_agg, CTLFLAG_RW, 207 &tcp_aggressive_rescuesack, 0, "Aggressive rescue retransmission for SACK"); 208 209 static int tcp_force_sackrxt = 1; 210 SYSCTL_INT(_net_inet_tcp, OID_AUTO, force_sackrxt, CTLFLAG_RW, 211 &tcp_force_sackrxt, 0, "Allowed forced SACK retransmit burst"); 212 213 int tcp_do_rfc3517bis = 1; 214 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3517bis, CTLFLAG_RW, 215 &tcp_do_rfc3517bis, 0, "Enable RFC3517 update"); 216 217 int tcp_rfc3517bis_rxt = 0; 218 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3517bis_rxt, CTLFLAG_RW, 219 &tcp_rfc3517bis_rxt, 0, "Enable RFC3517 retransmit update"); 220 221 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, reass, CTLFLAG_RW, 0, 222 "TCP Segment Reassembly Queue"); 223 224 int tcp_reass_maxseg = 0; 225 SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, maxsegments, CTLFLAG_RD, 226 &tcp_reass_maxseg, 0, 227 "Global maximum number of TCP Segments in Reassembly Queue"); 228 229 int tcp_reass_qsize = 0; 230 SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, cursegments, CTLFLAG_RD, 231 &tcp_reass_qsize, 0, 232 "Global number of TCP Segments currently in Reassembly Queue"); 233 234 static int tcp_reass_overflows = 0; 235 SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, overflows, CTLFLAG_RD, 236 &tcp_reass_overflows, 0, 237 "Global number of TCP Segment Reassembly Queue Overflows"); 238 239 int tcp_do_autorcvbuf = 1; 240 SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_RW, 241 &tcp_do_autorcvbuf, 0, "Enable automatic receive buffer sizing"); 242 243 int tcp_autorcvbuf_inc = 16*1024; 244 SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_RW, 245 &tcp_autorcvbuf_inc, 0, 246 "Incrementor step size of automatic receive buffer"); 247 248 int tcp_autorcvbuf_max = 2*1024*1024; 249 SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_RW, 250 &tcp_autorcvbuf_max, 0, "Max size of automatic receive buffer"); 251 252 int tcp_sosend_agglim = 2; 253 SYSCTL_INT(_net_inet_tcp, OID_AUTO, sosend_agglim, CTLFLAG_RW, 254 &tcp_sosend_agglim, 0, "TCP sosend mbuf aggregation limit"); 255 256 int tcp_sosend_async = 1; 257 SYSCTL_INT(_net_inet_tcp, OID_AUTO, sosend_async, CTLFLAG_RW, 258 &tcp_sosend_async, 0, "TCP asynchronized pru_send"); 259 260 static int tcp_ignore_redun_dsack = 1; 261 SYSCTL_INT(_net_inet_tcp, OID_AUTO, ignore_redun_dsack, CTLFLAG_RW, 262 &tcp_ignore_redun_dsack, 0, "Ignore redundant DSACK"); 263 264 static void tcp_dooptions(struct tcpopt *, u_char *, int, boolean_t, 265 tcp_seq); 266 static void tcp_pulloutofband(struct socket *, 267 struct tcphdr *, struct mbuf *, int); 268 static int tcp_reass(struct tcpcb *, struct tcphdr *, int *, 269 struct mbuf *); 270 static void tcp_xmit_timer(struct tcpcb *, int, tcp_seq); 271 static void tcp_newreno_partial_ack(struct tcpcb *, struct tcphdr *, int); 272 static void tcp_sack_rexmt(struct tcpcb *, boolean_t); 273 static boolean_t tcp_sack_limitedxmit(struct tcpcb *); 274 static int tcp_rmx_msl(const struct tcpcb *); 275 static void tcp_established(struct tcpcb *); 276 static boolean_t tcp_recv_dupack(struct tcpcb *, tcp_seq, 277 const struct tcpopt *); 278 279 /* Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. */ 280 #ifdef INET6 281 #define ND6_HINT(tp) \ 282 do { \ 283 if ((tp) && (tp)->t_inpcb && \ 284 ((tp)->t_inpcb->inp_vflag & INP_IPV6) && \ 285 (tp)->t_inpcb->in6p_route.ro_rt) \ 286 nd6_nud_hint((tp)->t_inpcb->in6p_route.ro_rt, NULL, 0); \ 287 } while (0) 288 #else 289 #define ND6_HINT(tp) 290 #endif 291 292 /* 293 * Indicate whether this ack should be delayed. We can delay the ack if 294 * - delayed acks are enabled and 295 * - there is no delayed ack timer in progress and 296 * - our last ack wasn't a 0-sized window. We never want to delay 297 * the ack that opens up a 0-sized window. 298 */ 299 #define DELAY_ACK(tp) \ 300 (tcp_delack_enabled && !tcp_callout_pending(tp, tp->tt_delack) && \ 301 !(tp->t_flags & TF_RXWIN0SENT)) 302 303 #define acceptable_window_update(tp, th, tiwin) \ 304 (SEQ_LT(tp->snd_wl1, th->th_seq) || \ 305 (tp->snd_wl1 == th->th_seq && \ 306 (SEQ_LT(tp->snd_wl2, th->th_ack) || \ 307 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)))) 308 309 #define iceildiv(n, d) (((n)+(d)-1) / (d)) 310 #define need_early_retransmit(tp, ownd) \ 311 (tcp_do_early_retransmit && \ 312 (tcp_do_eifel_detect && (tp->t_flags & TF_RCVD_TSTMP)) && \ 313 ownd < ((tp->t_rxtthresh + 1) * tp->t_maxseg) && \ 314 tp->t_dupacks + 1 >= iceildiv(ownd, tp->t_maxseg) && \ 315 (!TCP_DO_SACK(tp) || ownd <= tp->t_maxseg || \ 316 tcp_sack_has_sacked(&tp->scb, ownd - tp->t_maxseg))) 317 318 /* 319 * Returns TRUE, if this segment can be merged with the last 320 * pending segment in the reassemble queue and this segment 321 * does not overlap with the pending segment immediately 322 * preceeding the last pending segment. 323 */ 324 static __inline boolean_t 325 tcp_paws_canreasslast(const struct tcpcb *tp, const struct tcphdr *th, int tlen) 326 { 327 const struct tseg_qent *last, *prev; 328 329 last = TAILQ_LAST(&tp->t_segq, tsegqe_head); 330 if (last == NULL) 331 return FALSE; 332 333 /* This segment comes immediately after the last pending segment */ 334 if (last->tqe_th->th_seq + last->tqe_len == th->th_seq) 335 return TRUE; 336 337 if (th->th_seq + tlen != last->tqe_th->th_seq) 338 return FALSE; 339 /* This segment comes immediately before the last pending segment */ 340 341 prev = TAILQ_PREV(last, tsegqe_head, tqe_q); 342 if (prev == NULL) { 343 /* 344 * No pending preceeding segment, we assume this segment 345 * could be reassembled. 346 */ 347 return TRUE; 348 } 349 350 /* This segment does not overlap with the preceeding segment */ 351 if (SEQ_GEQ(th->th_seq, prev->tqe_th->th_seq + prev->tqe_len)) 352 return TRUE; 353 354 return FALSE; 355 } 356 357 static __inline void 358 tcp_ncr_update_rxtthresh(struct tcpcb *tp) 359 { 360 int old_rxtthresh = tp->t_rxtthresh; 361 uint32_t ownd = tp->snd_max - tp->snd_una; 362 363 tp->t_rxtthresh = max(3, ((ownd / tp->t_maxseg) >> 1)); 364 if (tp->t_rxtthresh != old_rxtthresh) { 365 tcp_sack_update_lostseq(&tp->scb, tp->snd_una, 366 tp->t_maxseg, tp->t_rxtthresh); 367 } 368 } 369 370 static int 371 tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m) 372 { 373 struct tseg_qent *q; 374 struct tseg_qent *p = NULL; 375 struct tseg_qent *te; 376 struct socket *so = tp->t_inpcb->inp_socket; 377 int flags; 378 379 /* 380 * Call with th == NULL after become established to 381 * force pre-ESTABLISHED data up to user socket. 382 */ 383 if (th == NULL) 384 goto present; 385 386 /* 387 * Limit the number of segments in the reassembly queue to prevent 388 * holding on to too many segments (and thus running out of mbufs). 389 * Make sure to let the missing segment through which caused this 390 * queue. Always keep one global queue entry spare to be able to 391 * process the missing segment. 392 */ 393 if (th->th_seq != tp->rcv_nxt && 394 tcp_reass_qsize + 1 >= tcp_reass_maxseg) { 395 tcp_reass_overflows++; 396 tcpstat.tcps_rcvmemdrop++; 397 m_freem(m); 398 /* no SACK block to report */ 399 tp->reportblk.rblk_start = tp->reportblk.rblk_end; 400 return (0); 401 } 402 403 /* Allocate a new queue entry. */ 404 te = kmalloc(sizeof(struct tseg_qent), M_TSEGQ, M_INTWAIT | M_NULLOK); 405 if (te == NULL) { 406 tcpstat.tcps_rcvmemdrop++; 407 m_freem(m); 408 /* no SACK block to report */ 409 tp->reportblk.rblk_start = tp->reportblk.rblk_end; 410 return (0); 411 } 412 atomic_add_int(&tcp_reass_qsize, 1); 413 414 /* 415 * Find a segment which begins after this one does. 416 */ 417 TAILQ_FOREACH(q, &tp->t_segq, tqe_q) { 418 if (SEQ_GT(q->tqe_th->th_seq, th->th_seq)) 419 break; 420 p = q; 421 } 422 423 /* 424 * If there is a preceding segment, it may provide some of 425 * our data already. If so, drop the data from the incoming 426 * segment. If it provides all of our data, drop us. 427 */ 428 if (p != NULL) { 429 tcp_seq_diff_t i; 430 431 /* conversion to int (in i) handles seq wraparound */ 432 i = p->tqe_th->th_seq + p->tqe_len - th->th_seq; 433 if (i > 0) { /* overlaps preceding segment */ 434 tp->sack_flags |= 435 (TSACK_F_DUPSEG | TSACK_F_ENCLOSESEG); 436 /* enclosing block starts w/ preceding segment */ 437 tp->encloseblk.rblk_start = p->tqe_th->th_seq; 438 if (i >= *tlenp) { 439 /* preceding encloses incoming segment */ 440 tp->encloseblk.rblk_end = TCP_SACK_BLKEND( 441 p->tqe_th->th_seq + p->tqe_len, 442 p->tqe_th->th_flags); 443 tcpstat.tcps_rcvduppack++; 444 tcpstat.tcps_rcvdupbyte += *tlenp; 445 m_freem(m); 446 kfree(te, M_TSEGQ); 447 atomic_add_int(&tcp_reass_qsize, -1); 448 /* 449 * Try to present any queued data 450 * at the left window edge to the user. 451 * This is needed after the 3-WHS 452 * completes. 453 */ 454 goto present; /* ??? */ 455 } 456 m_adj(m, i); 457 *tlenp -= i; 458 th->th_seq += i; 459 /* incoming segment end is enclosing block end */ 460 tp->encloseblk.rblk_end = TCP_SACK_BLKEND( 461 th->th_seq + *tlenp, th->th_flags); 462 /* trim end of reported D-SACK block */ 463 tp->reportblk.rblk_end = th->th_seq; 464 } 465 } 466 tcpstat.tcps_rcvoopack++; 467 tcpstat.tcps_rcvoobyte += *tlenp; 468 469 /* 470 * While we overlap succeeding segments trim them or, 471 * if they are completely covered, dequeue them. 472 */ 473 while (q) { 474 tcp_seq_diff_t i = (th->th_seq + *tlenp) - q->tqe_th->th_seq; 475 tcp_seq qend = q->tqe_th->th_seq + q->tqe_len; 476 tcp_seq qend_sack = TCP_SACK_BLKEND(qend, q->tqe_th->th_flags); 477 struct tseg_qent *nq; 478 479 if (i <= 0) 480 break; 481 if (!(tp->sack_flags & TSACK_F_DUPSEG)) { 482 /* first time through */ 483 tp->sack_flags |= (TSACK_F_DUPSEG | TSACK_F_ENCLOSESEG); 484 tp->encloseblk = tp->reportblk; 485 /* report trailing duplicate D-SACK segment */ 486 tp->reportblk.rblk_start = q->tqe_th->th_seq; 487 } 488 if ((tp->sack_flags & TSACK_F_ENCLOSESEG) && 489 SEQ_GT(qend_sack, tp->encloseblk.rblk_end)) { 490 /* extend enclosing block if one exists */ 491 tp->encloseblk.rblk_end = qend_sack; 492 } 493 if (i < q->tqe_len) { 494 q->tqe_th->th_seq += i; 495 q->tqe_len -= i; 496 m_adj(q->tqe_m, i); 497 break; 498 } 499 500 nq = TAILQ_NEXT(q, tqe_q); 501 TAILQ_REMOVE(&tp->t_segq, q, tqe_q); 502 m_freem(q->tqe_m); 503 kfree(q, M_TSEGQ); 504 atomic_add_int(&tcp_reass_qsize, -1); 505 q = nq; 506 } 507 508 /* Insert the new segment queue entry into place. */ 509 te->tqe_m = m; 510 te->tqe_th = th; 511 te->tqe_len = *tlenp; 512 513 /* check if can coalesce with following segment */ 514 if (q != NULL && (th->th_seq + *tlenp == q->tqe_th->th_seq)) { 515 tcp_seq tend = te->tqe_th->th_seq + te->tqe_len; 516 tcp_seq tend_sack = TCP_SACK_BLKEND(tend, te->tqe_th->th_flags); 517 518 te->tqe_len += q->tqe_len; 519 if (q->tqe_th->th_flags & TH_FIN) 520 te->tqe_th->th_flags |= TH_FIN; 521 m_cat(te->tqe_m, q->tqe_m); 522 tp->encloseblk.rblk_end = tend_sack; 523 /* 524 * When not reporting a duplicate segment, use 525 * the larger enclosing block as the SACK block. 526 */ 527 if (!(tp->sack_flags & TSACK_F_DUPSEG)) 528 tp->reportblk.rblk_end = tend_sack; 529 TAILQ_REMOVE(&tp->t_segq, q, tqe_q); 530 kfree(q, M_TSEGQ); 531 atomic_add_int(&tcp_reass_qsize, -1); 532 } 533 534 if (p == NULL) { 535 TAILQ_INSERT_HEAD(&tp->t_segq, te, tqe_q); 536 } else { 537 /* check if can coalesce with preceding segment */ 538 if (p->tqe_th->th_seq + p->tqe_len == th->th_seq) { 539 p->tqe_len += te->tqe_len; 540 m_cat(p->tqe_m, te->tqe_m); 541 tp->encloseblk.rblk_start = p->tqe_th->th_seq; 542 /* 543 * When not reporting a duplicate segment, use 544 * the larger enclosing block as the SACK block. 545 */ 546 if (!(tp->sack_flags & TSACK_F_DUPSEG)) 547 tp->reportblk.rblk_start = p->tqe_th->th_seq; 548 kfree(te, M_TSEGQ); 549 atomic_add_int(&tcp_reass_qsize, -1); 550 } else { 551 TAILQ_INSERT_AFTER(&tp->t_segq, p, te, tqe_q); 552 } 553 } 554 555 present: 556 /* 557 * Present data to user, advancing rcv_nxt through 558 * completed sequence space. 559 */ 560 if (!TCPS_HAVEESTABLISHED(tp->t_state)) 561 return (0); 562 q = TAILQ_FIRST(&tp->t_segq); 563 if (q == NULL || q->tqe_th->th_seq != tp->rcv_nxt) 564 return (0); 565 tp->rcv_nxt += q->tqe_len; 566 if (!(tp->sack_flags & TSACK_F_DUPSEG)) { 567 /* no SACK block to report since ACK advanced */ 568 tp->reportblk.rblk_start = tp->reportblk.rblk_end; 569 } 570 /* no enclosing block to report since ACK advanced */ 571 tp->sack_flags &= ~TSACK_F_ENCLOSESEG; 572 flags = q->tqe_th->th_flags & TH_FIN; 573 TAILQ_REMOVE(&tp->t_segq, q, tqe_q); 574 KASSERT(TAILQ_EMPTY(&tp->t_segq) || 575 TAILQ_FIRST(&tp->t_segq)->tqe_th->th_seq != tp->rcv_nxt, 576 ("segment not coalesced")); 577 if (so->so_state & SS_CANTRCVMORE) { 578 m_freem(q->tqe_m); 579 } else { 580 lwkt_gettoken(&so->so_rcv.ssb_token); 581 ssb_appendstream(&so->so_rcv, q->tqe_m); 582 lwkt_reltoken(&so->so_rcv.ssb_token); 583 } 584 kfree(q, M_TSEGQ); 585 atomic_add_int(&tcp_reass_qsize, -1); 586 ND6_HINT(tp); 587 sorwakeup(so); 588 return (flags); 589 } 590 591 /* 592 * TCP input routine, follows pages 65-76 of the 593 * protocol specification dated September, 1981 very closely. 594 */ 595 #ifdef INET6 596 int 597 tcp6_input(struct mbuf **mp, int *offp, int proto) 598 { 599 struct mbuf *m = *mp; 600 struct in6_ifaddr *ia6; 601 602 IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE); 603 604 /* 605 * draft-itojun-ipv6-tcp-to-anycast 606 * better place to put this in? 607 */ 608 ia6 = ip6_getdstifaddr(m); 609 if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) { 610 struct ip6_hdr *ip6; 611 612 ip6 = mtod(m, struct ip6_hdr *); 613 icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR, 614 offsetof(struct ip6_hdr, ip6_dst)); 615 return (IPPROTO_DONE); 616 } 617 618 tcp_input(mp, offp, proto); 619 return (IPPROTO_DONE); 620 } 621 #endif 622 623 int 624 tcp_input(struct mbuf **mp, int *offp, int proto) 625 { 626 int off0; 627 struct tcphdr *th; 628 struct ip *ip = NULL; 629 struct ipovly *ipov; 630 struct inpcb *inp = NULL; 631 u_char *optp = NULL; 632 int optlen = 0; 633 int tlen, off; 634 int len = 0; 635 int drop_hdrlen; 636 struct tcpcb *tp = NULL; 637 int thflags; 638 struct socket *so = NULL; 639 int todrop, acked; 640 boolean_t ourfinisacked, needoutput = FALSE, delayed_dupack = FALSE; 641 tcp_seq th_dupack = 0; /* XXX gcc warning */ 642 u_long tiwin; 643 int recvwin; 644 struct tcpopt to; /* options in this segment */ 645 struct sockaddr_in *next_hop = NULL; 646 int rstreason; /* For badport_bandlim accounting purposes */ 647 int cpu; 648 struct ip6_hdr *ip6 = NULL; 649 struct mbuf *m; 650 #ifdef INET6 651 boolean_t isipv6; 652 #else 653 const boolean_t isipv6 = FALSE; 654 #endif 655 #ifdef TCPDEBUG 656 short ostate = 0; 657 #endif 658 659 off0 = *offp; 660 m = *mp; 661 *mp = NULL; 662 663 tcpstat.tcps_rcvtotal++; 664 665 if (m->m_pkthdr.fw_flags & IPFORWARD_MBUF_TAGGED) { 666 struct m_tag *mtag; 667 668 mtag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL); 669 KKASSERT(mtag != NULL); 670 next_hop = m_tag_data(mtag); 671 } 672 673 #ifdef INET6 674 isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? TRUE : FALSE; 675 #endif 676 677 if (isipv6) { 678 /* IP6_EXTHDR_CHECK() is already done at tcp6_input() */ 679 ip6 = mtod(m, struct ip6_hdr *); 680 tlen = (sizeof *ip6) + ntohs(ip6->ip6_plen) - off0; 681 if (in6_cksum(m, IPPROTO_TCP, off0, tlen)) { 682 tcpstat.tcps_rcvbadsum++; 683 goto drop; 684 } 685 th = (struct tcphdr *)((caddr_t)ip6 + off0); 686 687 /* 688 * Be proactive about unspecified IPv6 address in source. 689 * As we use all-zero to indicate unbounded/unconnected pcb, 690 * unspecified IPv6 address can be used to confuse us. 691 * 692 * Note that packets with unspecified IPv6 destination is 693 * already dropped in ip6_input. 694 */ 695 if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) { 696 /* XXX stat */ 697 goto drop; 698 } 699 } else { 700 /* 701 * Get IP and TCP header together in first mbuf. 702 * Note: IP leaves IP header in first mbuf. 703 */ 704 if (off0 > sizeof(struct ip)) { 705 ip_stripoptions(m); 706 off0 = sizeof(struct ip); 707 } 708 /* already checked and pulled up in ip_demux() */ 709 KASSERT(m->m_len >= sizeof(struct tcpiphdr), 710 ("TCP header not in one mbuf: m->m_len %d", m->m_len)); 711 ip = mtod(m, struct ip *); 712 ipov = (struct ipovly *)ip; 713 th = (struct tcphdr *)((caddr_t)ip + off0); 714 tlen = ip->ip_len; 715 716 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) { 717 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) 718 th->th_sum = m->m_pkthdr.csum_data; 719 else 720 th->th_sum = in_pseudo(ip->ip_src.s_addr, 721 ip->ip_dst.s_addr, 722 htonl(m->m_pkthdr.csum_data + 723 ip->ip_len + 724 IPPROTO_TCP)); 725 th->th_sum ^= 0xffff; 726 } else { 727 /* 728 * Checksum extended TCP header and data. 729 */ 730 len = sizeof(struct ip) + tlen; 731 bzero(ipov->ih_x1, sizeof ipov->ih_x1); 732 ipov->ih_len = (u_short)tlen; 733 ipov->ih_len = htons(ipov->ih_len); 734 th->th_sum = in_cksum(m, len); 735 } 736 if (th->th_sum) { 737 tcpstat.tcps_rcvbadsum++; 738 goto drop; 739 } 740 #ifdef INET6 741 /* Re-initialization for later version check */ 742 ip->ip_v = IPVERSION; 743 #endif 744 } 745 746 /* 747 * Check that TCP offset makes sense, 748 * pull out TCP options and adjust length. XXX 749 */ 750 off = th->th_off << 2; 751 /* already checked and pulled up in ip_demux() */ 752 KASSERT(off >= sizeof(struct tcphdr) && off <= tlen, 753 ("bad TCP data offset %d (tlen %d)", off, tlen)); 754 tlen -= off; /* tlen is used instead of ti->ti_len */ 755 if (off > sizeof(struct tcphdr)) { 756 if (isipv6) { 757 IP6_EXTHDR_CHECK(m, off0, off, IPPROTO_DONE); 758 ip6 = mtod(m, struct ip6_hdr *); 759 th = (struct tcphdr *)((caddr_t)ip6 + off0); 760 } else { 761 /* already pulled up in ip_demux() */ 762 KASSERT(m->m_len >= sizeof(struct ip) + off, 763 ("TCP header and options not in one mbuf: " 764 "m_len %d, off %d", m->m_len, off)); 765 } 766 optlen = off - sizeof(struct tcphdr); 767 optp = (u_char *)(th + 1); 768 } 769 thflags = th->th_flags; 770 771 #ifdef TCP_DROP_SYNFIN 772 /* 773 * If the drop_synfin option is enabled, drop all packets with 774 * both the SYN and FIN bits set. This prevents e.g. nmap from 775 * identifying the TCP/IP stack. 776 * 777 * This is a violation of the TCP specification. 778 */ 779 if (drop_synfin && (thflags & (TH_SYN | TH_FIN)) == (TH_SYN | TH_FIN)) 780 goto drop; 781 #endif 782 783 /* 784 * Convert TCP protocol specific fields to host format. 785 */ 786 th->th_seq = ntohl(th->th_seq); 787 th->th_ack = ntohl(th->th_ack); 788 th->th_win = ntohs(th->th_win); 789 th->th_urp = ntohs(th->th_urp); 790 791 /* 792 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options, 793 * until after ip6_savecontrol() is called and before other functions 794 * which don't want those proto headers. 795 * Because ip6_savecontrol() is going to parse the mbuf to 796 * search for data to be passed up to user-land, it wants mbuf 797 * parameters to be unchanged. 798 * XXX: the call of ip6_savecontrol() has been obsoleted based on 799 * latest version of the advanced API (20020110). 800 */ 801 drop_hdrlen = off0 + off; 802 803 /* 804 * Locate pcb for segment. 805 */ 806 findpcb: 807 /* IPFIREWALL_FORWARD section */ 808 if (next_hop != NULL && !isipv6) { /* IPv6 support is not there yet */ 809 /* 810 * Transparently forwarded. Pretend to be the destination. 811 * already got one like this? 812 */ 813 cpu = mycpu->gd_cpuid; 814 inp = in_pcblookup_hash(&tcbinfo[cpu], 815 ip->ip_src, th->th_sport, 816 ip->ip_dst, th->th_dport, 817 0, m->m_pkthdr.rcvif); 818 if (!inp) { 819 /* 820 * It's new. Try to find the ambushing socket. 821 */ 822 823 /* 824 * The rest of the ipfw code stores the port in 825 * host order. XXX 826 * (The IP address is still in network order.) 827 */ 828 in_port_t dport = next_hop->sin_port ? 829 htons(next_hop->sin_port) : 830 th->th_dport; 831 832 cpu = tcp_addrcpu(ip->ip_src.s_addr, th->th_sport, 833 next_hop->sin_addr.s_addr, dport); 834 inp = in_pcblookup_hash(&tcbinfo[cpu], 835 ip->ip_src, th->th_sport, 836 next_hop->sin_addr, dport, 837 1, m->m_pkthdr.rcvif); 838 } 839 } else { 840 if (isipv6) { 841 inp = in6_pcblookup_hash(&tcbinfo[0], 842 &ip6->ip6_src, th->th_sport, 843 &ip6->ip6_dst, th->th_dport, 844 1, m->m_pkthdr.rcvif); 845 } else { 846 cpu = mycpu->gd_cpuid; 847 inp = in_pcblookup_hash(&tcbinfo[cpu], 848 ip->ip_src, th->th_sport, 849 ip->ip_dst, th->th_dport, 850 1, m->m_pkthdr.rcvif); 851 } 852 } 853 854 /* 855 * If the state is CLOSED (i.e., TCB does not exist) then 856 * all data in the incoming segment is discarded. 857 * If the TCB exists but is in CLOSED state, it is embryonic, 858 * but should either do a listen or a connect soon. 859 */ 860 if (inp == NULL) { 861 if (log_in_vain) { 862 #ifdef INET6 863 char dbuf[INET6_ADDRSTRLEN+2], sbuf[INET6_ADDRSTRLEN+2]; 864 #else 865 char dbuf[sizeof "aaa.bbb.ccc.ddd"]; 866 char sbuf[sizeof "aaa.bbb.ccc.ddd"]; 867 #endif 868 if (isipv6) { 869 strcpy(dbuf, "["); 870 strcat(dbuf, ip6_sprintf(&ip6->ip6_dst)); 871 strcat(dbuf, "]"); 872 strcpy(sbuf, "["); 873 strcat(sbuf, ip6_sprintf(&ip6->ip6_src)); 874 strcat(sbuf, "]"); 875 } else { 876 strcpy(dbuf, inet_ntoa(ip->ip_dst)); 877 strcpy(sbuf, inet_ntoa(ip->ip_src)); 878 } 879 switch (log_in_vain) { 880 case 1: 881 if (!(thflags & TH_SYN)) 882 break; 883 case 2: 884 log(LOG_INFO, 885 "Connection attempt to TCP %s:%d " 886 "from %s:%d flags:0x%02x\n", 887 dbuf, ntohs(th->th_dport), sbuf, 888 ntohs(th->th_sport), thflags); 889 break; 890 default: 891 break; 892 } 893 } 894 if (blackhole) { 895 switch (blackhole) { 896 case 1: 897 if (thflags & TH_SYN) 898 goto drop; 899 break; 900 case 2: 901 goto drop; 902 default: 903 goto drop; 904 } 905 } 906 rstreason = BANDLIM_RST_CLOSEDPORT; 907 goto dropwithreset; 908 } 909 910 #ifdef IPSEC 911 if (isipv6) { 912 if (ipsec6_in_reject_so(m, inp->inp_socket)) { 913 ipsec6stat.in_polvio++; 914 goto drop; 915 } 916 } else { 917 if (ipsec4_in_reject_so(m, inp->inp_socket)) { 918 ipsecstat.in_polvio++; 919 goto drop; 920 } 921 } 922 #endif 923 #ifdef FAST_IPSEC 924 if (isipv6) { 925 if (ipsec6_in_reject(m, inp)) 926 goto drop; 927 } else { 928 if (ipsec4_in_reject(m, inp)) 929 goto drop; 930 } 931 #endif 932 /* Check the minimum TTL for socket. */ 933 #ifdef INET6 934 if ((isipv6 ? ip6->ip6_hlim : ip->ip_ttl) < inp->inp_ip_minttl) 935 goto drop; 936 #endif 937 938 tp = intotcpcb(inp); 939 if (tp == NULL) { 940 rstreason = BANDLIM_RST_CLOSEDPORT; 941 goto dropwithreset; 942 } 943 if (tp->t_state <= TCPS_CLOSED) 944 goto drop; 945 946 so = inp->inp_socket; 947 948 #ifdef TCPDEBUG 949 if (so->so_options & SO_DEBUG) { 950 ostate = tp->t_state; 951 if (isipv6) 952 bcopy(ip6, tcp_saveipgen, sizeof(*ip6)); 953 else 954 bcopy(ip, tcp_saveipgen, sizeof(*ip)); 955 tcp_savetcp = *th; 956 } 957 #endif 958 959 bzero(&to, sizeof to); 960 961 if (so->so_options & SO_ACCEPTCONN) { 962 struct in_conninfo inc; 963 964 #ifdef INET6 965 inc.inc_isipv6 = (isipv6 == TRUE); 966 #endif 967 if (isipv6) { 968 inc.inc6_faddr = ip6->ip6_src; 969 inc.inc6_laddr = ip6->ip6_dst; 970 inc.inc6_route.ro_rt = NULL; /* XXX */ 971 } else { 972 inc.inc_faddr = ip->ip_src; 973 inc.inc_laddr = ip->ip_dst; 974 inc.inc_route.ro_rt = NULL; /* XXX */ 975 } 976 inc.inc_fport = th->th_sport; 977 inc.inc_lport = th->th_dport; 978 979 /* 980 * If the state is LISTEN then ignore segment if it contains 981 * a RST. If the segment contains an ACK then it is bad and 982 * send a RST. If it does not contain a SYN then it is not 983 * interesting; drop it. 984 * 985 * If the state is SYN_RECEIVED (syncache) and seg contains 986 * an ACK, but not for our SYN/ACK, send a RST. If the seg 987 * contains a RST, check the sequence number to see if it 988 * is a valid reset segment. 989 */ 990 if ((thflags & (TH_RST | TH_ACK | TH_SYN)) != TH_SYN) { 991 if ((thflags & (TH_RST | TH_ACK | TH_SYN)) == TH_ACK) { 992 if (!syncache_expand(&inc, th, &so, m)) { 993 /* 994 * No syncache entry, or ACK was not 995 * for our SYN/ACK. Send a RST. 996 */ 997 tcpstat.tcps_badsyn++; 998 rstreason = BANDLIM_RST_OPENPORT; 999 goto dropwithreset; 1000 } 1001 1002 /* 1003 * Could not complete 3-way handshake, 1004 * connection is being closed down, and 1005 * syncache will free mbuf. 1006 */ 1007 if (so == NULL) 1008 return(IPPROTO_DONE); 1009 1010 /* 1011 * We must be in the correct protocol thread 1012 * for this connection. 1013 */ 1014 KKASSERT(so->so_port == &curthread->td_msgport); 1015 1016 /* 1017 * Socket is created in state SYN_RECEIVED. 1018 * Continue processing segment. 1019 */ 1020 inp = so->so_pcb; 1021 tp = intotcpcb(inp); 1022 /* 1023 * This is what would have happened in 1024 * tcp_output() when the SYN,ACK was sent. 1025 */ 1026 tp->snd_up = tp->snd_una; 1027 tp->snd_max = tp->snd_nxt = tp->iss + 1; 1028 tp->last_ack_sent = tp->rcv_nxt; 1029 1030 goto after_listen; 1031 } 1032 if (thflags & TH_RST) { 1033 syncache_chkrst(&inc, th); 1034 goto drop; 1035 } 1036 if (thflags & TH_ACK) { 1037 syncache_badack(&inc); 1038 tcpstat.tcps_badsyn++; 1039 rstreason = BANDLIM_RST_OPENPORT; 1040 goto dropwithreset; 1041 } 1042 goto drop; 1043 } 1044 1045 /* 1046 * Segment's flags are (SYN) or (SYN | FIN). 1047 */ 1048 #ifdef INET6 1049 /* 1050 * If deprecated address is forbidden, 1051 * we do not accept SYN to deprecated interface 1052 * address to prevent any new inbound connection from 1053 * getting established. 1054 * When we do not accept SYN, we send a TCP RST, 1055 * with deprecated source address (instead of dropping 1056 * it). We compromise it as it is much better for peer 1057 * to send a RST, and RST will be the final packet 1058 * for the exchange. 1059 * 1060 * If we do not forbid deprecated addresses, we accept 1061 * the SYN packet. RFC2462 does not suggest dropping 1062 * SYN in this case. 1063 * If we decipher RFC2462 5.5.4, it says like this: 1064 * 1. use of deprecated addr with existing 1065 * communication is okay - "SHOULD continue to be 1066 * used" 1067 * 2. use of it with new communication: 1068 * (2a) "SHOULD NOT be used if alternate address 1069 * with sufficient scope is available" 1070 * (2b) nothing mentioned otherwise. 1071 * Here we fall into (2b) case as we have no choice in 1072 * our source address selection - we must obey the peer. 1073 * 1074 * The wording in RFC2462 is confusing, and there are 1075 * multiple description text for deprecated address 1076 * handling - worse, they are not exactly the same. 1077 * I believe 5.5.4 is the best one, so we follow 5.5.4. 1078 */ 1079 if (isipv6 && !ip6_use_deprecated) { 1080 struct in6_ifaddr *ia6; 1081 1082 if ((ia6 = ip6_getdstifaddr(m)) && 1083 (ia6->ia6_flags & IN6_IFF_DEPRECATED)) { 1084 tp = NULL; 1085 rstreason = BANDLIM_RST_OPENPORT; 1086 goto dropwithreset; 1087 } 1088 } 1089 #endif 1090 /* 1091 * If it is from this socket, drop it, it must be forged. 1092 * Don't bother responding if the destination was a broadcast. 1093 */ 1094 if (th->th_dport == th->th_sport) { 1095 if (isipv6) { 1096 if (IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, 1097 &ip6->ip6_src)) 1098 goto drop; 1099 } else { 1100 if (ip->ip_dst.s_addr == ip->ip_src.s_addr) 1101 goto drop; 1102 } 1103 } 1104 /* 1105 * RFC1122 4.2.3.10, p. 104: discard bcast/mcast SYN 1106 * 1107 * Note that it is quite possible to receive unicast 1108 * link-layer packets with a broadcast IP address. Use 1109 * in_broadcast() to find them. 1110 */ 1111 if (m->m_flags & (M_BCAST | M_MCAST)) 1112 goto drop; 1113 if (isipv6) { 1114 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || 1115 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) 1116 goto drop; 1117 } else { 1118 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 1119 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || 1120 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || 1121 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) 1122 goto drop; 1123 } 1124 /* 1125 * SYN appears to be valid; create compressed TCP state 1126 * for syncache, or perform t/tcp connection. 1127 */ 1128 if (so->so_qlen <= so->so_qlimit) { 1129 tcp_dooptions(&to, optp, optlen, TRUE, th->th_ack); 1130 if (!syncache_add(&inc, &to, th, so, m)) 1131 goto drop; 1132 1133 /* 1134 * Entry added to syncache, mbuf used to 1135 * send SYN,ACK packet. 1136 */ 1137 return(IPPROTO_DONE); 1138 } 1139 goto drop; 1140 } 1141 1142 after_listen: 1143 /* 1144 * Should not happen - syncache should pick up these connections. 1145 * 1146 * Once we are past handling listen sockets we must be in the 1147 * correct protocol processing thread. 1148 */ 1149 KASSERT(tp->t_state != TCPS_LISTEN, ("tcp_input: TCPS_LISTEN state")); 1150 KKASSERT(so->so_port == &curthread->td_msgport); 1151 1152 /* Unscale the window into a 32-bit value. */ 1153 if (!(thflags & TH_SYN)) 1154 tiwin = th->th_win << tp->snd_scale; 1155 else 1156 tiwin = th->th_win; 1157 1158 /* 1159 * This is the second part of the MSS DoS prevention code (after 1160 * minmss on the sending side) and it deals with too many too small 1161 * tcp packets in a too short timeframe (1 second). 1162 * 1163 * XXX Removed. This code was crap. It does not scale to network 1164 * speed, and default values break NFS. Gone. 1165 */ 1166 /* REMOVED */ 1167 1168 /* 1169 * Segment received on connection. 1170 * 1171 * Reset idle time and keep-alive timer. Don't waste time if less 1172 * then a second has elapsed. 1173 */ 1174 if ((int)(ticks - tp->t_rcvtime) > hz) 1175 tcp_timer_keep_activity(tp, thflags); 1176 1177 /* 1178 * Process options. 1179 * XXX this is tradtitional behavior, may need to be cleaned up. 1180 */ 1181 tcp_dooptions(&to, optp, optlen, (thflags & TH_SYN) != 0, th->th_ack); 1182 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 1183 if ((to.to_flags & TOF_SCALE) && (tp->t_flags & TF_REQ_SCALE)) { 1184 tp->t_flags |= TF_RCVD_SCALE; 1185 tp->snd_scale = to.to_requested_s_scale; 1186 } 1187 1188 /* 1189 * Initial send window; will be updated upon next ACK 1190 */ 1191 tp->snd_wnd = th->th_win; 1192 1193 if (to.to_flags & TOF_TS) { 1194 tp->t_flags |= TF_RCVD_TSTMP; 1195 tp->ts_recent = to.to_tsval; 1196 tp->ts_recent_age = ticks; 1197 } 1198 if (!(to.to_flags & TOF_MSS)) 1199 to.to_mss = 0; 1200 tcp_mss(tp, to.to_mss); 1201 /* 1202 * Only set the TF_SACK_PERMITTED per-connection flag 1203 * if we got a SACK_PERMITTED option from the other side 1204 * and the global tcp_do_sack variable is true. 1205 */ 1206 if (tcp_do_sack && (to.to_flags & TOF_SACK_PERMITTED)) 1207 tp->t_flags |= TF_SACK_PERMITTED; 1208 } 1209 1210 /* 1211 * Header prediction: check for the two common cases 1212 * of a uni-directional data xfer. If the packet has 1213 * no control flags, is in-sequence, the window didn't 1214 * change and we're not retransmitting, it's a 1215 * candidate. If the length is zero and the ack moved 1216 * forward, we're the sender side of the xfer. Just 1217 * free the data acked & wake any higher level process 1218 * that was blocked waiting for space. If the length 1219 * is non-zero and the ack didn't move, we're the 1220 * receiver side. If we're getting packets in-order 1221 * (the reassembly queue is empty), add the data to 1222 * the socket buffer and note that we need a delayed ack. 1223 * Make sure that the hidden state-flags are also off. 1224 * Since we check for TCPS_ESTABLISHED above, it can only 1225 * be TH_NEEDSYN. 1226 */ 1227 if (tp->t_state == TCPS_ESTABLISHED && 1228 (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK && 1229 !(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)) && 1230 (!(to.to_flags & TOF_TS) || 1231 TSTMP_GEQ(to.to_tsval, tp->ts_recent)) && 1232 th->th_seq == tp->rcv_nxt && 1233 tp->snd_nxt == tp->snd_max) { 1234 1235 /* 1236 * If last ACK falls within this segment's sequence numbers, 1237 * record the timestamp. 1238 * NOTE that the test is modified according to the latest 1239 * proposal of the tcplw@cray.com list (Braden 1993/04/26). 1240 */ 1241 if ((to.to_flags & TOF_TS) && 1242 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 1243 tp->ts_recent_age = ticks; 1244 tp->ts_recent = to.to_tsval; 1245 } 1246 1247 if (tlen == 0) { 1248 if (SEQ_GT(th->th_ack, tp->snd_una) && 1249 SEQ_LEQ(th->th_ack, tp->snd_max) && 1250 tp->snd_cwnd >= tp->snd_wnd && 1251 !IN_FASTRECOVERY(tp)) { 1252 /* 1253 * This is a pure ack for outstanding data. 1254 */ 1255 ++tcpstat.tcps_predack; 1256 /* 1257 * "bad retransmit" recovery 1258 * 1259 * If Eifel detection applies, then 1260 * it is deterministic, so use it 1261 * unconditionally over the old heuristic. 1262 * Otherwise, fall back to the old heuristic. 1263 */ 1264 if (tcp_do_eifel_detect && 1265 (to.to_flags & TOF_TS) && to.to_tsecr && 1266 (tp->rxt_flags & TRXT_F_FIRSTACCACK)) { 1267 /* Eifel detection applicable. */ 1268 if (to.to_tsecr < tp->t_rexmtTS) { 1269 tcp_revert_congestion_state(tp); 1270 ++tcpstat.tcps_eifeldetected; 1271 if (tp->t_rxtshift != 1 || 1272 ticks >= tp->t_badrxtwin) 1273 ++tcpstat.tcps_rttcantdetect; 1274 } 1275 } else if (tp->t_rxtshift == 1 && 1276 ticks < tp->t_badrxtwin) { 1277 tcp_revert_congestion_state(tp); 1278 ++tcpstat.tcps_rttdetected; 1279 } 1280 tp->rxt_flags &= ~(TRXT_F_FIRSTACCACK | 1281 TRXT_F_FASTREXMT | TRXT_F_EARLYREXMT); 1282 /* 1283 * Recalculate the retransmit timer / rtt. 1284 * 1285 * Some machines (certain windows boxes) 1286 * send broken timestamp replies during the 1287 * SYN+ACK phase, ignore timestamps of 0. 1288 */ 1289 if ((to.to_flags & TOF_TS) && to.to_tsecr) { 1290 tcp_xmit_timer(tp, 1291 ticks - to.to_tsecr + 1, 1292 th->th_ack); 1293 } else if (tp->t_rtttime && 1294 SEQ_GT(th->th_ack, tp->t_rtseq)) { 1295 tcp_xmit_timer(tp, 1296 ticks - tp->t_rtttime, 1297 th->th_ack); 1298 } 1299 tcp_xmit_bandwidth_limit(tp, th->th_ack); 1300 acked = th->th_ack - tp->snd_una; 1301 tcpstat.tcps_rcvackpack++; 1302 tcpstat.tcps_rcvackbyte += acked; 1303 sbdrop(&so->so_snd.sb, acked); 1304 tp->snd_recover = th->th_ack - 1; 1305 tp->snd_una = th->th_ack; 1306 tp->t_dupacks = 0; 1307 /* 1308 * Update window information. 1309 */ 1310 if (tiwin != tp->snd_wnd && 1311 acceptable_window_update(tp, th, tiwin)) { 1312 /* keep track of pure window updates */ 1313 if (tp->snd_wl2 == th->th_ack && 1314 tiwin > tp->snd_wnd) 1315 tcpstat.tcps_rcvwinupd++; 1316 tp->snd_wnd = tiwin; 1317 tp->snd_wl1 = th->th_seq; 1318 tp->snd_wl2 = th->th_ack; 1319 if (tp->snd_wnd > tp->max_sndwnd) 1320 tp->max_sndwnd = tp->snd_wnd; 1321 } 1322 m_freem(m); 1323 ND6_HINT(tp); /* some progress has been done */ 1324 /* 1325 * If all outstanding data are acked, stop 1326 * retransmit timer, otherwise restart timer 1327 * using current (possibly backed-off) value. 1328 * If process is waiting for space, 1329 * wakeup/selwakeup/signal. If data 1330 * are ready to send, let tcp_output 1331 * decide between more output or persist. 1332 */ 1333 if (tp->snd_una == tp->snd_max) { 1334 tcp_callout_stop(tp, tp->tt_rexmt); 1335 } else if (!tcp_callout_active(tp, 1336 tp->tt_persist)) { 1337 tcp_callout_reset(tp, tp->tt_rexmt, 1338 tp->t_rxtcur, tcp_timer_rexmt); 1339 } 1340 sowwakeup(so); 1341 if (so->so_snd.ssb_cc > 0) 1342 tcp_output(tp); 1343 return(IPPROTO_DONE); 1344 } 1345 } else if (tiwin == tp->snd_wnd && 1346 th->th_ack == tp->snd_una && 1347 TAILQ_EMPTY(&tp->t_segq) && 1348 tlen <= ssb_space(&so->so_rcv)) { 1349 u_long newsize = 0; /* automatic sockbuf scaling */ 1350 /* 1351 * This is a pure, in-sequence data packet 1352 * with nothing on the reassembly queue and 1353 * we have enough buffer space to take it. 1354 */ 1355 ++tcpstat.tcps_preddat; 1356 tp->rcv_nxt += tlen; 1357 tcpstat.tcps_rcvpack++; 1358 tcpstat.tcps_rcvbyte += tlen; 1359 ND6_HINT(tp); /* some progress has been done */ 1360 /* 1361 * Automatic sizing of receive socket buffer. Often the send 1362 * buffer size is not optimally adjusted to the actual network 1363 * conditions at hand (delay bandwidth product). Setting the 1364 * buffer size too small limits throughput on links with high 1365 * bandwidth and high delay (eg. trans-continental/oceanic links). 1366 * 1367 * On the receive side the socket buffer memory is only rarely 1368 * used to any significant extent. This allows us to be much 1369 * more aggressive in scaling the receive socket buffer. For 1370 * the case that the buffer space is actually used to a large 1371 * extent and we run out of kernel memory we can simply drop 1372 * the new segments; TCP on the sender will just retransmit it 1373 * later. Setting the buffer size too big may only consume too 1374 * much kernel memory if the application doesn't read() from 1375 * the socket or packet loss or reordering makes use of the 1376 * reassembly queue. 1377 * 1378 * The criteria to step up the receive buffer one notch are: 1379 * 1. the number of bytes received during the time it takes 1380 * one timestamp to be reflected back to us (the RTT); 1381 * 2. received bytes per RTT is within seven eighth of the 1382 * current socket buffer size; 1383 * 3. receive buffer size has not hit maximal automatic size; 1384 * 1385 * This algorithm does one step per RTT at most and only if 1386 * we receive a bulk stream w/o packet losses or reorderings. 1387 * Shrinking the buffer during idle times is not necessary as 1388 * it doesn't consume any memory when idle. 1389 * 1390 * TODO: Only step up if the application is actually serving 1391 * the buffer to better manage the socket buffer resources. 1392 */ 1393 if (tcp_do_autorcvbuf && 1394 to.to_tsecr && 1395 (so->so_rcv.ssb_flags & SSB_AUTOSIZE)) { 1396 if (to.to_tsecr > tp->rfbuf_ts && 1397 to.to_tsecr - tp->rfbuf_ts < hz) { 1398 if (tp->rfbuf_cnt > 1399 (so->so_rcv.ssb_hiwat / 8 * 7) && 1400 so->so_rcv.ssb_hiwat < 1401 tcp_autorcvbuf_max) { 1402 newsize = 1403 ulmin(so->so_rcv.ssb_hiwat + 1404 tcp_autorcvbuf_inc, 1405 tcp_autorcvbuf_max); 1406 } 1407 /* Start over with next RTT. */ 1408 tp->rfbuf_ts = 0; 1409 tp->rfbuf_cnt = 0; 1410 } else 1411 tp->rfbuf_cnt += tlen; /* add up */ 1412 } 1413 /* 1414 * Add data to socket buffer. 1415 */ 1416 if (so->so_state & SS_CANTRCVMORE) { 1417 m_freem(m); 1418 } else { 1419 /* 1420 * Set new socket buffer size, give up when 1421 * limit is reached. 1422 * 1423 * Adjusting the size can mess up ACK 1424 * sequencing when pure window updates are 1425 * being avoided (which is the default), 1426 * so force an ack. 1427 */ 1428 lwkt_gettoken(&so->so_rcv.ssb_token); 1429 if (newsize) { 1430 tp->t_flags |= TF_RXRESIZED; 1431 if (!ssb_reserve(&so->so_rcv, newsize, 1432 so, NULL)) { 1433 atomic_clear_int(&so->so_rcv.ssb_flags, SSB_AUTOSIZE); 1434 } 1435 if (newsize >= 1436 (TCP_MAXWIN << tp->rcv_scale)) { 1437 atomic_clear_int(&so->so_rcv.ssb_flags, SSB_AUTOSIZE); 1438 } 1439 } 1440 m_adj(m, drop_hdrlen); /* delayed header drop */ 1441 ssb_appendstream(&so->so_rcv, m); 1442 lwkt_reltoken(&so->so_rcv.ssb_token); 1443 } 1444 sorwakeup(so); 1445 /* 1446 * This code is responsible for most of the ACKs 1447 * the TCP stack sends back after receiving a data 1448 * packet. Note that the DELAY_ACK check fails if 1449 * the delack timer is already running, which results 1450 * in an ack being sent every other packet (which is 1451 * what we want). 1452 * 1453 * We then further aggregate acks by not actually 1454 * sending one until the protocol thread has completed 1455 * processing the current backlog of packets. This 1456 * does not delay the ack any further, but allows us 1457 * to take advantage of the packet aggregation that 1458 * high speed NICs do (usually blocks of 8-10 packets) 1459 * to send a single ack rather then four or five acks, 1460 * greatly reducing the ack rate, the return channel 1461 * bandwidth, and the protocol overhead on both ends. 1462 * 1463 * Since this also has the effect of slowing down 1464 * the exponential slow-start ramp-up, systems with 1465 * very large bandwidth-delay products might want 1466 * to turn the feature off. 1467 */ 1468 if (DELAY_ACK(tp)) { 1469 tcp_callout_reset(tp, tp->tt_delack, 1470 tcp_delacktime, tcp_timer_delack); 1471 } else if (tcp_aggregate_acks) { 1472 tp->t_flags |= TF_ACKNOW; 1473 if (!(tp->t_flags & TF_ONOUTPUTQ)) { 1474 tp->t_flags |= TF_ONOUTPUTQ; 1475 tp->tt_cpu = mycpu->gd_cpuid; 1476 TAILQ_INSERT_TAIL( 1477 &tcpcbackq[tp->tt_cpu], 1478 tp, t_outputq); 1479 } 1480 } else { 1481 tp->t_flags |= TF_ACKNOW; 1482 tcp_output(tp); 1483 } 1484 return(IPPROTO_DONE); 1485 } 1486 } 1487 1488 /* 1489 * Calculate amount of space in receive window, 1490 * and then do TCP input processing. 1491 * Receive window is amount of space in rcv queue, 1492 * but not less than advertised window. 1493 */ 1494 recvwin = ssb_space(&so->so_rcv); 1495 if (recvwin < 0) 1496 recvwin = 0; 1497 tp->rcv_wnd = imax(recvwin, (int)(tp->rcv_adv - tp->rcv_nxt)); 1498 1499 /* Reset receive buffer auto scaling when not in bulk receive mode. */ 1500 tp->rfbuf_ts = 0; 1501 tp->rfbuf_cnt = 0; 1502 1503 switch (tp->t_state) { 1504 /* 1505 * If the state is SYN_RECEIVED: 1506 * if seg contains an ACK, but not for our SYN/ACK, send a RST. 1507 */ 1508 case TCPS_SYN_RECEIVED: 1509 if ((thflags & TH_ACK) && 1510 (SEQ_LEQ(th->th_ack, tp->snd_una) || 1511 SEQ_GT(th->th_ack, tp->snd_max))) { 1512 rstreason = BANDLIM_RST_OPENPORT; 1513 goto dropwithreset; 1514 } 1515 break; 1516 1517 /* 1518 * If the state is SYN_SENT: 1519 * if seg contains an ACK, but not for our SYN, drop the input. 1520 * if seg contains a RST, then drop the connection. 1521 * if seg does not contain SYN, then drop it. 1522 * Otherwise this is an acceptable SYN segment 1523 * initialize tp->rcv_nxt and tp->irs 1524 * if seg contains ack then advance tp->snd_una 1525 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state 1526 * arrange for segment to be acked (eventually) 1527 * continue processing rest of data/controls, beginning with URG 1528 */ 1529 case TCPS_SYN_SENT: 1530 if ((thflags & TH_ACK) && 1531 (SEQ_LEQ(th->th_ack, tp->iss) || 1532 SEQ_GT(th->th_ack, tp->snd_max))) { 1533 rstreason = BANDLIM_UNLIMITED; 1534 goto dropwithreset; 1535 } 1536 if (thflags & TH_RST) { 1537 if (thflags & TH_ACK) 1538 tp = tcp_drop(tp, ECONNREFUSED); 1539 goto drop; 1540 } 1541 if (!(thflags & TH_SYN)) 1542 goto drop; 1543 1544 tp->irs = th->th_seq; 1545 tcp_rcvseqinit(tp); 1546 if (thflags & TH_ACK) { 1547 /* Our SYN was acked. */ 1548 tcpstat.tcps_connects++; 1549 soisconnected(so); 1550 /* Do window scaling on this connection? */ 1551 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 1552 (TF_RCVD_SCALE | TF_REQ_SCALE)) 1553 tp->rcv_scale = tp->request_r_scale; 1554 tp->rcv_adv += tp->rcv_wnd; 1555 tp->snd_una++; /* SYN is acked */ 1556 tcp_callout_stop(tp, tp->tt_rexmt); 1557 /* 1558 * If there's data, delay ACK; if there's also a FIN 1559 * ACKNOW will be turned on later. 1560 */ 1561 if (DELAY_ACK(tp) && tlen != 0) { 1562 tcp_callout_reset(tp, tp->tt_delack, 1563 tcp_delacktime, tcp_timer_delack); 1564 } else { 1565 tp->t_flags |= TF_ACKNOW; 1566 } 1567 /* 1568 * Received <SYN,ACK> in SYN_SENT[*] state. 1569 * Transitions: 1570 * SYN_SENT --> ESTABLISHED 1571 * SYN_SENT* --> FIN_WAIT_1 1572 */ 1573 tp->t_starttime = ticks; 1574 if (tp->t_flags & TF_NEEDFIN) { 1575 tp->t_state = TCPS_FIN_WAIT_1; 1576 tp->t_flags &= ~TF_NEEDFIN; 1577 thflags &= ~TH_SYN; 1578 } else { 1579 tcp_established(tp); 1580 } 1581 } else { 1582 /* 1583 * Received initial SYN in SYN-SENT[*] state => 1584 * simultaneous open. 1585 * Do 3-way handshake: 1586 * SYN-SENT -> SYN-RECEIVED 1587 * SYN-SENT* -> SYN-RECEIVED* 1588 */ 1589 tp->t_flags |= TF_ACKNOW; 1590 tcp_callout_stop(tp, tp->tt_rexmt); 1591 tp->t_state = TCPS_SYN_RECEIVED; 1592 } 1593 1594 /* 1595 * Advance th->th_seq to correspond to first data byte. 1596 * If data, trim to stay within window, 1597 * dropping FIN if necessary. 1598 */ 1599 th->th_seq++; 1600 if (tlen > tp->rcv_wnd) { 1601 todrop = tlen - tp->rcv_wnd; 1602 m_adj(m, -todrop); 1603 tlen = tp->rcv_wnd; 1604 thflags &= ~TH_FIN; 1605 tcpstat.tcps_rcvpackafterwin++; 1606 tcpstat.tcps_rcvbyteafterwin += todrop; 1607 } 1608 tp->snd_wl1 = th->th_seq - 1; 1609 tp->rcv_up = th->th_seq; 1610 /* 1611 * Client side of transaction: already sent SYN and data. 1612 * If the remote host used T/TCP to validate the SYN, 1613 * our data will be ACK'd; if so, enter normal data segment 1614 * processing in the middle of step 5, ack processing. 1615 * Otherwise, goto step 6. 1616 */ 1617 if (thflags & TH_ACK) 1618 goto process_ACK; 1619 1620 goto step6; 1621 1622 /* 1623 * If the state is LAST_ACK or CLOSING or TIME_WAIT: 1624 * do normal processing (we no longer bother with T/TCP). 1625 */ 1626 case TCPS_LAST_ACK: 1627 case TCPS_CLOSING: 1628 case TCPS_TIME_WAIT: 1629 break; /* continue normal processing */ 1630 } 1631 1632 /* 1633 * States other than LISTEN or SYN_SENT. 1634 * First check the RST flag and sequence number since reset segments 1635 * are exempt from the timestamp and connection count tests. This 1636 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix 1637 * below which allowed reset segments in half the sequence space 1638 * to fall though and be processed (which gives forged reset 1639 * segments with a random sequence number a 50 percent chance of 1640 * killing a connection). 1641 * Then check timestamp, if present. 1642 * Then check the connection count, if present. 1643 * Then check that at least some bytes of segment are within 1644 * receive window. If segment begins before rcv_nxt, 1645 * drop leading data (and SYN); if nothing left, just ack. 1646 * 1647 * 1648 * If the RST bit is set, check the sequence number to see 1649 * if this is a valid reset segment. 1650 * RFC 793 page 37: 1651 * In all states except SYN-SENT, all reset (RST) segments 1652 * are validated by checking their SEQ-fields. A reset is 1653 * valid if its sequence number is in the window. 1654 * Note: this does not take into account delayed ACKs, so 1655 * we should test against last_ack_sent instead of rcv_nxt. 1656 * The sequence number in the reset segment is normally an 1657 * echo of our outgoing acknowledgement numbers, but some hosts 1658 * send a reset with the sequence number at the rightmost edge 1659 * of our receive window, and we have to handle this case. 1660 * If we have multiple segments in flight, the intial reset 1661 * segment sequence numbers will be to the left of last_ack_sent, 1662 * but they will eventually catch up. 1663 * In any case, it never made sense to trim reset segments to 1664 * fit the receive window since RFC 1122 says: 1665 * 4.2.2.12 RST Segment: RFC-793 Section 3.4 1666 * 1667 * A TCP SHOULD allow a received RST segment to include data. 1668 * 1669 * DISCUSSION 1670 * It has been suggested that a RST segment could contain 1671 * ASCII text that encoded and explained the cause of the 1672 * RST. No standard has yet been established for such 1673 * data. 1674 * 1675 * If the reset segment passes the sequence number test examine 1676 * the state: 1677 * SYN_RECEIVED STATE: 1678 * If passive open, return to LISTEN state. 1679 * If active open, inform user that connection was refused. 1680 * ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES: 1681 * Inform user that connection was reset, and close tcb. 1682 * CLOSING, LAST_ACK STATES: 1683 * Close the tcb. 1684 * TIME_WAIT STATE: 1685 * Drop the segment - see Stevens, vol. 2, p. 964 and 1686 * RFC 1337. 1687 */ 1688 if (thflags & TH_RST) { 1689 if (SEQ_GEQ(th->th_seq, tp->last_ack_sent) && 1690 SEQ_LEQ(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) { 1691 switch (tp->t_state) { 1692 1693 case TCPS_SYN_RECEIVED: 1694 so->so_error = ECONNREFUSED; 1695 goto close; 1696 1697 case TCPS_ESTABLISHED: 1698 case TCPS_FIN_WAIT_1: 1699 case TCPS_FIN_WAIT_2: 1700 case TCPS_CLOSE_WAIT: 1701 so->so_error = ECONNRESET; 1702 close: 1703 tp->t_state = TCPS_CLOSED; 1704 tcpstat.tcps_drops++; 1705 tp = tcp_close(tp); 1706 break; 1707 1708 case TCPS_CLOSING: 1709 case TCPS_LAST_ACK: 1710 tp = tcp_close(tp); 1711 break; 1712 1713 case TCPS_TIME_WAIT: 1714 break; 1715 } 1716 } 1717 goto drop; 1718 } 1719 1720 /* 1721 * RFC 1323 PAWS: If we have a timestamp reply on this segment 1722 * and it's less than ts_recent, drop it. 1723 */ 1724 if ((to.to_flags & TOF_TS) && tp->ts_recent != 0 && 1725 TSTMP_LT(to.to_tsval, tp->ts_recent)) { 1726 /* Check to see if ts_recent is over 24 days old. */ 1727 if ((int)(ticks - tp->ts_recent_age) > TCP_PAWS_IDLE) { 1728 /* 1729 * Invalidate ts_recent. If this segment updates 1730 * ts_recent, the age will be reset later and ts_recent 1731 * will get a valid value. If it does not, setting 1732 * ts_recent to zero will at least satisfy the 1733 * requirement that zero be placed in the timestamp 1734 * echo reply when ts_recent isn't valid. The 1735 * age isn't reset until we get a valid ts_recent 1736 * because we don't want out-of-order segments to be 1737 * dropped when ts_recent is old. 1738 */ 1739 tp->ts_recent = 0; 1740 } else if (tcp_paws_tolerance && tlen != 0 && 1741 tp->t_state == TCPS_ESTABLISHED && 1742 (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK&& 1743 !(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)) && 1744 th->th_ack == tp->snd_una && 1745 tiwin == tp->snd_wnd && 1746 TSTMP_GEQ(to.to_tsval + tcp_paws_tolerance, tp->ts_recent)&& 1747 (th->th_seq == tp->rcv_nxt || 1748 (SEQ_GT(th->th_seq, tp->rcv_nxt) && 1749 tcp_paws_canreasslast(tp, th, tlen)))) { 1750 /* 1751 * This tends to prevent valid new segments from being 1752 * dropped by the reordered segments sent by the fast 1753 * retransmission algorithm on the sending side, i.e. 1754 * the fast retransmitted segment w/ larger timestamp 1755 * arrives earlier than the previously sent new segments 1756 * w/ smaller timestamp. 1757 * 1758 * If following conditions are met, the segment is 1759 * accepted: 1760 * - The segment contains data 1761 * - The connection is established 1762 * - The header does not contain important flags 1763 * - SYN or FIN is not needed 1764 * - It does not acknowledge new data 1765 * - Receive window is not changed 1766 * - The timestamp is within "acceptable" range 1767 * - The new segment is what we are expecting or 1768 * the new segment could be merged w/ the last 1769 * pending segment on the reassemble queue 1770 */ 1771 tcpstat.tcps_pawsaccept++; 1772 tcpstat.tcps_pawsdrop++; 1773 } else { 1774 tcpstat.tcps_rcvduppack++; 1775 tcpstat.tcps_rcvdupbyte += tlen; 1776 tcpstat.tcps_pawsdrop++; 1777 if (tlen) 1778 goto dropafterack; 1779 goto drop; 1780 } 1781 } 1782 1783 /* 1784 * In the SYN-RECEIVED state, validate that the packet belongs to 1785 * this connection before trimming the data to fit the receive 1786 * window. Check the sequence number versus IRS since we know 1787 * the sequence numbers haven't wrapped. This is a partial fix 1788 * for the "LAND" DoS attack. 1789 */ 1790 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) { 1791 rstreason = BANDLIM_RST_OPENPORT; 1792 goto dropwithreset; 1793 } 1794 1795 todrop = tp->rcv_nxt - th->th_seq; 1796 if (todrop > 0) { 1797 if (TCP_DO_SACK(tp)) { 1798 /* Report duplicate segment at head of packet. */ 1799 tp->reportblk.rblk_start = th->th_seq; 1800 tp->reportblk.rblk_end = TCP_SACK_BLKEND( 1801 th->th_seq + tlen, thflags); 1802 if (SEQ_GT(tp->reportblk.rblk_end, tp->rcv_nxt)) 1803 tp->reportblk.rblk_end = tp->rcv_nxt; 1804 tp->sack_flags |= (TSACK_F_DUPSEG | TSACK_F_SACKLEFT); 1805 tp->t_flags |= TF_ACKNOW; 1806 } 1807 if (thflags & TH_SYN) { 1808 thflags &= ~TH_SYN; 1809 th->th_seq++; 1810 if (th->th_urp > 1) 1811 th->th_urp--; 1812 else 1813 thflags &= ~TH_URG; 1814 todrop--; 1815 } 1816 /* 1817 * Following if statement from Stevens, vol. 2, p. 960. 1818 */ 1819 if (todrop > tlen || 1820 (todrop == tlen && !(thflags & TH_FIN))) { 1821 /* 1822 * Any valid FIN must be to the left of the window. 1823 * At this point the FIN must be a duplicate or out 1824 * of sequence; drop it. 1825 */ 1826 thflags &= ~TH_FIN; 1827 1828 /* 1829 * Send an ACK to resynchronize and drop any data. 1830 * But keep on processing for RST or ACK. 1831 */ 1832 tp->t_flags |= TF_ACKNOW; 1833 todrop = tlen; 1834 tcpstat.tcps_rcvduppack++; 1835 tcpstat.tcps_rcvdupbyte += todrop; 1836 } else { 1837 tcpstat.tcps_rcvpartduppack++; 1838 tcpstat.tcps_rcvpartdupbyte += todrop; 1839 } 1840 drop_hdrlen += todrop; /* drop from the top afterwards */ 1841 th->th_seq += todrop; 1842 tlen -= todrop; 1843 if (th->th_urp > todrop) 1844 th->th_urp -= todrop; 1845 else { 1846 thflags &= ~TH_URG; 1847 th->th_urp = 0; 1848 } 1849 } 1850 1851 /* 1852 * If new data are received on a connection after the 1853 * user processes are gone, then RST the other end. 1854 */ 1855 if ((so->so_state & SS_NOFDREF) && 1856 tp->t_state > TCPS_CLOSE_WAIT && tlen) { 1857 tp = tcp_close(tp); 1858 tcpstat.tcps_rcvafterclose++; 1859 rstreason = BANDLIM_UNLIMITED; 1860 goto dropwithreset; 1861 } 1862 1863 /* 1864 * If segment ends after window, drop trailing data 1865 * (and PUSH and FIN); if nothing left, just ACK. 1866 */ 1867 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd); 1868 if (todrop > 0) { 1869 tcpstat.tcps_rcvpackafterwin++; 1870 if (todrop >= tlen) { 1871 tcpstat.tcps_rcvbyteafterwin += tlen; 1872 /* 1873 * If a new connection request is received 1874 * while in TIME_WAIT, drop the old connection 1875 * and start over if the sequence numbers 1876 * are above the previous ones. 1877 */ 1878 if (thflags & TH_SYN && 1879 tp->t_state == TCPS_TIME_WAIT && 1880 SEQ_GT(th->th_seq, tp->rcv_nxt)) { 1881 tp = tcp_close(tp); 1882 goto findpcb; 1883 } 1884 /* 1885 * If window is closed can only take segments at 1886 * window edge, and have to drop data and PUSH from 1887 * incoming segments. Continue processing, but 1888 * remember to ack. Otherwise, drop segment 1889 * and ack. 1890 */ 1891 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) { 1892 tp->t_flags |= TF_ACKNOW; 1893 tcpstat.tcps_rcvwinprobe++; 1894 } else 1895 goto dropafterack; 1896 } else 1897 tcpstat.tcps_rcvbyteafterwin += todrop; 1898 m_adj(m, -todrop); 1899 tlen -= todrop; 1900 thflags &= ~(TH_PUSH | TH_FIN); 1901 } 1902 1903 /* 1904 * If last ACK falls within this segment's sequence numbers, 1905 * record its timestamp. 1906 * NOTE: 1907 * 1) That the test incorporates suggestions from the latest 1908 * proposal of the tcplw@cray.com list (Braden 1993/04/26). 1909 * 2) That updating only on newer timestamps interferes with 1910 * our earlier PAWS tests, so this check should be solely 1911 * predicated on the sequence space of this segment. 1912 * 3) That we modify the segment boundary check to be 1913 * Last.ACK.Sent <= SEG.SEQ + SEG.LEN 1914 * instead of RFC1323's 1915 * Last.ACK.Sent < SEG.SEQ + SEG.LEN, 1916 * This modified check allows us to overcome RFC1323's 1917 * limitations as described in Stevens TCP/IP Illustrated 1918 * Vol. 2 p.869. In such cases, we can still calculate the 1919 * RTT correctly when RCV.NXT == Last.ACK.Sent. 1920 */ 1921 if ((to.to_flags & TOF_TS) && SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 1922 SEQ_LEQ(tp->last_ack_sent, (th->th_seq + tlen 1923 + ((thflags & TH_SYN) != 0) 1924 + ((thflags & TH_FIN) != 0)))) { 1925 tp->ts_recent_age = ticks; 1926 tp->ts_recent = to.to_tsval; 1927 } 1928 1929 /* 1930 * If a SYN is in the window, then this is an 1931 * error and we send an RST and drop the connection. 1932 */ 1933 if (thflags & TH_SYN) { 1934 tp = tcp_drop(tp, ECONNRESET); 1935 rstreason = BANDLIM_UNLIMITED; 1936 goto dropwithreset; 1937 } 1938 1939 /* 1940 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN 1941 * flag is on (half-synchronized state), then queue data for 1942 * later processing; else drop segment and return. 1943 */ 1944 if (!(thflags & TH_ACK)) { 1945 if (tp->t_state == TCPS_SYN_RECEIVED || 1946 (tp->t_flags & TF_NEEDSYN)) 1947 goto step6; 1948 else 1949 goto drop; 1950 } 1951 1952 /* 1953 * Ack processing. 1954 */ 1955 switch (tp->t_state) { 1956 /* 1957 * In SYN_RECEIVED state, the ACK acknowledges our SYN, so enter 1958 * ESTABLISHED state and continue processing. 1959 * The ACK was checked above. 1960 */ 1961 case TCPS_SYN_RECEIVED: 1962 1963 tcpstat.tcps_connects++; 1964 soisconnected(so); 1965 /* Do window scaling? */ 1966 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 1967 (TF_RCVD_SCALE | TF_REQ_SCALE)) 1968 tp->rcv_scale = tp->request_r_scale; 1969 /* 1970 * Make transitions: 1971 * SYN-RECEIVED -> ESTABLISHED 1972 * SYN-RECEIVED* -> FIN-WAIT-1 1973 */ 1974 tp->t_starttime = ticks; 1975 if (tp->t_flags & TF_NEEDFIN) { 1976 tp->t_state = TCPS_FIN_WAIT_1; 1977 tp->t_flags &= ~TF_NEEDFIN; 1978 } else { 1979 tcp_established(tp); 1980 } 1981 /* 1982 * If segment contains data or ACK, will call tcp_reass() 1983 * later; if not, do so now to pass queued data to user. 1984 */ 1985 if (tlen == 0 && !(thflags & TH_FIN)) 1986 tcp_reass(tp, NULL, NULL, NULL); 1987 /* fall into ... */ 1988 1989 /* 1990 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range 1991 * ACKs. If the ack is in the range 1992 * tp->snd_una < th->th_ack <= tp->snd_max 1993 * then advance tp->snd_una to th->th_ack and drop 1994 * data from the retransmission queue. If this ACK reflects 1995 * more up to date window information we update our window information. 1996 */ 1997 case TCPS_ESTABLISHED: 1998 case TCPS_FIN_WAIT_1: 1999 case TCPS_FIN_WAIT_2: 2000 case TCPS_CLOSE_WAIT: 2001 case TCPS_CLOSING: 2002 case TCPS_LAST_ACK: 2003 case TCPS_TIME_WAIT: 2004 2005 if (SEQ_LEQ(th->th_ack, tp->snd_una)) { 2006 if (TCP_DO_SACK(tp)) 2007 tcp_sack_update_scoreboard(tp, &to); 2008 if (!tcp_callout_active(tp, tp->tt_rexmt) || 2009 th->th_ack != tp->snd_una) { 2010 if (tlen == 0 && tiwin == tp->snd_wnd) 2011 tcpstat.tcps_rcvdupack++; 2012 tp->t_dupacks = 0; 2013 break; 2014 } 2015 if (tlen != 0 || tiwin != tp->snd_wnd) { 2016 if (!tcp_do_rfc3517bis || 2017 !TCP_DO_SACK(tp) || 2018 (to.to_flags & 2019 (TOF_SACK | TOF_SACK_REDUNDANT)) 2020 != TOF_SACK) { 2021 tp->t_dupacks = 0; 2022 } else { 2023 delayed_dupack = TRUE; 2024 th_dupack = th->th_ack; 2025 } 2026 break; 2027 } 2028 if (tcp_recv_dupack(tp, th->th_ack, &to)) 2029 goto drop; 2030 else 2031 break; 2032 } 2033 2034 KASSERT(SEQ_GT(th->th_ack, tp->snd_una), ("th_ack <= snd_una")); 2035 tp->t_dupacks = 0; 2036 if (SEQ_GT(th->th_ack, tp->snd_max)) { 2037 /* 2038 * Detected optimistic ACK attack. 2039 * Force slow-start to de-synchronize attack. 2040 */ 2041 tp->snd_cwnd = tp->t_maxseg; 2042 tp->snd_wacked = 0; 2043 2044 tcpstat.tcps_rcvacktoomuch++; 2045 goto dropafterack; 2046 } 2047 /* 2048 * If we reach this point, ACK is not a duplicate, 2049 * i.e., it ACKs something we sent. 2050 */ 2051 if (tp->t_flags & TF_NEEDSYN) { 2052 /* 2053 * T/TCP: Connection was half-synchronized, and our 2054 * SYN has been ACK'd (so connection is now fully 2055 * synchronized). Go to non-starred state, 2056 * increment snd_una for ACK of SYN, and check if 2057 * we can do window scaling. 2058 */ 2059 tp->t_flags &= ~TF_NEEDSYN; 2060 tp->snd_una++; 2061 /* Do window scaling? */ 2062 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 2063 (TF_RCVD_SCALE | TF_REQ_SCALE)) 2064 tp->rcv_scale = tp->request_r_scale; 2065 } 2066 2067 process_ACK: 2068 acked = th->th_ack - tp->snd_una; 2069 tcpstat.tcps_rcvackpack++; 2070 tcpstat.tcps_rcvackbyte += acked; 2071 2072 if (tcp_do_eifel_detect && acked > 0 && 2073 (to.to_flags & TOF_TS) && (to.to_tsecr != 0) && 2074 (tp->rxt_flags & TRXT_F_FIRSTACCACK)) { 2075 /* Eifel detection applicable. */ 2076 if (to.to_tsecr < tp->t_rexmtTS) { 2077 ++tcpstat.tcps_eifeldetected; 2078 tcp_revert_congestion_state(tp); 2079 if (tp->t_rxtshift != 1 || 2080 ticks >= tp->t_badrxtwin) 2081 ++tcpstat.tcps_rttcantdetect; 2082 } 2083 } else if (tp->t_rxtshift == 1 && ticks < tp->t_badrxtwin) { 2084 /* 2085 * If we just performed our first retransmit, 2086 * and the ACK arrives within our recovery window, 2087 * then it was a mistake to do the retransmit 2088 * in the first place. Recover our original cwnd 2089 * and ssthresh, and proceed to transmit where we 2090 * left off. 2091 */ 2092 tcp_revert_congestion_state(tp); 2093 ++tcpstat.tcps_rttdetected; 2094 } 2095 2096 /* 2097 * If we have a timestamp reply, update smoothed 2098 * round trip time. If no timestamp is present but 2099 * transmit timer is running and timed sequence 2100 * number was acked, update smoothed round trip time. 2101 * Since we now have an rtt measurement, cancel the 2102 * timer backoff (cf., Phil Karn's retransmit alg.). 2103 * Recompute the initial retransmit timer. 2104 * 2105 * Some machines (certain windows boxes) send broken 2106 * timestamp replies during the SYN+ACK phase, ignore 2107 * timestamps of 0. 2108 */ 2109 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) 2110 tcp_xmit_timer(tp, ticks - to.to_tsecr + 1, th->th_ack); 2111 else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) 2112 tcp_xmit_timer(tp, ticks - tp->t_rtttime, th->th_ack); 2113 tcp_xmit_bandwidth_limit(tp, th->th_ack); 2114 2115 /* 2116 * If no data (only SYN) was ACK'd, 2117 * skip rest of ACK processing. 2118 */ 2119 if (acked == 0) 2120 goto step6; 2121 2122 /* Stop looking for an acceptable ACK since one was received. */ 2123 tp->rxt_flags &= ~(TRXT_F_FIRSTACCACK | 2124 TRXT_F_FASTREXMT | TRXT_F_EARLYREXMT); 2125 2126 if (acked > so->so_snd.ssb_cc) { 2127 tp->snd_wnd -= so->so_snd.ssb_cc; 2128 sbdrop(&so->so_snd.sb, (int)so->so_snd.ssb_cc); 2129 ourfinisacked = TRUE; 2130 } else { 2131 sbdrop(&so->so_snd.sb, acked); 2132 tp->snd_wnd -= acked; 2133 ourfinisacked = FALSE; 2134 } 2135 sowwakeup(so); 2136 2137 /* 2138 * Update window information. 2139 */ 2140 if (acceptable_window_update(tp, th, tiwin)) { 2141 /* keep track of pure window updates */ 2142 if (tlen == 0 && tp->snd_wl2 == th->th_ack && 2143 tiwin > tp->snd_wnd) 2144 tcpstat.tcps_rcvwinupd++; 2145 tp->snd_wnd = tiwin; 2146 tp->snd_wl1 = th->th_seq; 2147 tp->snd_wl2 = th->th_ack; 2148 if (tp->snd_wnd > tp->max_sndwnd) 2149 tp->max_sndwnd = tp->snd_wnd; 2150 needoutput = TRUE; 2151 } 2152 2153 tp->snd_una = th->th_ack; 2154 if (TCP_DO_SACK(tp)) 2155 tcp_sack_update_scoreboard(tp, &to); 2156 if (IN_FASTRECOVERY(tp)) { 2157 if (SEQ_GEQ(th->th_ack, tp->snd_recover)) { 2158 EXIT_FASTRECOVERY(tp); 2159 needoutput = TRUE; 2160 /* 2161 * If the congestion window was inflated 2162 * to account for the other side's 2163 * cached packets, retract it. 2164 */ 2165 if (!TCP_DO_SACK(tp)) 2166 tp->snd_cwnd = tp->snd_ssthresh; 2167 2168 /* 2169 * Window inflation should have left us 2170 * with approximately snd_ssthresh outstanding 2171 * data. But, in case we would be inclined 2172 * to send a burst, better do it using 2173 * slow start. 2174 */ 2175 if (SEQ_GT(th->th_ack + tp->snd_cwnd, 2176 tp->snd_max + 2 * tp->t_maxseg)) 2177 tp->snd_cwnd = 2178 (tp->snd_max - tp->snd_una) + 2179 2 * tp->t_maxseg; 2180 2181 tp->snd_wacked = 0; 2182 } else { 2183 if (TCP_DO_SACK(tp)) { 2184 tp->snd_max_rexmt = tp->snd_max; 2185 tcp_sack_rexmt(tp, 2186 tp->snd_una == tp->rexmt_high); 2187 } else { 2188 tcp_newreno_partial_ack(tp, th, acked); 2189 } 2190 needoutput = FALSE; 2191 } 2192 } else { 2193 /* 2194 * Open the congestion window. When in slow-start, 2195 * open exponentially: maxseg per packet. Otherwise, 2196 * open linearly: maxseg per window. 2197 */ 2198 if (tp->snd_cwnd <= tp->snd_ssthresh) { 2199 u_int abc_sslimit = 2200 (SEQ_LT(tp->snd_nxt, tp->snd_max) ? 2201 tp->t_maxseg : 2 * tp->t_maxseg); 2202 2203 /* slow-start */ 2204 tp->snd_cwnd += tcp_do_abc ? 2205 min(acked, abc_sslimit) : tp->t_maxseg; 2206 } else { 2207 /* linear increase */ 2208 tp->snd_wacked += tcp_do_abc ? acked : 2209 tp->t_maxseg; 2210 if (tp->snd_wacked >= tp->snd_cwnd) { 2211 tp->snd_wacked -= tp->snd_cwnd; 2212 tp->snd_cwnd += tp->t_maxseg; 2213 } 2214 } 2215 tp->snd_cwnd = min(tp->snd_cwnd, 2216 TCP_MAXWIN << tp->snd_scale); 2217 tp->snd_recover = th->th_ack - 1; 2218 } 2219 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 2220 tp->snd_nxt = tp->snd_una; 2221 2222 /* 2223 * If all outstanding data is acked, stop retransmit 2224 * timer and remember to restart (more output or persist). 2225 * If there is more data to be acked, restart retransmit 2226 * timer, using current (possibly backed-off) value. 2227 */ 2228 if (th->th_ack == tp->snd_max) { 2229 tcp_callout_stop(tp, tp->tt_rexmt); 2230 needoutput = TRUE; 2231 } else if (!tcp_callout_active(tp, tp->tt_persist)) { 2232 tcp_callout_reset(tp, tp->tt_rexmt, tp->t_rxtcur, 2233 tcp_timer_rexmt); 2234 } 2235 2236 switch (tp->t_state) { 2237 /* 2238 * In FIN_WAIT_1 STATE in addition to the processing 2239 * for the ESTABLISHED state if our FIN is now acknowledged 2240 * then enter FIN_WAIT_2. 2241 */ 2242 case TCPS_FIN_WAIT_1: 2243 if (ourfinisacked) { 2244 /* 2245 * If we can't receive any more 2246 * data, then closing user can proceed. 2247 * Starting the timer is contrary to the 2248 * specification, but if we don't get a FIN 2249 * we'll hang forever. 2250 */ 2251 if (so->so_state & SS_CANTRCVMORE) { 2252 soisdisconnected(so); 2253 tcp_callout_reset(tp, tp->tt_2msl, 2254 tp->t_maxidle, tcp_timer_2msl); 2255 } 2256 tp->t_state = TCPS_FIN_WAIT_2; 2257 } 2258 break; 2259 2260 /* 2261 * In CLOSING STATE in addition to the processing for 2262 * the ESTABLISHED state if the ACK acknowledges our FIN 2263 * then enter the TIME-WAIT state, otherwise ignore 2264 * the segment. 2265 */ 2266 case TCPS_CLOSING: 2267 if (ourfinisacked) { 2268 tp->t_state = TCPS_TIME_WAIT; 2269 tcp_canceltimers(tp); 2270 tcp_callout_reset(tp, tp->tt_2msl, 2271 2 * tcp_rmx_msl(tp), 2272 tcp_timer_2msl); 2273 soisdisconnected(so); 2274 } 2275 break; 2276 2277 /* 2278 * In LAST_ACK, we may still be waiting for data to drain 2279 * and/or to be acked, as well as for the ack of our FIN. 2280 * If our FIN is now acknowledged, delete the TCB, 2281 * enter the closed state and return. 2282 */ 2283 case TCPS_LAST_ACK: 2284 if (ourfinisacked) { 2285 tp = tcp_close(tp); 2286 goto drop; 2287 } 2288 break; 2289 2290 /* 2291 * In TIME_WAIT state the only thing that should arrive 2292 * is a retransmission of the remote FIN. Acknowledge 2293 * it and restart the finack timer. 2294 */ 2295 case TCPS_TIME_WAIT: 2296 tcp_callout_reset(tp, tp->tt_2msl, 2 * tcp_rmx_msl(tp), 2297 tcp_timer_2msl); 2298 goto dropafterack; 2299 } 2300 } 2301 2302 step6: 2303 /* 2304 * Update window information. 2305 * Don't look at window if no ACK: TAC's send garbage on first SYN. 2306 */ 2307 if ((thflags & TH_ACK) && 2308 acceptable_window_update(tp, th, tiwin)) { 2309 /* keep track of pure window updates */ 2310 if (tlen == 0 && tp->snd_wl2 == th->th_ack && 2311 tiwin > tp->snd_wnd) 2312 tcpstat.tcps_rcvwinupd++; 2313 tp->snd_wnd = tiwin; 2314 tp->snd_wl1 = th->th_seq; 2315 tp->snd_wl2 = th->th_ack; 2316 if (tp->snd_wnd > tp->max_sndwnd) 2317 tp->max_sndwnd = tp->snd_wnd; 2318 needoutput = TRUE; 2319 } 2320 2321 /* 2322 * Process segments with URG. 2323 */ 2324 if ((thflags & TH_URG) && th->th_urp && 2325 !TCPS_HAVERCVDFIN(tp->t_state)) { 2326 /* 2327 * This is a kludge, but if we receive and accept 2328 * random urgent pointers, we'll crash in 2329 * soreceive. It's hard to imagine someone 2330 * actually wanting to send this much urgent data. 2331 */ 2332 if (th->th_urp + so->so_rcv.ssb_cc > sb_max) { 2333 th->th_urp = 0; /* XXX */ 2334 thflags &= ~TH_URG; /* XXX */ 2335 goto dodata; /* XXX */ 2336 } 2337 /* 2338 * If this segment advances the known urgent pointer, 2339 * then mark the data stream. This should not happen 2340 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since 2341 * a FIN has been received from the remote side. 2342 * In these states we ignore the URG. 2343 * 2344 * According to RFC961 (Assigned Protocols), 2345 * the urgent pointer points to the last octet 2346 * of urgent data. We continue, however, 2347 * to consider it to indicate the first octet 2348 * of data past the urgent section as the original 2349 * spec states (in one of two places). 2350 */ 2351 if (SEQ_GT(th->th_seq + th->th_urp, tp->rcv_up)) { 2352 tp->rcv_up = th->th_seq + th->th_urp; 2353 so->so_oobmark = so->so_rcv.ssb_cc + 2354 (tp->rcv_up - tp->rcv_nxt) - 1; 2355 if (so->so_oobmark == 0) 2356 sosetstate(so, SS_RCVATMARK); 2357 sohasoutofband(so); 2358 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA); 2359 } 2360 /* 2361 * Remove out of band data so doesn't get presented to user. 2362 * This can happen independent of advancing the URG pointer, 2363 * but if two URG's are pending at once, some out-of-band 2364 * data may creep in... ick. 2365 */ 2366 if (th->th_urp <= (u_long)tlen && 2367 !(so->so_options & SO_OOBINLINE)) { 2368 /* hdr drop is delayed */ 2369 tcp_pulloutofband(so, th, m, drop_hdrlen); 2370 } 2371 } else { 2372 /* 2373 * If no out of band data is expected, 2374 * pull receive urgent pointer along 2375 * with the receive window. 2376 */ 2377 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up)) 2378 tp->rcv_up = tp->rcv_nxt; 2379 } 2380 2381 dodata: /* XXX */ 2382 /* 2383 * Process the segment text, merging it into the TCP sequencing queue, 2384 * and arranging for acknowledgment of receipt if necessary. 2385 * This process logically involves adjusting tp->rcv_wnd as data 2386 * is presented to the user (this happens in tcp_usrreq.c, 2387 * case PRU_RCVD). If a FIN has already been received on this 2388 * connection then we just ignore the text. 2389 */ 2390 if ((tlen || (thflags & TH_FIN)) && !TCPS_HAVERCVDFIN(tp->t_state)) { 2391 m_adj(m, drop_hdrlen); /* delayed header drop */ 2392 /* 2393 * Insert segment which includes th into TCP reassembly queue 2394 * with control block tp. Set thflags to whether reassembly now 2395 * includes a segment with FIN. This handles the common case 2396 * inline (segment is the next to be received on an established 2397 * connection, and the queue is empty), avoiding linkage into 2398 * and removal from the queue and repetition of various 2399 * conversions. 2400 * Set DELACK for segments received in order, but ack 2401 * immediately when segments are out of order (so 2402 * fast retransmit can work). 2403 */ 2404 if (th->th_seq == tp->rcv_nxt && 2405 TAILQ_EMPTY(&tp->t_segq) && 2406 TCPS_HAVEESTABLISHED(tp->t_state)) { 2407 if (DELAY_ACK(tp)) { 2408 tcp_callout_reset(tp, tp->tt_delack, 2409 tcp_delacktime, tcp_timer_delack); 2410 } else { 2411 tp->t_flags |= TF_ACKNOW; 2412 } 2413 tp->rcv_nxt += tlen; 2414 thflags = th->th_flags & TH_FIN; 2415 tcpstat.tcps_rcvpack++; 2416 tcpstat.tcps_rcvbyte += tlen; 2417 ND6_HINT(tp); 2418 if (so->so_state & SS_CANTRCVMORE) { 2419 m_freem(m); 2420 } else { 2421 lwkt_gettoken(&so->so_rcv.ssb_token); 2422 ssb_appendstream(&so->so_rcv, m); 2423 lwkt_reltoken(&so->so_rcv.ssb_token); 2424 } 2425 sorwakeup(so); 2426 } else { 2427 if (!(tp->sack_flags & TSACK_F_DUPSEG)) { 2428 /* Initialize SACK report block. */ 2429 tp->reportblk.rblk_start = th->th_seq; 2430 tp->reportblk.rblk_end = TCP_SACK_BLKEND( 2431 th->th_seq + tlen, thflags); 2432 } 2433 thflags = tcp_reass(tp, th, &tlen, m); 2434 tp->t_flags |= TF_ACKNOW; 2435 } 2436 2437 /* 2438 * Note the amount of data that peer has sent into 2439 * our window, in order to estimate the sender's 2440 * buffer size. 2441 */ 2442 len = so->so_rcv.ssb_hiwat - (tp->rcv_adv - tp->rcv_nxt); 2443 } else { 2444 m_freem(m); 2445 thflags &= ~TH_FIN; 2446 } 2447 2448 /* 2449 * If FIN is received ACK the FIN and let the user know 2450 * that the connection is closing. 2451 */ 2452 if (thflags & TH_FIN) { 2453 if (!TCPS_HAVERCVDFIN(tp->t_state)) { 2454 socantrcvmore(so); 2455 /* 2456 * If connection is half-synchronized 2457 * (ie NEEDSYN flag on) then delay ACK, 2458 * so it may be piggybacked when SYN is sent. 2459 * Otherwise, since we received a FIN then no 2460 * more input can be expected, send ACK now. 2461 */ 2462 if (DELAY_ACK(tp) && (tp->t_flags & TF_NEEDSYN)) { 2463 tcp_callout_reset(tp, tp->tt_delack, 2464 tcp_delacktime, tcp_timer_delack); 2465 } else { 2466 tp->t_flags |= TF_ACKNOW; 2467 } 2468 tp->rcv_nxt++; 2469 } 2470 2471 switch (tp->t_state) { 2472 /* 2473 * In SYN_RECEIVED and ESTABLISHED STATES 2474 * enter the CLOSE_WAIT state. 2475 */ 2476 case TCPS_SYN_RECEIVED: 2477 tp->t_starttime = ticks; 2478 /*FALLTHROUGH*/ 2479 case TCPS_ESTABLISHED: 2480 tp->t_state = TCPS_CLOSE_WAIT; 2481 break; 2482 2483 /* 2484 * If still in FIN_WAIT_1 STATE FIN has not been acked so 2485 * enter the CLOSING state. 2486 */ 2487 case TCPS_FIN_WAIT_1: 2488 tp->t_state = TCPS_CLOSING; 2489 break; 2490 2491 /* 2492 * In FIN_WAIT_2 state enter the TIME_WAIT state, 2493 * starting the time-wait timer, turning off the other 2494 * standard timers. 2495 */ 2496 case TCPS_FIN_WAIT_2: 2497 tp->t_state = TCPS_TIME_WAIT; 2498 tcp_canceltimers(tp); 2499 tcp_callout_reset(tp, tp->tt_2msl, 2 * tcp_rmx_msl(tp), 2500 tcp_timer_2msl); 2501 soisdisconnected(so); 2502 break; 2503 2504 /* 2505 * In TIME_WAIT state restart the 2 MSL time_wait timer. 2506 */ 2507 case TCPS_TIME_WAIT: 2508 tcp_callout_reset(tp, tp->tt_2msl, 2 * tcp_rmx_msl(tp), 2509 tcp_timer_2msl); 2510 break; 2511 } 2512 } 2513 2514 #ifdef TCPDEBUG 2515 if (so->so_options & SO_DEBUG) 2516 tcp_trace(TA_INPUT, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0); 2517 #endif 2518 2519 /* 2520 * Delayed duplicated ACK processing 2521 */ 2522 if (delayed_dupack && tcp_recv_dupack(tp, th_dupack, &to)) 2523 needoutput = FALSE; 2524 2525 /* 2526 * Return any desired output. 2527 */ 2528 if (needoutput || (tp->t_flags & TF_ACKNOW)) 2529 tcp_output(tp); 2530 tcp_sack_report_cleanup(tp); 2531 return(IPPROTO_DONE); 2532 2533 dropafterack: 2534 /* 2535 * Generate an ACK dropping incoming segment if it occupies 2536 * sequence space, where the ACK reflects our state. 2537 * 2538 * We can now skip the test for the RST flag since all 2539 * paths to this code happen after packets containing 2540 * RST have been dropped. 2541 * 2542 * In the SYN-RECEIVED state, don't send an ACK unless the 2543 * segment we received passes the SYN-RECEIVED ACK test. 2544 * If it fails send a RST. This breaks the loop in the 2545 * "LAND" DoS attack, and also prevents an ACK storm 2546 * between two listening ports that have been sent forged 2547 * SYN segments, each with the source address of the other. 2548 */ 2549 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) && 2550 (SEQ_GT(tp->snd_una, th->th_ack) || 2551 SEQ_GT(th->th_ack, tp->snd_max)) ) { 2552 rstreason = BANDLIM_RST_OPENPORT; 2553 goto dropwithreset; 2554 } 2555 #ifdef TCPDEBUG 2556 if (so->so_options & SO_DEBUG) 2557 tcp_trace(TA_DROP, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0); 2558 #endif 2559 m_freem(m); 2560 tp->t_flags |= TF_ACKNOW; 2561 tcp_output(tp); 2562 tcp_sack_report_cleanup(tp); 2563 return(IPPROTO_DONE); 2564 2565 dropwithreset: 2566 /* 2567 * Generate a RST, dropping incoming segment. 2568 * Make ACK acceptable to originator of segment. 2569 * Don't bother to respond if destination was broadcast/multicast. 2570 */ 2571 if ((thflags & TH_RST) || m->m_flags & (M_BCAST | M_MCAST)) 2572 goto drop; 2573 if (isipv6) { 2574 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || 2575 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) 2576 goto drop; 2577 } else { 2578 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 2579 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || 2580 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || 2581 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) 2582 goto drop; 2583 } 2584 /* IPv6 anycast check is done at tcp6_input() */ 2585 2586 /* 2587 * Perform bandwidth limiting. 2588 */ 2589 #ifdef ICMP_BANDLIM 2590 if (badport_bandlim(rstreason) < 0) 2591 goto drop; 2592 #endif 2593 2594 #ifdef TCPDEBUG 2595 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) 2596 tcp_trace(TA_DROP, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0); 2597 #endif 2598 if (thflags & TH_ACK) 2599 /* mtod() below is safe as long as hdr dropping is delayed */ 2600 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0, th->th_ack, 2601 TH_RST); 2602 else { 2603 if (thflags & TH_SYN) 2604 tlen++; 2605 /* mtod() below is safe as long as hdr dropping is delayed */ 2606 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq + tlen, 2607 (tcp_seq)0, TH_RST | TH_ACK); 2608 } 2609 if (tp != NULL) 2610 tcp_sack_report_cleanup(tp); 2611 return(IPPROTO_DONE); 2612 2613 drop: 2614 /* 2615 * Drop space held by incoming segment and return. 2616 */ 2617 #ifdef TCPDEBUG 2618 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) 2619 tcp_trace(TA_DROP, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0); 2620 #endif 2621 m_freem(m); 2622 if (tp != NULL) 2623 tcp_sack_report_cleanup(tp); 2624 return(IPPROTO_DONE); 2625 } 2626 2627 /* 2628 * Parse TCP options and place in tcpopt. 2629 */ 2630 static void 2631 tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, boolean_t is_syn, 2632 tcp_seq ack) 2633 { 2634 int opt, optlen, i; 2635 2636 to->to_flags = 0; 2637 for (; cnt > 0; cnt -= optlen, cp += optlen) { 2638 opt = cp[0]; 2639 if (opt == TCPOPT_EOL) 2640 break; 2641 if (opt == TCPOPT_NOP) 2642 optlen = 1; 2643 else { 2644 if (cnt < 2) 2645 break; 2646 optlen = cp[1]; 2647 if (optlen < 2 || optlen > cnt) 2648 break; 2649 } 2650 switch (opt) { 2651 case TCPOPT_MAXSEG: 2652 if (optlen != TCPOLEN_MAXSEG) 2653 continue; 2654 if (!is_syn) 2655 continue; 2656 to->to_flags |= TOF_MSS; 2657 bcopy(cp + 2, &to->to_mss, sizeof to->to_mss); 2658 to->to_mss = ntohs(to->to_mss); 2659 break; 2660 case TCPOPT_WINDOW: 2661 if (optlen != TCPOLEN_WINDOW) 2662 continue; 2663 if (!is_syn) 2664 continue; 2665 to->to_flags |= TOF_SCALE; 2666 to->to_requested_s_scale = min(cp[2], TCP_MAX_WINSHIFT); 2667 break; 2668 case TCPOPT_TIMESTAMP: 2669 if (optlen != TCPOLEN_TIMESTAMP) 2670 continue; 2671 to->to_flags |= TOF_TS; 2672 bcopy(cp + 2, &to->to_tsval, sizeof to->to_tsval); 2673 to->to_tsval = ntohl(to->to_tsval); 2674 bcopy(cp + 6, &to->to_tsecr, sizeof to->to_tsecr); 2675 to->to_tsecr = ntohl(to->to_tsecr); 2676 /* 2677 * If echoed timestamp is later than the current time, 2678 * fall back to non RFC1323 RTT calculation. 2679 */ 2680 if (to->to_tsecr != 0 && TSTMP_GT(to->to_tsecr, ticks)) 2681 to->to_tsecr = 0; 2682 break; 2683 case TCPOPT_SACK_PERMITTED: 2684 if (optlen != TCPOLEN_SACK_PERMITTED) 2685 continue; 2686 if (!is_syn) 2687 continue; 2688 to->to_flags |= TOF_SACK_PERMITTED; 2689 break; 2690 case TCPOPT_SACK: 2691 if ((optlen - 2) & 0x07) /* not multiple of 8 */ 2692 continue; 2693 to->to_nsackblocks = (optlen - 2) / 8; 2694 to->to_sackblocks = (struct raw_sackblock *) (cp + 2); 2695 to->to_flags |= TOF_SACK; 2696 for (i = 0; i < to->to_nsackblocks; i++) { 2697 struct raw_sackblock *r = &to->to_sackblocks[i]; 2698 2699 r->rblk_start = ntohl(r->rblk_start); 2700 r->rblk_end = ntohl(r->rblk_end); 2701 2702 if (SEQ_LEQ(r->rblk_end, r->rblk_start)) { 2703 /* 2704 * Invalid SACK block; discard all 2705 * SACK blocks 2706 */ 2707 tcpstat.tcps_rcvbadsackopt++; 2708 to->to_nsackblocks = 0; 2709 to->to_sackblocks = NULL; 2710 to->to_flags &= ~TOF_SACK; 2711 break; 2712 } 2713 } 2714 if ((to->to_flags & TOF_SACK) && 2715 tcp_sack_ndsack_blocks(to->to_sackblocks, 2716 to->to_nsackblocks, ack)) 2717 to->to_flags |= TOF_DSACK; 2718 break; 2719 #ifdef TCP_SIGNATURE 2720 /* 2721 * XXX In order to reply to a host which has set the 2722 * TCP_SIGNATURE option in its initial SYN, we have to 2723 * record the fact that the option was observed here 2724 * for the syncache code to perform the correct response. 2725 */ 2726 case TCPOPT_SIGNATURE: 2727 if (optlen != TCPOLEN_SIGNATURE) 2728 continue; 2729 to->to_flags |= (TOF_SIGNATURE | TOF_SIGLEN); 2730 break; 2731 #endif /* TCP_SIGNATURE */ 2732 default: 2733 continue; 2734 } 2735 } 2736 } 2737 2738 /* 2739 * Pull out of band byte out of a segment so 2740 * it doesn't appear in the user's data queue. 2741 * It is still reflected in the segment length for 2742 * sequencing purposes. 2743 * "off" is the delayed to be dropped hdrlen. 2744 */ 2745 static void 2746 tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m, int off) 2747 { 2748 int cnt = off + th->th_urp - 1; 2749 2750 while (cnt >= 0) { 2751 if (m->m_len > cnt) { 2752 char *cp = mtod(m, caddr_t) + cnt; 2753 struct tcpcb *tp = sototcpcb(so); 2754 2755 tp->t_iobc = *cp; 2756 tp->t_oobflags |= TCPOOB_HAVEDATA; 2757 bcopy(cp + 1, cp, m->m_len - cnt - 1); 2758 m->m_len--; 2759 if (m->m_flags & M_PKTHDR) 2760 m->m_pkthdr.len--; 2761 return; 2762 } 2763 cnt -= m->m_len; 2764 m = m->m_next; 2765 if (m == NULL) 2766 break; 2767 } 2768 panic("tcp_pulloutofband"); 2769 } 2770 2771 /* 2772 * Collect new round-trip time estimate 2773 * and update averages and current timeout. 2774 */ 2775 static void 2776 tcp_xmit_timer(struct tcpcb *tp, int rtt, tcp_seq ack) 2777 { 2778 int rebaserto = 0; 2779 2780 tcpstat.tcps_rttupdated++; 2781 tp->t_rttupdated++; 2782 if ((tp->rxt_flags & TRXT_F_REBASERTO) && 2783 SEQ_GT(ack, tp->snd_max_prev)) { 2784 #ifdef DEBUG_EIFEL_RESPONSE 2785 kprintf("srtt/rttvar, prev %d/%d, cur %d/%d, ", 2786 tp->t_srtt_prev, tp->t_rttvar_prev, 2787 tp->t_srtt, tp->t_rttvar); 2788 #endif 2789 2790 tcpstat.tcps_eifelresponse++; 2791 rebaserto = 1; 2792 tp->rxt_flags &= ~TRXT_F_REBASERTO; 2793 tp->t_srtt = max(tp->t_srtt_prev, (rtt << TCP_RTT_SHIFT)); 2794 tp->t_rttvar = max(tp->t_rttvar_prev, 2795 (rtt << (TCP_RTTVAR_SHIFT - 1))); 2796 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar) 2797 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 2798 2799 #ifdef DEBUG_EIFEL_RESPONSE 2800 kprintf("new %d/%d ", tp->t_srtt, tp->t_rttvar); 2801 #endif 2802 } else if (tp->t_srtt != 0) { 2803 int delta; 2804 2805 /* 2806 * srtt is stored as fixed point with 5 bits after the 2807 * binary point (i.e., scaled by 8). The following magic 2808 * is equivalent to the smoothing algorithm in rfc793 with 2809 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed 2810 * point). Adjust rtt to origin 0. 2811 */ 2812 delta = ((rtt - 1) << TCP_DELTA_SHIFT) 2813 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT)); 2814 2815 if ((tp->t_srtt += delta) <= 0) 2816 tp->t_srtt = 1; 2817 2818 /* 2819 * We accumulate a smoothed rtt variance (actually, a 2820 * smoothed mean difference), then set the retransmit 2821 * timer to smoothed rtt + 4 times the smoothed variance. 2822 * rttvar is stored as fixed point with 4 bits after the 2823 * binary point (scaled by 16). The following is 2824 * equivalent to rfc793 smoothing with an alpha of .75 2825 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces 2826 * rfc793's wired-in beta. 2827 */ 2828 if (delta < 0) 2829 delta = -delta; 2830 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT); 2831 if ((tp->t_rttvar += delta) <= 0) 2832 tp->t_rttvar = 1; 2833 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar) 2834 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 2835 } else { 2836 /* 2837 * No rtt measurement yet - use the unsmoothed rtt. 2838 * Set the variance to half the rtt (so our first 2839 * retransmit happens at 3*rtt). 2840 */ 2841 tp->t_srtt = rtt << TCP_RTT_SHIFT; 2842 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1); 2843 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 2844 } 2845 tp->t_rtttime = 0; 2846 tp->t_rxtshift = 0; 2847 2848 #ifdef DEBUG_EIFEL_RESPONSE 2849 if (rebaserto) { 2850 kprintf("| rxtcur prev %d, old %d, ", 2851 tp->t_rxtcur_prev, tp->t_rxtcur); 2852 } 2853 #endif 2854 2855 /* 2856 * the retransmit should happen at rtt + 4 * rttvar. 2857 * Because of the way we do the smoothing, srtt and rttvar 2858 * will each average +1/2 tick of bias. When we compute 2859 * the retransmit timer, we want 1/2 tick of rounding and 2860 * 1 extra tick because of +-1/2 tick uncertainty in the 2861 * firing of the timer. The bias will give us exactly the 2862 * 1.5 tick we need. But, because the bias is 2863 * statistical, we have to test that we don't drop below 2864 * the minimum feasible timer (which is 2 ticks). 2865 */ 2866 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp), 2867 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX); 2868 2869 if (rebaserto) { 2870 if (tp->t_rxtcur < tp->t_rxtcur_prev + tcp_eifel_rtoinc) { 2871 /* 2872 * RFC4015 requires that the new RTO is at least 2873 * 2*G (tcp_eifel_rtoinc) greater then the RTO 2874 * (t_rxtcur_prev) when the spurious retransmit 2875 * timeout happens. 2876 * 2877 * The above condition could be true, if the SRTT 2878 * and RTTVAR used to calculate t_rxtcur_prev 2879 * resulted in a value less than t_rttmin. So 2880 * simply increasing SRTT by tcp_eifel_rtoinc when 2881 * preparing for the Eifel response in 2882 * tcp_save_congestion_state() could not ensure 2883 * that the new RTO will be tcp_eifel_rtoinc greater 2884 * t_rxtcur_prev. 2885 */ 2886 tp->t_rxtcur = tp->t_rxtcur_prev + tcp_eifel_rtoinc; 2887 } 2888 #ifdef DEBUG_EIFEL_RESPONSE 2889 kprintf("new %d\n", tp->t_rxtcur); 2890 #endif 2891 } 2892 2893 /* 2894 * We received an ack for a packet that wasn't retransmitted; 2895 * it is probably safe to discard any error indications we've 2896 * received recently. This isn't quite right, but close enough 2897 * for now (a route might have failed after we sent a segment, 2898 * and the return path might not be symmetrical). 2899 */ 2900 tp->t_softerror = 0; 2901 } 2902 2903 /* 2904 * Determine a reasonable value for maxseg size. 2905 * If the route is known, check route for mtu. 2906 * If none, use an mss that can be handled on the outgoing 2907 * interface without forcing IP to fragment; if bigger than 2908 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES 2909 * to utilize large mbufs. If no route is found, route has no mtu, 2910 * or the destination isn't local, use a default, hopefully conservative 2911 * size (usually 512 or the default IP max size, but no more than the mtu 2912 * of the interface), as we can't discover anything about intervening 2913 * gateways or networks. We also initialize the congestion/slow start 2914 * window to be a single segment if the destination isn't local. 2915 * While looking at the routing entry, we also initialize other path-dependent 2916 * parameters from pre-set or cached values in the routing entry. 2917 * 2918 * Also take into account the space needed for options that we 2919 * send regularly. Make maxseg shorter by that amount to assure 2920 * that we can send maxseg amount of data even when the options 2921 * are present. Store the upper limit of the length of options plus 2922 * data in maxopd. 2923 * 2924 * NOTE that this routine is only called when we process an incoming 2925 * segment, for outgoing segments only tcp_mssopt is called. 2926 */ 2927 void 2928 tcp_mss(struct tcpcb *tp, int offer) 2929 { 2930 struct rtentry *rt; 2931 struct ifnet *ifp; 2932 int rtt, mss; 2933 u_long bufsize; 2934 struct inpcb *inp = tp->t_inpcb; 2935 struct socket *so; 2936 #ifdef INET6 2937 boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) ? TRUE : FALSE); 2938 size_t min_protoh = isipv6 ? 2939 sizeof(struct ip6_hdr) + sizeof(struct tcphdr) : 2940 sizeof(struct tcpiphdr); 2941 #else 2942 const boolean_t isipv6 = FALSE; 2943 const size_t min_protoh = sizeof(struct tcpiphdr); 2944 #endif 2945 2946 if (isipv6) 2947 rt = tcp_rtlookup6(&inp->inp_inc); 2948 else 2949 rt = tcp_rtlookup(&inp->inp_inc); 2950 if (rt == NULL) { 2951 tp->t_maxopd = tp->t_maxseg = 2952 (isipv6 ? tcp_v6mssdflt : tcp_mssdflt); 2953 return; 2954 } 2955 ifp = rt->rt_ifp; 2956 so = inp->inp_socket; 2957 2958 /* 2959 * Offer == 0 means that there was no MSS on the SYN segment, 2960 * in this case we use either the interface mtu or tcp_mssdflt. 2961 * 2962 * An offer which is too large will be cut down later. 2963 */ 2964 if (offer == 0) { 2965 if (isipv6) { 2966 if (in6_localaddr(&inp->in6p_faddr)) { 2967 offer = ND_IFINFO(rt->rt_ifp)->linkmtu - 2968 min_protoh; 2969 } else { 2970 offer = tcp_v6mssdflt; 2971 } 2972 } else { 2973 if (in_localaddr(inp->inp_faddr)) 2974 offer = ifp->if_mtu - min_protoh; 2975 else 2976 offer = tcp_mssdflt; 2977 } 2978 } 2979 2980 /* 2981 * Prevent DoS attack with too small MSS. Round up 2982 * to at least minmss. 2983 * 2984 * Sanity check: make sure that maxopd will be large 2985 * enough to allow some data on segments even is the 2986 * all the option space is used (40bytes). Otherwise 2987 * funny things may happen in tcp_output. 2988 */ 2989 offer = max(offer, tcp_minmss); 2990 offer = max(offer, 64); 2991 2992 rt->rt_rmx.rmx_mssopt = offer; 2993 2994 /* 2995 * While we're here, check if there's an initial rtt 2996 * or rttvar. Convert from the route-table units 2997 * to scaled multiples of the slow timeout timer. 2998 */ 2999 if (tp->t_srtt == 0 && (rtt = rt->rt_rmx.rmx_rtt)) { 3000 /* 3001 * XXX the lock bit for RTT indicates that the value 3002 * is also a minimum value; this is subject to time. 3003 */ 3004 if (rt->rt_rmx.rmx_locks & RTV_RTT) 3005 tp->t_rttmin = rtt / (RTM_RTTUNIT / hz); 3006 tp->t_srtt = rtt / (RTM_RTTUNIT / (hz * TCP_RTT_SCALE)); 3007 tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE; 3008 tcpstat.tcps_usedrtt++; 3009 if (rt->rt_rmx.rmx_rttvar) { 3010 tp->t_rttvar = rt->rt_rmx.rmx_rttvar / 3011 (RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE)); 3012 tcpstat.tcps_usedrttvar++; 3013 } else { 3014 /* default variation is +- 1 rtt */ 3015 tp->t_rttvar = 3016 tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE; 3017 } 3018 TCPT_RANGESET(tp->t_rxtcur, 3019 ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1, 3020 tp->t_rttmin, TCPTV_REXMTMAX); 3021 } 3022 3023 /* 3024 * if there's an mtu associated with the route, use it 3025 * else, use the link mtu. Take the smaller of mss or offer 3026 * as our final mss. 3027 */ 3028 if (rt->rt_rmx.rmx_mtu) { 3029 mss = rt->rt_rmx.rmx_mtu - min_protoh; 3030 } else { 3031 if (isipv6) 3032 mss = ND_IFINFO(rt->rt_ifp)->linkmtu - min_protoh; 3033 else 3034 mss = ifp->if_mtu - min_protoh; 3035 } 3036 mss = min(mss, offer); 3037 3038 /* 3039 * maxopd stores the maximum length of data AND options 3040 * in a segment; maxseg is the amount of data in a normal 3041 * segment. We need to store this value (maxopd) apart 3042 * from maxseg, because now every segment carries options 3043 * and thus we normally have somewhat less data in segments. 3044 */ 3045 tp->t_maxopd = mss; 3046 3047 if ((tp->t_flags & (TF_REQ_TSTMP | TF_NOOPT)) == TF_REQ_TSTMP && 3048 ((tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)) 3049 mss -= TCPOLEN_TSTAMP_APPA; 3050 3051 #if (MCLBYTES & (MCLBYTES - 1)) == 0 3052 if (mss > MCLBYTES) 3053 mss &= ~(MCLBYTES-1); 3054 #else 3055 if (mss > MCLBYTES) 3056 mss = mss / MCLBYTES * MCLBYTES; 3057 #endif 3058 /* 3059 * If there's a pipesize, change the socket buffer 3060 * to that size. Make the socket buffers an integral 3061 * number of mss units; if the mss is larger than 3062 * the socket buffer, decrease the mss. 3063 */ 3064 #ifdef RTV_SPIPE 3065 if ((bufsize = rt->rt_rmx.rmx_sendpipe) == 0) 3066 #endif 3067 bufsize = so->so_snd.ssb_hiwat; 3068 if (bufsize < mss) 3069 mss = bufsize; 3070 else { 3071 bufsize = roundup(bufsize, mss); 3072 if (bufsize > sb_max) 3073 bufsize = sb_max; 3074 if (bufsize > so->so_snd.ssb_hiwat) 3075 ssb_reserve(&so->so_snd, bufsize, so, NULL); 3076 } 3077 tp->t_maxseg = mss; 3078 3079 #ifdef RTV_RPIPE 3080 if ((bufsize = rt->rt_rmx.rmx_recvpipe) == 0) 3081 #endif 3082 bufsize = so->so_rcv.ssb_hiwat; 3083 if (bufsize > mss) { 3084 bufsize = roundup(bufsize, mss); 3085 if (bufsize > sb_max) 3086 bufsize = sb_max; 3087 if (bufsize > so->so_rcv.ssb_hiwat) { 3088 lwkt_gettoken(&so->so_rcv.ssb_token); 3089 ssb_reserve(&so->so_rcv, bufsize, so, NULL); 3090 lwkt_reltoken(&so->so_rcv.ssb_token); 3091 } 3092 } 3093 3094 /* 3095 * Set the slow-start flight size 3096 * 3097 * NOTE: t_maxseg must have been configured! 3098 */ 3099 tp->snd_cwnd = tcp_initial_window(tp); 3100 3101 if (rt->rt_rmx.rmx_ssthresh) { 3102 /* 3103 * There's some sort of gateway or interface 3104 * buffer limit on the path. Use this to set 3105 * the slow start threshhold, but set the 3106 * threshold to no less than 2*mss. 3107 */ 3108 tp->snd_ssthresh = max(2 * mss, rt->rt_rmx.rmx_ssthresh); 3109 tcpstat.tcps_usedssthresh++; 3110 } 3111 } 3112 3113 /* 3114 * Determine the MSS option to send on an outgoing SYN. 3115 */ 3116 int 3117 tcp_mssopt(struct tcpcb *tp) 3118 { 3119 struct rtentry *rt; 3120 #ifdef INET6 3121 boolean_t isipv6 = 3122 ((tp->t_inpcb->inp_vflag & INP_IPV6) ? TRUE : FALSE); 3123 int min_protoh = isipv6 ? 3124 sizeof(struct ip6_hdr) + sizeof(struct tcphdr) : 3125 sizeof(struct tcpiphdr); 3126 #else 3127 const boolean_t isipv6 = FALSE; 3128 const size_t min_protoh = sizeof(struct tcpiphdr); 3129 #endif 3130 3131 if (isipv6) 3132 rt = tcp_rtlookup6(&tp->t_inpcb->inp_inc); 3133 else 3134 rt = tcp_rtlookup(&tp->t_inpcb->inp_inc); 3135 if (rt == NULL) 3136 return (isipv6 ? tcp_v6mssdflt : tcp_mssdflt); 3137 3138 return (rt->rt_ifp->if_mtu - min_protoh); 3139 } 3140 3141 /* 3142 * When a partial ack arrives, force the retransmission of the 3143 * next unacknowledged segment. Do not exit Fast Recovery. 3144 * 3145 * Implement the Slow-but-Steady variant of NewReno by restarting the 3146 * the retransmission timer. Turn it off here so it can be restarted 3147 * later in tcp_output(). 3148 */ 3149 static void 3150 tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th, int acked) 3151 { 3152 tcp_seq old_snd_nxt = tp->snd_nxt; 3153 u_long ocwnd = tp->snd_cwnd; 3154 3155 tcp_callout_stop(tp, tp->tt_rexmt); 3156 tp->t_rtttime = 0; 3157 tp->snd_nxt = th->th_ack; 3158 /* Set snd_cwnd to one segment beyond acknowledged offset. */ 3159 tp->snd_cwnd = tp->t_maxseg; 3160 tp->t_flags |= TF_ACKNOW; 3161 tcp_output(tp); 3162 if (SEQ_GT(old_snd_nxt, tp->snd_nxt)) 3163 tp->snd_nxt = old_snd_nxt; 3164 /* partial window deflation */ 3165 if (ocwnd > acked) 3166 tp->snd_cwnd = ocwnd - acked + tp->t_maxseg; 3167 else 3168 tp->snd_cwnd = tp->t_maxseg; 3169 } 3170 3171 /* 3172 * In contrast to the Slow-but-Steady NewReno variant, 3173 * we do not reset the retransmission timer for SACK retransmissions, 3174 * except when retransmitting snd_una. 3175 */ 3176 static void 3177 tcp_sack_rexmt(struct tcpcb *tp, boolean_t force) 3178 { 3179 tcp_seq old_snd_nxt = tp->snd_nxt; 3180 u_long ocwnd = tp->snd_cwnd; 3181 uint32_t pipe; 3182 int nseg = 0; /* consecutive new segments */ 3183 int nseg_rexmt = 0; /* retransmitted segments */ 3184 int maxrexmt = 0; 3185 #define MAXBURST 4 /* limit burst of new packets on partial ack */ 3186 3187 if (force) { 3188 uint32_t unsacked = tcp_sack_first_unsacked_len(tp); 3189 3190 /* 3191 * Try to fill the first hole in the receiver's 3192 * reassemble queue. 3193 */ 3194 maxrexmt = howmany(unsacked, tp->t_maxseg); 3195 if (maxrexmt > tcp_force_sackrxt) 3196 maxrexmt = tcp_force_sackrxt; 3197 } 3198 3199 tp->t_rtttime = 0; 3200 pipe = tcp_sack_compute_pipe(tp); 3201 while (((tcp_seq_diff_t)(ocwnd - pipe) >= (tcp_seq_diff_t)tp->t_maxseg 3202 || (force && nseg_rexmt < maxrexmt && nseg == 0)) && 3203 (!tcp_do_smartsack || nseg < MAXBURST)) { 3204 tcp_seq old_snd_max, old_rexmt_high, nextrexmt; 3205 uint32_t sent, seglen; 3206 boolean_t rescue; 3207 int error; 3208 3209 old_rexmt_high = tp->rexmt_high; 3210 if (!tcp_sack_nextseg(tp, &nextrexmt, &seglen, &rescue)) { 3211 tp->rexmt_high = old_rexmt_high; 3212 break; 3213 } 3214 3215 /* 3216 * If the next tranmission is a rescue retranmission, 3217 * we check whether we have already sent some data 3218 * (either new segments or retransmitted segments) 3219 * into the the network or not. Since the idea of rescue 3220 * retransmission is to sustain ACK clock, as long as 3221 * some segments are in the network, ACK clock will be 3222 * kept ticking. 3223 */ 3224 if (rescue && (nseg_rexmt > 0 || nseg > 0)) { 3225 tp->rexmt_high = old_rexmt_high; 3226 break; 3227 } 3228 3229 if (nextrexmt == tp->snd_max) 3230 ++nseg; 3231 else 3232 ++nseg_rexmt; 3233 tp->snd_nxt = nextrexmt; 3234 tp->snd_cwnd = nextrexmt - tp->snd_una + seglen; 3235 old_snd_max = tp->snd_max; 3236 if (nextrexmt == tp->snd_una) 3237 tcp_callout_stop(tp, tp->tt_rexmt); 3238 error = tcp_output(tp); 3239 if (error != 0) { 3240 tp->rexmt_high = old_rexmt_high; 3241 break; 3242 } 3243 sent = tp->snd_nxt - nextrexmt; 3244 if (sent <= 0) { 3245 tp->rexmt_high = old_rexmt_high; 3246 break; 3247 } 3248 pipe += sent; 3249 tcpstat.tcps_sndsackpack++; 3250 tcpstat.tcps_sndsackbyte += sent; 3251 3252 if (rescue) { 3253 tcpstat.tcps_sackrescue++; 3254 tp->rexmt_rescue = tp->snd_nxt; 3255 tp->sack_flags |= TSACK_F_SACKRESCUED; 3256 break; 3257 } 3258 if (SEQ_LT(nextrexmt, old_snd_max) && 3259 SEQ_LT(tp->rexmt_high, tp->snd_nxt)) { 3260 tp->rexmt_high = seq_min(tp->snd_nxt, old_snd_max); 3261 if (tcp_aggressive_rescuesack && 3262 (tp->sack_flags & TSACK_F_SACKRESCUED) && 3263 SEQ_LT(tp->rexmt_rescue, tp->rexmt_high)) { 3264 /* Drag RescueRxt along with HighRxt */ 3265 tp->rexmt_rescue = tp->rexmt_high; 3266 } 3267 } 3268 } 3269 if (SEQ_GT(old_snd_nxt, tp->snd_nxt)) 3270 tp->snd_nxt = old_snd_nxt; 3271 tp->snd_cwnd = ocwnd; 3272 } 3273 3274 /* 3275 * Return TRUE, if some new segments are sent 3276 */ 3277 static boolean_t 3278 tcp_sack_limitedxmit(struct tcpcb *tp) 3279 { 3280 tcp_seq oldsndnxt = tp->snd_nxt; 3281 tcp_seq oldsndmax = tp->snd_max; 3282 u_long ocwnd = tp->snd_cwnd; 3283 uint32_t pipe, sent; 3284 boolean_t ret = FALSE; 3285 tcp_seq_diff_t cwnd_left; 3286 tcp_seq next; 3287 3288 tp->rexmt_high = tp->snd_una - 1; 3289 pipe = tcp_sack_compute_pipe(tp); 3290 cwnd_left = (tcp_seq_diff_t)(ocwnd - pipe); 3291 if (cwnd_left < (tcp_seq_diff_t)tp->t_maxseg) 3292 return FALSE; 3293 3294 next = tp->snd_nxt = tp->snd_max; 3295 tp->snd_cwnd = tp->snd_nxt - tp->snd_una + 3296 rounddown(cwnd_left, tp->t_maxseg); 3297 3298 tcp_output(tp); 3299 3300 sent = tp->snd_nxt - next; 3301 if (sent > 0) { 3302 tcpstat.tcps_sndlimited += howmany(sent, tp->t_maxseg); 3303 ret = TRUE; 3304 } 3305 3306 if (SEQ_LT(oldsndnxt, oldsndmax)) { 3307 KASSERT(SEQ_GEQ(oldsndnxt, tp->snd_una), 3308 ("snd_una moved in other threads")); 3309 tp->snd_nxt = oldsndnxt; 3310 } 3311 tp->snd_cwnd = ocwnd; 3312 3313 if (ret && TCP_DO_NCR(tp)) 3314 tcp_ncr_update_rxtthresh(tp); 3315 3316 return ret; 3317 } 3318 3319 /* 3320 * Reset idle time and keep-alive timer, typically called when a valid 3321 * tcp packet is received but may also be called when FASTKEEP is set 3322 * to prevent the previous long-timeout from calculating to a drop. 3323 * 3324 * Only update t_rcvtime for non-SYN packets. 3325 * 3326 * Handle the case where one side thinks the connection is established 3327 * but the other side has, say, rebooted without cleaning out the 3328 * connection. The SYNs could be construed as an attack and wind 3329 * up ignored, but in case it isn't an attack we can validate the 3330 * connection by forcing a keepalive. 3331 */ 3332 void 3333 tcp_timer_keep_activity(struct tcpcb *tp, int thflags) 3334 { 3335 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 3336 if ((thflags & (TH_SYN | TH_ACK)) == TH_SYN) { 3337 tp->t_flags |= TF_KEEPALIVE; 3338 tcp_callout_reset(tp, tp->tt_keep, hz / 2, 3339 tcp_timer_keep); 3340 } else { 3341 tp->t_rcvtime = ticks; 3342 tp->t_flags &= ~TF_KEEPALIVE; 3343 tcp_callout_reset(tp, tp->tt_keep, 3344 tp->t_keepidle, 3345 tcp_timer_keep); 3346 } 3347 } 3348 } 3349 3350 static int 3351 tcp_rmx_msl(const struct tcpcb *tp) 3352 { 3353 struct rtentry *rt; 3354 struct inpcb *inp = tp->t_inpcb; 3355 int msl; 3356 #ifdef INET6 3357 boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) ? TRUE : FALSE); 3358 #else 3359 const boolean_t isipv6 = FALSE; 3360 #endif 3361 3362 if (isipv6) 3363 rt = tcp_rtlookup6(&inp->inp_inc); 3364 else 3365 rt = tcp_rtlookup(&inp->inp_inc); 3366 if (rt == NULL || rt->rt_rmx.rmx_msl == 0) 3367 return tcp_msl; 3368 3369 msl = (rt->rt_rmx.rmx_msl * hz) / 1000; 3370 if (msl == 0) 3371 msl = 1; 3372 3373 return msl; 3374 } 3375 3376 static void 3377 tcp_established(struct tcpcb *tp) 3378 { 3379 tp->t_state = TCPS_ESTABLISHED; 3380 tcp_callout_reset(tp, tp->tt_keep, tp->t_keepidle, tcp_timer_keep); 3381 3382 if (tp->t_rxtsyn > 0) { 3383 /* 3384 * RFC6298: 3385 * "If the timer expires awaiting the ACK of a SYN segment 3386 * and the TCP implementation is using an RTO less than 3 3387 * seconds, the RTO MUST be re-initialized to 3 seconds 3388 * when data transmission begins" 3389 */ 3390 if (tp->t_rxtcur < TCPTV_RTOBASE3) 3391 tp->t_rxtcur = TCPTV_RTOBASE3; 3392 } 3393 } 3394 3395 /* 3396 * Returns TRUE, if the ACK should be dropped 3397 */ 3398 static boolean_t 3399 tcp_recv_dupack(struct tcpcb *tp, tcp_seq th_ack, const struct tcpopt *to) 3400 { 3401 boolean_t fast_sack_rexmt = TRUE; 3402 3403 tcpstat.tcps_rcvdupack++; 3404 3405 /* 3406 * We have outstanding data (other than a window probe), 3407 * this is a completely duplicate ack (ie, window info 3408 * didn't change), the ack is the biggest we've seen and 3409 * we've seen exactly our rexmt threshhold of them, so 3410 * assume a packet has been dropped and retransmit it. 3411 * Kludge snd_nxt & the congestion window so we send only 3412 * this one packet. 3413 */ 3414 if (IN_FASTRECOVERY(tp)) { 3415 if (TCP_DO_SACK(tp)) { 3416 boolean_t force = FALSE; 3417 3418 if (tp->snd_una == tp->rexmt_high && 3419 (to->to_flags & (TOF_SACK | TOF_SACK_REDUNDANT)) == 3420 TOF_SACK) { 3421 /* 3422 * New segments got SACKed and 3423 * no retransmit yet. 3424 */ 3425 force = TRUE; 3426 } 3427 3428 /* No artifical cwnd inflation. */ 3429 tcp_sack_rexmt(tp, force); 3430 } else { 3431 /* 3432 * Dup acks mean that packets have left 3433 * the network (they're now cached at the 3434 * receiver) so bump cwnd by the amount in 3435 * the receiver to keep a constant cwnd 3436 * packets in the network. 3437 */ 3438 tp->snd_cwnd += tp->t_maxseg; 3439 tcp_output(tp); 3440 } 3441 return TRUE; 3442 } else if (SEQ_LT(th_ack, tp->snd_recover)) { 3443 tp->t_dupacks = 0; 3444 return FALSE; 3445 } else if (tcp_ignore_redun_dsack && TCP_DO_SACK(tp) && 3446 (to->to_flags & (TOF_DSACK | TOF_SACK_REDUNDANT)) == 3447 (TOF_DSACK | TOF_SACK_REDUNDANT)) { 3448 /* 3449 * If the ACK carries DSACK and other SACK blocks 3450 * carry information that we have already known, 3451 * don't count this ACK as duplicate ACK. This 3452 * prevents spurious early retransmit and fast 3453 * retransmit. This also meets the requirement of 3454 * RFC3042 that new segments should not be sent if 3455 * the SACK blocks do not contain new information 3456 * (XXX we actually loosen the requirment that only 3457 * DSACK is checked here). 3458 * 3459 * This kind of ACKs are usually sent after spurious 3460 * retransmit. 3461 */ 3462 /* Do nothing; don't change t_dupacks */ 3463 return TRUE; 3464 } else if (tp->t_dupacks == 0 && TCP_DO_NCR(tp)) { 3465 tcp_ncr_update_rxtthresh(tp); 3466 } 3467 3468 if (++tp->t_dupacks == tp->t_rxtthresh) { 3469 tcp_seq old_snd_nxt; 3470 u_int win; 3471 3472 fastretransmit: 3473 if (tcp_do_eifel_detect && (tp->t_flags & TF_RCVD_TSTMP)) { 3474 tcp_save_congestion_state(tp); 3475 tp->rxt_flags |= TRXT_F_FASTREXMT; 3476 } 3477 /* 3478 * We know we're losing at the current window size, 3479 * so do congestion avoidance: set ssthresh to half 3480 * the current window and pull our congestion window 3481 * back to the new ssthresh. 3482 */ 3483 win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg; 3484 if (win < 2) 3485 win = 2; 3486 tp->snd_ssthresh = win * tp->t_maxseg; 3487 ENTER_FASTRECOVERY(tp); 3488 tp->snd_recover = tp->snd_max; 3489 tcp_callout_stop(tp, tp->tt_rexmt); 3490 tp->t_rtttime = 0; 3491 old_snd_nxt = tp->snd_nxt; 3492 tp->snd_nxt = th_ack; 3493 tp->snd_cwnd = tp->t_maxseg; 3494 tcp_output(tp); 3495 ++tcpstat.tcps_sndfastrexmit; 3496 tp->snd_cwnd = tp->snd_ssthresh; 3497 tp->rexmt_high = tp->snd_nxt; 3498 tp->sack_flags &= ~TSACK_F_SACKRESCUED; 3499 if (SEQ_GT(old_snd_nxt, tp->snd_nxt)) 3500 tp->snd_nxt = old_snd_nxt; 3501 KASSERT(tp->snd_limited <= 2, ("tp->snd_limited too big")); 3502 if (TCP_DO_SACK(tp)) { 3503 if (fast_sack_rexmt) 3504 tcp_sack_rexmt(tp, FALSE); 3505 } else { 3506 tp->snd_cwnd += tp->t_maxseg * 3507 (tp->t_dupacks - tp->snd_limited); 3508 } 3509 } else if ((tcp_do_rfc3517bis && TCP_DO_SACK(tp)) || TCP_DO_NCR(tp)) { 3510 /* 3511 * The RFC3517bis recommends to reduce the byte threshold, 3512 * and enter fast retransmit if IsLost(snd_una). However, 3513 * if we use IsLost(snd_una) based fast retransmit here, 3514 * segments reordering will cause spurious retransmit. So 3515 * we defer the IsLost(snd_una) based fast retransmit until 3516 * the extended limited transmit can't send any segments and 3517 * early retransmit can't be done. 3518 */ 3519 if (tcp_rfc3517bis_rxt && tcp_do_rfc3517bis && 3520 tcp_sack_islost(&tp->scb, tp->snd_una)) 3521 goto fastretransmit; 3522 3523 if (tcp_do_limitedtransmit || TCP_DO_NCR(tp)) { 3524 if (!tcp_sack_limitedxmit(tp)) { 3525 /* outstanding data */ 3526 uint32_t ownd = tp->snd_max - tp->snd_una; 3527 3528 if (need_early_retransmit(tp, ownd)) { 3529 ++tcpstat.tcps_sndearlyrexmit; 3530 tp->rxt_flags |= TRXT_F_EARLYREXMT; 3531 goto fastretransmit; 3532 } else if (tcp_do_rfc3517bis && 3533 tcp_sack_islost(&tp->scb, tp->snd_una)) { 3534 fast_sack_rexmt = FALSE; 3535 goto fastretransmit; 3536 } 3537 } 3538 } 3539 } else if (tcp_do_limitedtransmit) { 3540 u_long oldcwnd = tp->snd_cwnd; 3541 tcp_seq oldsndmax = tp->snd_max; 3542 tcp_seq oldsndnxt = tp->snd_nxt; 3543 /* outstanding data */ 3544 uint32_t ownd = tp->snd_max - tp->snd_una; 3545 u_int sent; 3546 3547 KASSERT(tp->t_dupacks == 1 || tp->t_dupacks == 2, 3548 ("dupacks not 1 or 2")); 3549 if (tp->t_dupacks == 1) 3550 tp->snd_limited = 0; 3551 tp->snd_nxt = tp->snd_max; 3552 tp->snd_cwnd = ownd + 3553 (tp->t_dupacks - tp->snd_limited) * tp->t_maxseg; 3554 tcp_output(tp); 3555 3556 if (SEQ_LT(oldsndnxt, oldsndmax)) { 3557 KASSERT(SEQ_GEQ(oldsndnxt, tp->snd_una), 3558 ("snd_una moved in other threads")); 3559 tp->snd_nxt = oldsndnxt; 3560 } 3561 tp->snd_cwnd = oldcwnd; 3562 sent = tp->snd_max - oldsndmax; 3563 if (sent > tp->t_maxseg) { 3564 KASSERT((tp->t_dupacks == 2 && tp->snd_limited == 0) || 3565 (sent == tp->t_maxseg + 1 && 3566 (tp->t_flags & TF_SENTFIN)), 3567 ("sent too much")); 3568 KASSERT(sent <= tp->t_maxseg * 2, 3569 ("sent too many segments")); 3570 tp->snd_limited = 2; 3571 tcpstat.tcps_sndlimited += 2; 3572 } else if (sent > 0) { 3573 ++tp->snd_limited; 3574 ++tcpstat.tcps_sndlimited; 3575 } else if (need_early_retransmit(tp, ownd)) { 3576 ++tcpstat.tcps_sndearlyrexmit; 3577 tp->rxt_flags |= TRXT_F_EARLYREXMT; 3578 goto fastretransmit; 3579 } 3580 } 3581 return TRUE; 3582 } 3583