1 /* 2 * Copyright (c) 2002, 2003, 2004 Jeffrey M. Hsu. All rights reserved. 3 * Copyright (c) 2002, 2003, 2004 The DragonFly Project. All rights reserved. 4 * 5 * This code is derived from software contributed to The DragonFly Project 6 * by Jeffrey M. Hsu. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the name of The DragonFly Project nor the names of its 17 * contributors may be used to endorse or promote products derived 18 * from this software without specific, prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 25 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 28 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 29 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 32 */ 33 34 /* 35 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995 36 * The Regents of the University of California. All rights reserved. 37 * 38 * Redistribution and use in source and binary forms, with or without 39 * modification, are permitted provided that the following conditions 40 * are met: 41 * 1. Redistributions of source code must retain the above copyright 42 * notice, this list of conditions and the following disclaimer. 43 * 2. Redistributions in binary form must reproduce the above copyright 44 * notice, this list of conditions and the following disclaimer in the 45 * documentation and/or other materials provided with the distribution. 46 * 3. All advertising materials mentioning features or use of this software 47 * must display the following acknowledgement: 48 * This product includes software developed by the University of 49 * California, Berkeley and its contributors. 50 * 4. Neither the name of the University nor the names of its contributors 51 * may be used to endorse or promote products derived from this software 52 * without specific prior written permission. 53 * 54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 64 * SUCH DAMAGE. 65 * 66 * @(#)tcp_input.c 8.12 (Berkeley) 5/24/95 67 * $FreeBSD: src/sys/netinet/tcp_input.c,v 1.107.2.38 2003/05/21 04:46:41 cjc Exp $ 68 * $DragonFly: src/sys/netinet/tcp_input.c,v 1.65 2007/04/04 06:13:26 dillon Exp $ 69 */ 70 71 #include "opt_ipfw.h" /* for ipfw_fwd */ 72 #include "opt_inet6.h" 73 #include "opt_ipsec.h" 74 #include "opt_tcpdebug.h" 75 #include "opt_tcp_input.h" 76 77 #include <sys/param.h> 78 #include <sys/systm.h> 79 #include <sys/kernel.h> 80 #include <sys/sysctl.h> 81 #include <sys/malloc.h> 82 #include <sys/mbuf.h> 83 #include <sys/proc.h> /* for proc0 declaration */ 84 #include <sys/protosw.h> 85 #include <sys/socket.h> 86 #include <sys/socketvar.h> 87 #include <sys/syslog.h> 88 #include <sys/in_cksum.h> 89 90 #include <machine/cpu.h> /* before tcp_seq.h, for tcp_random18() */ 91 #include <machine/stdarg.h> 92 93 #include <net/if.h> 94 #include <net/route.h> 95 96 #include <netinet/in.h> 97 #include <netinet/in_systm.h> 98 #include <netinet/ip.h> 99 #include <netinet/ip_icmp.h> /* for ICMP_BANDLIM */ 100 #include <netinet/in_var.h> 101 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */ 102 #include <netinet/in_pcb.h> 103 #include <netinet/ip_var.h> 104 #include <netinet/ip6.h> 105 #include <netinet/icmp6.h> 106 #include <netinet6/nd6.h> 107 #include <netinet6/ip6_var.h> 108 #include <netinet6/in6_pcb.h> 109 #include <netinet/tcp.h> 110 #include <netinet/tcp_fsm.h> 111 #include <netinet/tcp_seq.h> 112 #include <netinet/tcp_timer.h> 113 #include <netinet/tcp_var.h> 114 #include <netinet6/tcp6_var.h> 115 #include <netinet/tcpip.h> 116 117 #ifdef TCPDEBUG 118 #include <netinet/tcp_debug.h> 119 120 u_char tcp_saveipgen[40]; /* the size must be of max ip header, now IPv6 */ 121 struct tcphdr tcp_savetcp; 122 #endif 123 124 #ifdef FAST_IPSEC 125 #include <netproto/ipsec/ipsec.h> 126 #include <netproto/ipsec/ipsec6.h> 127 #endif 128 129 #ifdef IPSEC 130 #include <netinet6/ipsec.h> 131 #include <netinet6/ipsec6.h> 132 #include <netproto/key/key.h> 133 #endif 134 135 MALLOC_DEFINE(M_TSEGQ, "tseg_qent", "TCP segment queue entry"); 136 137 tcp_cc tcp_ccgen; 138 static int log_in_vain = 0; 139 SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW, 140 &log_in_vain, 0, "Log all incoming TCP connections"); 141 142 static int blackhole = 0; 143 SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW, 144 &blackhole, 0, "Do not send RST when dropping refused connections"); 145 146 int tcp_delack_enabled = 1; 147 SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW, 148 &tcp_delack_enabled, 0, 149 "Delay ACK to try and piggyback it onto a data packet"); 150 151 #ifdef TCP_DROP_SYNFIN 152 static int drop_synfin = 0; 153 SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_RW, 154 &drop_synfin, 0, "Drop TCP packets with SYN+FIN set"); 155 #endif 156 157 static int tcp_do_limitedtransmit = 1; 158 SYSCTL_INT(_net_inet_tcp, OID_AUTO, limitedtransmit, CTLFLAG_RW, 159 &tcp_do_limitedtransmit, 0, "Enable RFC 3042 (Limited Transmit)"); 160 161 static int tcp_do_early_retransmit = 1; 162 SYSCTL_INT(_net_inet_tcp, OID_AUTO, earlyretransmit, CTLFLAG_RW, 163 &tcp_do_early_retransmit, 0, "Early retransmit"); 164 165 int tcp_aggregate_acks = 1; 166 SYSCTL_INT(_net_inet_tcp, OID_AUTO, aggregate_acks, CTLFLAG_RW, 167 &tcp_aggregate_acks, 0, "Aggregate built-up acks into one ack"); 168 169 int tcp_do_rfc3390 = 1; 170 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_RW, 171 &tcp_do_rfc3390, 0, 172 "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)"); 173 174 static int tcp_do_eifel_detect = 1; 175 SYSCTL_INT(_net_inet_tcp, OID_AUTO, eifel, CTLFLAG_RW, 176 &tcp_do_eifel_detect, 0, "Eifel detection algorithm (RFC 3522)"); 177 178 static int tcp_do_abc = 1; 179 SYSCTL_INT(_net_inet_tcp, OID_AUTO, abc, CTLFLAG_RW, 180 &tcp_do_abc, 0, 181 "TCP Appropriate Byte Counting (RFC 3465)"); 182 183 /* 184 * Define as tunable for easy testing with SACK on and off. 185 * Warning: do not change setting in the middle of an existing active TCP flow, 186 * else strange things might happen to that flow. 187 */ 188 int tcp_do_sack = 1; 189 SYSCTL_INT(_net_inet_tcp, OID_AUTO, sack, CTLFLAG_RW, 190 &tcp_do_sack, 0, "Enable SACK Algorithms"); 191 192 int tcp_do_smartsack = 1; 193 SYSCTL_INT(_net_inet_tcp, OID_AUTO, smartsack, CTLFLAG_RW, 194 &tcp_do_smartsack, 0, "Enable Smart SACK Algorithms"); 195 196 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, reass, CTLFLAG_RW, 0, 197 "TCP Segment Reassembly Queue"); 198 199 int tcp_reass_maxseg = 0; 200 SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, maxsegments, CTLFLAG_RD, 201 &tcp_reass_maxseg, 0, 202 "Global maximum number of TCP Segments in Reassembly Queue"); 203 204 int tcp_reass_qsize = 0; 205 SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, cursegments, CTLFLAG_RD, 206 &tcp_reass_qsize, 0, 207 "Global number of TCP Segments currently in Reassembly Queue"); 208 209 static int tcp_reass_overflows = 0; 210 SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, overflows, CTLFLAG_RD, 211 &tcp_reass_overflows, 0, 212 "Global number of TCP Segment Reassembly Queue Overflows"); 213 214 static void tcp_dooptions(struct tcpopt *, u_char *, int, boolean_t); 215 static void tcp_pulloutofband(struct socket *, 216 struct tcphdr *, struct mbuf *, int); 217 static int tcp_reass(struct tcpcb *, struct tcphdr *, int *, 218 struct mbuf *); 219 static void tcp_xmit_timer(struct tcpcb *, int); 220 static void tcp_newreno_partial_ack(struct tcpcb *, struct tcphdr *, int); 221 static void tcp_sack_rexmt(struct tcpcb *, struct tcphdr *); 222 223 /* Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. */ 224 #ifdef INET6 225 #define ND6_HINT(tp) \ 226 do { \ 227 if ((tp) && (tp)->t_inpcb && \ 228 ((tp)->t_inpcb->inp_vflag & INP_IPV6) && \ 229 (tp)->t_inpcb->in6p_route.ro_rt) \ 230 nd6_nud_hint((tp)->t_inpcb->in6p_route.ro_rt, NULL, 0); \ 231 } while (0) 232 #else 233 #define ND6_HINT(tp) 234 #endif 235 236 /* 237 * Indicate whether this ack should be delayed. We can delay the ack if 238 * - delayed acks are enabled and 239 * - there is no delayed ack timer in progress and 240 * - our last ack wasn't a 0-sized window. We never want to delay 241 * the ack that opens up a 0-sized window. 242 */ 243 #define DELAY_ACK(tp) \ 244 (tcp_delack_enabled && !callout_pending(tp->tt_delack) && \ 245 !(tp->t_flags & TF_RXWIN0SENT)) 246 247 #define acceptable_window_update(tp, th, tiwin) \ 248 (SEQ_LT(tp->snd_wl1, th->th_seq) || \ 249 (tp->snd_wl1 == th->th_seq && \ 250 (SEQ_LT(tp->snd_wl2, th->th_ack) || \ 251 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)))) 252 253 static int 254 tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m) 255 { 256 struct tseg_qent *q; 257 struct tseg_qent *p = NULL; 258 struct tseg_qent *te; 259 struct socket *so = tp->t_inpcb->inp_socket; 260 int flags; 261 262 /* 263 * Call with th == NULL after become established to 264 * force pre-ESTABLISHED data up to user socket. 265 */ 266 if (th == NULL) 267 goto present; 268 269 /* 270 * Limit the number of segments in the reassembly queue to prevent 271 * holding on to too many segments (and thus running out of mbufs). 272 * Make sure to let the missing segment through which caused this 273 * queue. Always keep one global queue entry spare to be able to 274 * process the missing segment. 275 */ 276 if (th->th_seq != tp->rcv_nxt && 277 tcp_reass_qsize + 1 >= tcp_reass_maxseg) { 278 tcp_reass_overflows++; 279 tcpstat.tcps_rcvmemdrop++; 280 m_freem(m); 281 /* no SACK block to report */ 282 tp->reportblk.rblk_start = tp->reportblk.rblk_end; 283 return (0); 284 } 285 286 /* Allocate a new queue entry. */ 287 MALLOC(te, struct tseg_qent *, sizeof(struct tseg_qent), M_TSEGQ, 288 M_INTWAIT | M_NULLOK); 289 if (te == NULL) { 290 tcpstat.tcps_rcvmemdrop++; 291 m_freem(m); 292 /* no SACK block to report */ 293 tp->reportblk.rblk_start = tp->reportblk.rblk_end; 294 return (0); 295 } 296 tcp_reass_qsize++; 297 298 /* 299 * Find a segment which begins after this one does. 300 */ 301 LIST_FOREACH(q, &tp->t_segq, tqe_q) { 302 if (SEQ_GT(q->tqe_th->th_seq, th->th_seq)) 303 break; 304 p = q; 305 } 306 307 /* 308 * If there is a preceding segment, it may provide some of 309 * our data already. If so, drop the data from the incoming 310 * segment. If it provides all of our data, drop us. 311 */ 312 if (p != NULL) { 313 tcp_seq_diff_t i; 314 315 /* conversion to int (in i) handles seq wraparound */ 316 i = p->tqe_th->th_seq + p->tqe_len - th->th_seq; 317 if (i > 0) { /* overlaps preceding segment */ 318 tp->t_flags |= (TF_DUPSEG | TF_ENCLOSESEG); 319 /* enclosing block starts w/ preceding segment */ 320 tp->encloseblk.rblk_start = p->tqe_th->th_seq; 321 if (i >= *tlenp) { 322 /* preceding encloses incoming segment */ 323 tp->encloseblk.rblk_end = p->tqe_th->th_seq + 324 p->tqe_len; 325 tcpstat.tcps_rcvduppack++; 326 tcpstat.tcps_rcvdupbyte += *tlenp; 327 m_freem(m); 328 kfree(te, M_TSEGQ); 329 tcp_reass_qsize--; 330 /* 331 * Try to present any queued data 332 * at the left window edge to the user. 333 * This is needed after the 3-WHS 334 * completes. 335 */ 336 goto present; /* ??? */ 337 } 338 m_adj(m, i); 339 *tlenp -= i; 340 th->th_seq += i; 341 /* incoming segment end is enclosing block end */ 342 tp->encloseblk.rblk_end = th->th_seq + *tlenp + 343 ((th->th_flags & TH_FIN) != 0); 344 /* trim end of reported D-SACK block */ 345 tp->reportblk.rblk_end = th->th_seq; 346 } 347 } 348 tcpstat.tcps_rcvoopack++; 349 tcpstat.tcps_rcvoobyte += *tlenp; 350 351 /* 352 * While we overlap succeeding segments trim them or, 353 * if they are completely covered, dequeue them. 354 */ 355 while (q) { 356 tcp_seq_diff_t i = (th->th_seq + *tlenp) - q->tqe_th->th_seq; 357 tcp_seq qend = q->tqe_th->th_seq + q->tqe_len; 358 struct tseg_qent *nq; 359 360 if (i <= 0) 361 break; 362 if (!(tp->t_flags & TF_DUPSEG)) { /* first time through */ 363 tp->t_flags |= (TF_DUPSEG | TF_ENCLOSESEG); 364 tp->encloseblk = tp->reportblk; 365 /* report trailing duplicate D-SACK segment */ 366 tp->reportblk.rblk_start = q->tqe_th->th_seq; 367 } 368 if ((tp->t_flags & TF_ENCLOSESEG) && 369 SEQ_GT(qend, tp->encloseblk.rblk_end)) { 370 /* extend enclosing block if one exists */ 371 tp->encloseblk.rblk_end = qend; 372 } 373 if (i < q->tqe_len) { 374 q->tqe_th->th_seq += i; 375 q->tqe_len -= i; 376 m_adj(q->tqe_m, i); 377 break; 378 } 379 380 nq = LIST_NEXT(q, tqe_q); 381 LIST_REMOVE(q, tqe_q); 382 m_freem(q->tqe_m); 383 kfree(q, M_TSEGQ); 384 tcp_reass_qsize--; 385 q = nq; 386 } 387 388 /* Insert the new segment queue entry into place. */ 389 te->tqe_m = m; 390 te->tqe_th = th; 391 te->tqe_len = *tlenp; 392 393 /* check if can coalesce with following segment */ 394 if (q != NULL && (th->th_seq + *tlenp == q->tqe_th->th_seq)) { 395 tcp_seq tend = te->tqe_th->th_seq + te->tqe_len; 396 397 te->tqe_len += q->tqe_len; 398 if (q->tqe_th->th_flags & TH_FIN) 399 te->tqe_th->th_flags |= TH_FIN; 400 m_cat(te->tqe_m, q->tqe_m); 401 tp->encloseblk.rblk_end = tend; 402 /* 403 * When not reporting a duplicate segment, use 404 * the larger enclosing block as the SACK block. 405 */ 406 if (!(tp->t_flags & TF_DUPSEG)) 407 tp->reportblk.rblk_end = tend; 408 LIST_REMOVE(q, tqe_q); 409 kfree(q, M_TSEGQ); 410 tcp_reass_qsize--; 411 } 412 413 if (p == NULL) { 414 LIST_INSERT_HEAD(&tp->t_segq, te, tqe_q); 415 } else { 416 /* check if can coalesce with preceding segment */ 417 if (p->tqe_th->th_seq + p->tqe_len == th->th_seq) { 418 p->tqe_len += te->tqe_len; 419 m_cat(p->tqe_m, te->tqe_m); 420 tp->encloseblk.rblk_start = p->tqe_th->th_seq; 421 /* 422 * When not reporting a duplicate segment, use 423 * the larger enclosing block as the SACK block. 424 */ 425 if (!(tp->t_flags & TF_DUPSEG)) 426 tp->reportblk.rblk_start = p->tqe_th->th_seq; 427 kfree(te, M_TSEGQ); 428 tcp_reass_qsize--; 429 } else 430 LIST_INSERT_AFTER(p, te, tqe_q); 431 } 432 433 present: 434 /* 435 * Present data to user, advancing rcv_nxt through 436 * completed sequence space. 437 */ 438 if (!TCPS_HAVEESTABLISHED(tp->t_state)) 439 return (0); 440 q = LIST_FIRST(&tp->t_segq); 441 if (q == NULL || q->tqe_th->th_seq != tp->rcv_nxt) 442 return (0); 443 tp->rcv_nxt += q->tqe_len; 444 if (!(tp->t_flags & TF_DUPSEG)) { 445 /* no SACK block to report since ACK advanced */ 446 tp->reportblk.rblk_start = tp->reportblk.rblk_end; 447 } 448 /* no enclosing block to report since ACK advanced */ 449 tp->t_flags &= ~TF_ENCLOSESEG; 450 flags = q->tqe_th->th_flags & TH_FIN; 451 LIST_REMOVE(q, tqe_q); 452 KASSERT(LIST_EMPTY(&tp->t_segq) || 453 LIST_FIRST(&tp->t_segq)->tqe_th->th_seq != tp->rcv_nxt, 454 ("segment not coalesced")); 455 if (so->so_state & SS_CANTRCVMORE) 456 m_freem(q->tqe_m); 457 else 458 sbappendstream(&so->so_rcv, q->tqe_m); 459 kfree(q, M_TSEGQ); 460 tcp_reass_qsize--; 461 ND6_HINT(tp); 462 sorwakeup(so); 463 return (flags); 464 } 465 466 /* 467 * TCP input routine, follows pages 65-76 of the 468 * protocol specification dated September, 1981 very closely. 469 */ 470 #ifdef INET6 471 int 472 tcp6_input(struct mbuf **mp, int *offp, int proto) 473 { 474 struct mbuf *m = *mp; 475 struct in6_ifaddr *ia6; 476 477 IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE); 478 479 /* 480 * draft-itojun-ipv6-tcp-to-anycast 481 * better place to put this in? 482 */ 483 ia6 = ip6_getdstifaddr(m); 484 if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) { 485 struct ip6_hdr *ip6; 486 487 ip6 = mtod(m, struct ip6_hdr *); 488 icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR, 489 offsetof(struct ip6_hdr, ip6_dst)); 490 return (IPPROTO_DONE); 491 } 492 493 tcp_input(m, *offp, proto); 494 return (IPPROTO_DONE); 495 } 496 #endif 497 498 void 499 tcp_input(struct mbuf *m, ...) 500 { 501 __va_list ap; 502 int off0, proto; 503 struct tcphdr *th; 504 struct ip *ip = NULL; 505 struct ipovly *ipov; 506 struct inpcb *inp = NULL; 507 u_char *optp = NULL; 508 int optlen = 0; 509 int len, tlen, off; 510 int drop_hdrlen; 511 struct tcpcb *tp = NULL; 512 int thflags; 513 struct socket *so = 0; 514 int todrop, acked; 515 boolean_t ourfinisacked, needoutput = FALSE; 516 u_long tiwin; 517 int recvwin; 518 struct tcpopt to; /* options in this segment */ 519 struct rmxp_tao *taop; /* pointer to our TAO cache entry */ 520 struct rmxp_tao tao_noncached; /* in case there's no cached entry */ 521 struct sockaddr_in *next_hop = NULL; 522 int rstreason; /* For badport_bandlim accounting purposes */ 523 int cpu; 524 struct ip6_hdr *ip6 = NULL; 525 #ifdef INET6 526 boolean_t isipv6; 527 #else 528 const boolean_t isipv6 = FALSE; 529 #endif 530 #ifdef TCPDEBUG 531 short ostate = 0; 532 #endif 533 534 __va_start(ap, m); 535 off0 = __va_arg(ap, int); 536 proto = __va_arg(ap, int); 537 __va_end(ap); 538 539 tcpstat.tcps_rcvtotal++; 540 541 /* Grab info from and strip MT_TAG mbufs prepended to the chain. */ 542 while (m->m_type == MT_TAG) { 543 if (m->_m_tag_id == PACKET_TAG_IPFORWARD) 544 next_hop = (struct sockaddr_in *)m->m_hdr.mh_data; 545 m = m->m_next; 546 } 547 548 #ifdef INET6 549 isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? TRUE : FALSE; 550 #endif 551 552 if (isipv6) { 553 /* IP6_EXTHDR_CHECK() is already done at tcp6_input() */ 554 ip6 = mtod(m, struct ip6_hdr *); 555 tlen = (sizeof *ip6) + ntohs(ip6->ip6_plen) - off0; 556 if (in6_cksum(m, IPPROTO_TCP, off0, tlen)) { 557 tcpstat.tcps_rcvbadsum++; 558 goto drop; 559 } 560 th = (struct tcphdr *)((caddr_t)ip6 + off0); 561 562 /* 563 * Be proactive about unspecified IPv6 address in source. 564 * As we use all-zero to indicate unbounded/unconnected pcb, 565 * unspecified IPv6 address can be used to confuse us. 566 * 567 * Note that packets with unspecified IPv6 destination is 568 * already dropped in ip6_input. 569 */ 570 if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) { 571 /* XXX stat */ 572 goto drop; 573 } 574 } else { 575 /* 576 * Get IP and TCP header together in first mbuf. 577 * Note: IP leaves IP header in first mbuf. 578 */ 579 if (off0 > sizeof(struct ip)) { 580 ip_stripoptions(m); 581 off0 = sizeof(struct ip); 582 } 583 /* already checked and pulled up in ip_demux() */ 584 KASSERT(m->m_len >= sizeof(struct tcpiphdr), 585 ("TCP header not in one mbuf: m->m_len %d", m->m_len)); 586 ip = mtod(m, struct ip *); 587 ipov = (struct ipovly *)ip; 588 th = (struct tcphdr *)((caddr_t)ip + off0); 589 tlen = ip->ip_len; 590 591 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) { 592 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) 593 th->th_sum = m->m_pkthdr.csum_data; 594 else 595 th->th_sum = in_pseudo(ip->ip_src.s_addr, 596 ip->ip_dst.s_addr, 597 htonl(m->m_pkthdr.csum_data + 598 ip->ip_len + 599 IPPROTO_TCP)); 600 th->th_sum ^= 0xffff; 601 } else { 602 /* 603 * Checksum extended TCP header and data. 604 */ 605 len = sizeof(struct ip) + tlen; 606 bzero(ipov->ih_x1, sizeof ipov->ih_x1); 607 ipov->ih_len = (u_short)tlen; 608 ipov->ih_len = htons(ipov->ih_len); 609 th->th_sum = in_cksum(m, len); 610 } 611 if (th->th_sum) { 612 tcpstat.tcps_rcvbadsum++; 613 goto drop; 614 } 615 #ifdef INET6 616 /* Re-initialization for later version check */ 617 ip->ip_v = IPVERSION; 618 #endif 619 } 620 621 /* 622 * Check that TCP offset makes sense, 623 * pull out TCP options and adjust length. XXX 624 */ 625 off = th->th_off << 2; 626 /* already checked and pulled up in ip_demux() */ 627 KASSERT(off >= sizeof(struct tcphdr) && off <= tlen, 628 ("bad TCP data offset %d (tlen %d)", off, tlen)); 629 tlen -= off; /* tlen is used instead of ti->ti_len */ 630 if (off > sizeof(struct tcphdr)) { 631 if (isipv6) { 632 IP6_EXTHDR_CHECK(m, off0, off, ); 633 ip6 = mtod(m, struct ip6_hdr *); 634 th = (struct tcphdr *)((caddr_t)ip6 + off0); 635 } else { 636 /* already pulled up in ip_demux() */ 637 KASSERT(m->m_len >= sizeof(struct ip) + off, 638 ("TCP header and options not in one mbuf: " 639 "m_len %d, off %d", m->m_len, off)); 640 } 641 optlen = off - sizeof(struct tcphdr); 642 optp = (u_char *)(th + 1); 643 } 644 thflags = th->th_flags; 645 646 #ifdef TCP_DROP_SYNFIN 647 /* 648 * If the drop_synfin option is enabled, drop all packets with 649 * both the SYN and FIN bits set. This prevents e.g. nmap from 650 * identifying the TCP/IP stack. 651 * 652 * This is a violation of the TCP specification. 653 */ 654 if (drop_synfin && (thflags & (TH_SYN | TH_FIN)) == (TH_SYN | TH_FIN)) 655 goto drop; 656 #endif 657 658 /* 659 * Convert TCP protocol specific fields to host format. 660 */ 661 th->th_seq = ntohl(th->th_seq); 662 th->th_ack = ntohl(th->th_ack); 663 th->th_win = ntohs(th->th_win); 664 th->th_urp = ntohs(th->th_urp); 665 666 /* 667 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options, 668 * until after ip6_savecontrol() is called and before other functions 669 * which don't want those proto headers. 670 * Because ip6_savecontrol() is going to parse the mbuf to 671 * search for data to be passed up to user-land, it wants mbuf 672 * parameters to be unchanged. 673 * XXX: the call of ip6_savecontrol() has been obsoleted based on 674 * latest version of the advanced API (20020110). 675 */ 676 drop_hdrlen = off0 + off; 677 678 /* 679 * Locate pcb for segment. 680 */ 681 findpcb: 682 /* IPFIREWALL_FORWARD section */ 683 if (next_hop != NULL && !isipv6) { /* IPv6 support is not there yet */ 684 /* 685 * Transparently forwarded. Pretend to be the destination. 686 * already got one like this? 687 */ 688 cpu = mycpu->gd_cpuid; 689 inp = in_pcblookup_hash(&tcbinfo[cpu], 690 ip->ip_src, th->th_sport, 691 ip->ip_dst, th->th_dport, 692 0, m->m_pkthdr.rcvif); 693 if (!inp) { 694 /* 695 * It's new. Try to find the ambushing socket. 696 */ 697 698 /* 699 * The rest of the ipfw code stores the port in 700 * host order. XXX 701 * (The IP address is still in network order.) 702 */ 703 in_port_t dport = next_hop->sin_port ? 704 htons(next_hop->sin_port) : 705 th->th_dport; 706 707 cpu = tcp_addrcpu(ip->ip_src.s_addr, th->th_sport, 708 next_hop->sin_addr.s_addr, dport); 709 inp = in_pcblookup_hash(&tcbinfo[cpu], 710 ip->ip_src, th->th_sport, 711 next_hop->sin_addr, dport, 712 1, m->m_pkthdr.rcvif); 713 } 714 } else { 715 if (isipv6) { 716 inp = in6_pcblookup_hash(&tcbinfo[0], 717 &ip6->ip6_src, th->th_sport, 718 &ip6->ip6_dst, th->th_dport, 719 1, m->m_pkthdr.rcvif); 720 } else { 721 cpu = mycpu->gd_cpuid; 722 inp = in_pcblookup_hash(&tcbinfo[cpu], 723 ip->ip_src, th->th_sport, 724 ip->ip_dst, th->th_dport, 725 1, m->m_pkthdr.rcvif); 726 } 727 } 728 729 /* 730 * If the state is CLOSED (i.e., TCB does not exist) then 731 * all data in the incoming segment is discarded. 732 * If the TCB exists but is in CLOSED state, it is embryonic, 733 * but should either do a listen or a connect soon. 734 */ 735 if (inp == NULL) { 736 if (log_in_vain) { 737 #ifdef INET6 738 char dbuf[INET6_ADDRSTRLEN+2], sbuf[INET6_ADDRSTRLEN+2]; 739 #else 740 char dbuf[sizeof "aaa.bbb.ccc.ddd"]; 741 char sbuf[sizeof "aaa.bbb.ccc.ddd"]; 742 #endif 743 if (isipv6) { 744 strcpy(dbuf, "["); 745 strcat(dbuf, ip6_sprintf(&ip6->ip6_dst)); 746 strcat(dbuf, "]"); 747 strcpy(sbuf, "["); 748 strcat(sbuf, ip6_sprintf(&ip6->ip6_src)); 749 strcat(sbuf, "]"); 750 } else { 751 strcpy(dbuf, inet_ntoa(ip->ip_dst)); 752 strcpy(sbuf, inet_ntoa(ip->ip_src)); 753 } 754 switch (log_in_vain) { 755 case 1: 756 if (!(thflags & TH_SYN)) 757 break; 758 case 2: 759 log(LOG_INFO, 760 "Connection attempt to TCP %s:%d " 761 "from %s:%d flags:0x%02x\n", 762 dbuf, ntohs(th->th_dport), sbuf, 763 ntohs(th->th_sport), thflags); 764 break; 765 default: 766 break; 767 } 768 } 769 if (blackhole) { 770 switch (blackhole) { 771 case 1: 772 if (thflags & TH_SYN) 773 goto drop; 774 break; 775 case 2: 776 goto drop; 777 default: 778 goto drop; 779 } 780 } 781 rstreason = BANDLIM_RST_CLOSEDPORT; 782 goto dropwithreset; 783 } 784 785 #ifdef IPSEC 786 if (isipv6) { 787 if (ipsec6_in_reject_so(m, inp->inp_socket)) { 788 ipsec6stat.in_polvio++; 789 goto drop; 790 } 791 } else { 792 if (ipsec4_in_reject_so(m, inp->inp_socket)) { 793 ipsecstat.in_polvio++; 794 goto drop; 795 } 796 } 797 #endif 798 #ifdef FAST_IPSEC 799 if (isipv6) { 800 if (ipsec6_in_reject(m, inp)) 801 goto drop; 802 } else { 803 if (ipsec4_in_reject(m, inp)) 804 goto drop; 805 } 806 #endif 807 /* Check the minimum TTL for socket. */ 808 #ifdef INET6 809 if ((isipv6 ? ip6->ip6_hlim : ip->ip_ttl) < inp->inp_ip_minttl) 810 goto drop; 811 #endif 812 813 tp = intotcpcb(inp); 814 if (tp == NULL) { 815 rstreason = BANDLIM_RST_CLOSEDPORT; 816 goto dropwithreset; 817 } 818 if (tp->t_state <= TCPS_CLOSED) 819 goto drop; 820 821 /* Unscale the window into a 32-bit value. */ 822 if (!(thflags & TH_SYN)) 823 tiwin = th->th_win << tp->snd_scale; 824 else 825 tiwin = th->th_win; 826 827 so = inp->inp_socket; 828 829 #ifdef TCPDEBUG 830 if (so->so_options & SO_DEBUG) { 831 ostate = tp->t_state; 832 if (isipv6) 833 bcopy(ip6, tcp_saveipgen, sizeof(*ip6)); 834 else 835 bcopy(ip, tcp_saveipgen, sizeof(*ip)); 836 tcp_savetcp = *th; 837 } 838 #endif 839 840 bzero(&to, sizeof to); 841 842 if (so->so_options & SO_ACCEPTCONN) { 843 struct in_conninfo inc; 844 845 #ifdef INET6 846 inc.inc_isipv6 = (isipv6 == TRUE); 847 #endif 848 if (isipv6) { 849 inc.inc6_faddr = ip6->ip6_src; 850 inc.inc6_laddr = ip6->ip6_dst; 851 inc.inc6_route.ro_rt = NULL; /* XXX */ 852 } else { 853 inc.inc_faddr = ip->ip_src; 854 inc.inc_laddr = ip->ip_dst; 855 inc.inc_route.ro_rt = NULL; /* XXX */ 856 } 857 inc.inc_fport = th->th_sport; 858 inc.inc_lport = th->th_dport; 859 860 /* 861 * If the state is LISTEN then ignore segment if it contains 862 * a RST. If the segment contains an ACK then it is bad and 863 * send a RST. If it does not contain a SYN then it is not 864 * interesting; drop it. 865 * 866 * If the state is SYN_RECEIVED (syncache) and seg contains 867 * an ACK, but not for our SYN/ACK, send a RST. If the seg 868 * contains a RST, check the sequence number to see if it 869 * is a valid reset segment. 870 */ 871 if ((thflags & (TH_RST | TH_ACK | TH_SYN)) != TH_SYN) { 872 if ((thflags & (TH_RST | TH_ACK | TH_SYN)) == TH_ACK) { 873 if (!syncache_expand(&inc, th, &so, m)) { 874 /* 875 * No syncache entry, or ACK was not 876 * for our SYN/ACK. Send a RST. 877 */ 878 tcpstat.tcps_badsyn++; 879 rstreason = BANDLIM_RST_OPENPORT; 880 goto dropwithreset; 881 } 882 if (so == NULL) 883 /* 884 * Could not complete 3-way handshake, 885 * connection is being closed down, and 886 * syncache will free mbuf. 887 */ 888 return; 889 /* 890 * Socket is created in state SYN_RECEIVED. 891 * Continue processing segment. 892 */ 893 inp = so->so_pcb; 894 tp = intotcpcb(inp); 895 /* 896 * This is what would have happened in 897 * tcp_output() when the SYN,ACK was sent. 898 */ 899 tp->snd_up = tp->snd_una; 900 tp->snd_max = tp->snd_nxt = tp->iss + 1; 901 tp->last_ack_sent = tp->rcv_nxt; 902 /* 903 * XXX possible bug - it doesn't appear that tp->snd_wnd is unscaled 904 * until the _second_ ACK is received: 905 * rcv SYN (set wscale opts) --> send SYN/ACK, set snd_wnd = window. 906 * rcv ACK, calculate tiwin --> process SYN_RECEIVED, determine wscale, 907 * move to ESTAB, set snd_wnd to tiwin. 908 */ 909 tp->snd_wnd = tiwin; /* unscaled */ 910 goto after_listen; 911 } 912 if (thflags & TH_RST) { 913 syncache_chkrst(&inc, th); 914 goto drop; 915 } 916 if (thflags & TH_ACK) { 917 syncache_badack(&inc); 918 tcpstat.tcps_badsyn++; 919 rstreason = BANDLIM_RST_OPENPORT; 920 goto dropwithreset; 921 } 922 goto drop; 923 } 924 925 /* 926 * Segment's flags are (SYN) or (SYN | FIN). 927 */ 928 #ifdef INET6 929 /* 930 * If deprecated address is forbidden, 931 * we do not accept SYN to deprecated interface 932 * address to prevent any new inbound connection from 933 * getting established. 934 * When we do not accept SYN, we send a TCP RST, 935 * with deprecated source address (instead of dropping 936 * it). We compromise it as it is much better for peer 937 * to send a RST, and RST will be the final packet 938 * for the exchange. 939 * 940 * If we do not forbid deprecated addresses, we accept 941 * the SYN packet. RFC2462 does not suggest dropping 942 * SYN in this case. 943 * If we decipher RFC2462 5.5.4, it says like this: 944 * 1. use of deprecated addr with existing 945 * communication is okay - "SHOULD continue to be 946 * used" 947 * 2. use of it with new communication: 948 * (2a) "SHOULD NOT be used if alternate address 949 * with sufficient scope is available" 950 * (2b) nothing mentioned otherwise. 951 * Here we fall into (2b) case as we have no choice in 952 * our source address selection - we must obey the peer. 953 * 954 * The wording in RFC2462 is confusing, and there are 955 * multiple description text for deprecated address 956 * handling - worse, they are not exactly the same. 957 * I believe 5.5.4 is the best one, so we follow 5.5.4. 958 */ 959 if (isipv6 && !ip6_use_deprecated) { 960 struct in6_ifaddr *ia6; 961 962 if ((ia6 = ip6_getdstifaddr(m)) && 963 (ia6->ia6_flags & IN6_IFF_DEPRECATED)) { 964 tp = NULL; 965 rstreason = BANDLIM_RST_OPENPORT; 966 goto dropwithreset; 967 } 968 } 969 #endif 970 /* 971 * If it is from this socket, drop it, it must be forged. 972 * Don't bother responding if the destination was a broadcast. 973 */ 974 if (th->th_dport == th->th_sport) { 975 if (isipv6) { 976 if (IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, 977 &ip6->ip6_src)) 978 goto drop; 979 } else { 980 if (ip->ip_dst.s_addr == ip->ip_src.s_addr) 981 goto drop; 982 } 983 } 984 /* 985 * RFC1122 4.2.3.10, p. 104: discard bcast/mcast SYN 986 * 987 * Note that it is quite possible to receive unicast 988 * link-layer packets with a broadcast IP address. Use 989 * in_broadcast() to find them. 990 */ 991 if (m->m_flags & (M_BCAST | M_MCAST)) 992 goto drop; 993 if (isipv6) { 994 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || 995 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) 996 goto drop; 997 } else { 998 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 999 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || 1000 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || 1001 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) 1002 goto drop; 1003 } 1004 /* 1005 * SYN appears to be valid; create compressed TCP state 1006 * for syncache, or perform t/tcp connection. 1007 */ 1008 if (so->so_qlen <= so->so_qlimit) { 1009 tcp_dooptions(&to, optp, optlen, TRUE); 1010 if (!syncache_add(&inc, &to, th, &so, m)) 1011 goto drop; 1012 if (so == NULL) 1013 /* 1014 * Entry added to syncache, mbuf used to 1015 * send SYN,ACK packet. 1016 */ 1017 return; 1018 /* 1019 * Segment passed TAO tests. 1020 */ 1021 inp = so->so_pcb; 1022 tp = intotcpcb(inp); 1023 tp->snd_wnd = tiwin; 1024 tp->t_starttime = ticks; 1025 tp->t_state = TCPS_ESTABLISHED; 1026 1027 /* 1028 * If there is a FIN, or if there is data and the 1029 * connection is local, then delay SYN,ACK(SYN) in 1030 * the hope of piggy-backing it on a response 1031 * segment. Otherwise must send ACK now in case 1032 * the other side is slow starting. 1033 */ 1034 if (DELAY_ACK(tp) && 1035 ((thflags & TH_FIN) || 1036 (tlen != 0 && 1037 ((isipv6 && in6_localaddr(&inp->in6p_faddr)) || 1038 (!isipv6 && in_localaddr(inp->inp_faddr)))))) { 1039 callout_reset(tp->tt_delack, tcp_delacktime, 1040 tcp_timer_delack, tp); 1041 tp->t_flags |= TF_NEEDSYN; 1042 } else 1043 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN); 1044 1045 tcpstat.tcps_connects++; 1046 soisconnected(so); 1047 goto trimthenstep6; 1048 } 1049 goto drop; 1050 } 1051 after_listen: 1052 1053 /* should not happen - syncache should pick up these connections */ 1054 KASSERT(tp->t_state != TCPS_LISTEN, ("tcp_input: TCPS_LISTEN state")); 1055 1056 /* 1057 * Segment received on connection. 1058 * Reset idle time and keep-alive timer. 1059 */ 1060 tp->t_rcvtime = ticks; 1061 if (TCPS_HAVEESTABLISHED(tp->t_state)) 1062 callout_reset(tp->tt_keep, tcp_keepidle, tcp_timer_keep, tp); 1063 1064 /* 1065 * Process options. 1066 * XXX this is tradtitional behavior, may need to be cleaned up. 1067 */ 1068 tcp_dooptions(&to, optp, optlen, (thflags & TH_SYN) != 0); 1069 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 1070 if (to.to_flags & TOF_SCALE) { 1071 tp->t_flags |= TF_RCVD_SCALE; 1072 tp->requested_s_scale = to.to_requested_s_scale; 1073 } 1074 if (to.to_flags & TOF_TS) { 1075 tp->t_flags |= TF_RCVD_TSTMP; 1076 tp->ts_recent = to.to_tsval; 1077 tp->ts_recent_age = ticks; 1078 } 1079 if (to.to_flags & (TOF_CC | TOF_CCNEW)) 1080 tp->t_flags |= TF_RCVD_CC; 1081 if (to.to_flags & TOF_MSS) 1082 tcp_mss(tp, to.to_mss); 1083 /* 1084 * Only set the TF_SACK_PERMITTED per-connection flag 1085 * if we got a SACK_PERMITTED option from the other side 1086 * and the global tcp_do_sack variable is true. 1087 */ 1088 if (tcp_do_sack && (to.to_flags & TOF_SACK_PERMITTED)) 1089 tp->t_flags |= TF_SACK_PERMITTED; 1090 } 1091 1092 /* 1093 * Header prediction: check for the two common cases 1094 * of a uni-directional data xfer. If the packet has 1095 * no control flags, is in-sequence, the window didn't 1096 * change and we're not retransmitting, it's a 1097 * candidate. If the length is zero and the ack moved 1098 * forward, we're the sender side of the xfer. Just 1099 * free the data acked & wake any higher level process 1100 * that was blocked waiting for space. If the length 1101 * is non-zero and the ack didn't move, we're the 1102 * receiver side. If we're getting packets in-order 1103 * (the reassembly queue is empty), add the data to 1104 * the socket buffer and note that we need a delayed ack. 1105 * Make sure that the hidden state-flags are also off. 1106 * Since we check for TCPS_ESTABLISHED above, it can only 1107 * be TH_NEEDSYN. 1108 */ 1109 if (tp->t_state == TCPS_ESTABLISHED && 1110 (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK && 1111 !(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)) && 1112 (!(to.to_flags & TOF_TS) || 1113 TSTMP_GEQ(to.to_tsval, tp->ts_recent)) && 1114 /* 1115 * Using the CC option is compulsory if once started: 1116 * the segment is OK if no T/TCP was negotiated or 1117 * if the segment has a CC option equal to CCrecv 1118 */ 1119 ((tp->t_flags & (TF_REQ_CC|TF_RCVD_CC)) != (TF_REQ_CC|TF_RCVD_CC) || 1120 ((to.to_flags & TOF_CC) && to.to_cc == tp->cc_recv)) && 1121 th->th_seq == tp->rcv_nxt && 1122 tp->snd_nxt == tp->snd_max) { 1123 1124 /* 1125 * If last ACK falls within this segment's sequence numbers, 1126 * record the timestamp. 1127 * NOTE that the test is modified according to the latest 1128 * proposal of the tcplw@cray.com list (Braden 1993/04/26). 1129 */ 1130 if ((to.to_flags & TOF_TS) && 1131 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 1132 tp->ts_recent_age = ticks; 1133 tp->ts_recent = to.to_tsval; 1134 } 1135 1136 if (tlen == 0) { 1137 if (SEQ_GT(th->th_ack, tp->snd_una) && 1138 SEQ_LEQ(th->th_ack, tp->snd_max) && 1139 tp->snd_cwnd >= tp->snd_wnd && 1140 !IN_FASTRECOVERY(tp)) { 1141 /* 1142 * This is a pure ack for outstanding data. 1143 */ 1144 ++tcpstat.tcps_predack; 1145 /* 1146 * "bad retransmit" recovery 1147 * 1148 * If Eifel detection applies, then 1149 * it is deterministic, so use it 1150 * unconditionally over the old heuristic. 1151 * Otherwise, fall back to the old heuristic. 1152 */ 1153 if (tcp_do_eifel_detect && 1154 (to.to_flags & TOF_TS) && to.to_tsecr && 1155 (tp->t_flags & TF_FIRSTACCACK)) { 1156 /* Eifel detection applicable. */ 1157 if (to.to_tsecr < tp->t_rexmtTS) { 1158 tcp_revert_congestion_state(tp); 1159 ++tcpstat.tcps_eifeldetected; 1160 } 1161 } else if (tp->t_rxtshift == 1 && 1162 ticks < tp->t_badrxtwin) { 1163 tcp_revert_congestion_state(tp); 1164 ++tcpstat.tcps_rttdetected; 1165 } 1166 tp->t_flags &= ~(TF_FIRSTACCACK | 1167 TF_FASTREXMT | TF_EARLYREXMT); 1168 /* 1169 * Recalculate the retransmit timer / rtt. 1170 * 1171 * Some machines (certain windows boxes) 1172 * send broken timestamp replies during the 1173 * SYN+ACK phase, ignore timestamps of 0. 1174 */ 1175 if ((to.to_flags & TOF_TS) && to.to_tsecr) { 1176 tcp_xmit_timer(tp, 1177 ticks - to.to_tsecr + 1); 1178 } else if (tp->t_rtttime && 1179 SEQ_GT(th->th_ack, tp->t_rtseq)) { 1180 tcp_xmit_timer(tp, 1181 ticks - tp->t_rtttime); 1182 } 1183 tcp_xmit_bandwidth_limit(tp, th->th_ack); 1184 acked = th->th_ack - tp->snd_una; 1185 tcpstat.tcps_rcvackpack++; 1186 tcpstat.tcps_rcvackbyte += acked; 1187 sbdrop(&so->so_snd, acked); 1188 tp->snd_recover = th->th_ack - 1; 1189 tp->snd_una = th->th_ack; 1190 tp->t_dupacks = 0; 1191 /* 1192 * Update window information. 1193 */ 1194 if (tiwin != tp->snd_wnd && 1195 acceptable_window_update(tp, th, tiwin)) { 1196 /* keep track of pure window updates */ 1197 if (tp->snd_wl2 == th->th_ack && 1198 tiwin > tp->snd_wnd) 1199 tcpstat.tcps_rcvwinupd++; 1200 tp->snd_wnd = tiwin; 1201 tp->snd_wl1 = th->th_seq; 1202 tp->snd_wl2 = th->th_ack; 1203 if (tp->snd_wnd > tp->max_sndwnd) 1204 tp->max_sndwnd = tp->snd_wnd; 1205 } 1206 m_freem(m); 1207 ND6_HINT(tp); /* some progress has been done */ 1208 /* 1209 * If all outstanding data are acked, stop 1210 * retransmit timer, otherwise restart timer 1211 * using current (possibly backed-off) value. 1212 * If process is waiting for space, 1213 * wakeup/selwakeup/signal. If data 1214 * are ready to send, let tcp_output 1215 * decide between more output or persist. 1216 */ 1217 if (tp->snd_una == tp->snd_max) 1218 callout_stop(tp->tt_rexmt); 1219 else if (!callout_active(tp->tt_persist)) 1220 callout_reset(tp->tt_rexmt, 1221 tp->t_rxtcur, 1222 tcp_timer_rexmt, tp); 1223 sowwakeup(so); 1224 if (so->so_snd.sb_cc > 0) 1225 tcp_output(tp); 1226 return; 1227 } 1228 } else if (tiwin == tp->snd_wnd && 1229 th->th_ack == tp->snd_una && 1230 LIST_EMPTY(&tp->t_segq) && 1231 tlen <= sbspace(&so->so_rcv)) { 1232 /* 1233 * This is a pure, in-sequence data packet 1234 * with nothing on the reassembly queue and 1235 * we have enough buffer space to take it. 1236 */ 1237 ++tcpstat.tcps_preddat; 1238 tp->rcv_nxt += tlen; 1239 tcpstat.tcps_rcvpack++; 1240 tcpstat.tcps_rcvbyte += tlen; 1241 ND6_HINT(tp); /* some progress has been done */ 1242 /* 1243 * Add data to socket buffer. 1244 */ 1245 if (so->so_state & SS_CANTRCVMORE) { 1246 m_freem(m); 1247 } else { 1248 m_adj(m, drop_hdrlen); /* delayed header drop */ 1249 sbappendstream(&so->so_rcv, m); 1250 } 1251 sorwakeup(so); 1252 /* 1253 * This code is responsible for most of the ACKs 1254 * the TCP stack sends back after receiving a data 1255 * packet. Note that the DELAY_ACK check fails if 1256 * the delack timer is already running, which results 1257 * in an ack being sent every other packet (which is 1258 * what we want). 1259 * 1260 * We then further aggregate acks by not actually 1261 * sending one until the protocol thread has completed 1262 * processing the current backlog of packets. This 1263 * does not delay the ack any further, but allows us 1264 * to take advantage of the packet aggregation that 1265 * high speed NICs do (usually blocks of 8-10 packets) 1266 * to send a single ack rather then four or five acks, 1267 * greatly reducing the ack rate, the return channel 1268 * bandwidth, and the protocol overhead on both ends. 1269 * 1270 * Since this also has the effect of slowing down 1271 * the exponential slow-start ramp-up, systems with 1272 * very large bandwidth-delay products might want 1273 * to turn the feature off. 1274 */ 1275 if (DELAY_ACK(tp)) { 1276 callout_reset(tp->tt_delack, tcp_delacktime, 1277 tcp_timer_delack, tp); 1278 } else if (tcp_aggregate_acks) { 1279 tp->t_flags |= TF_ACKNOW; 1280 if (!(tp->t_flags & TF_ONOUTPUTQ)) { 1281 tp->t_flags |= TF_ONOUTPUTQ; 1282 tp->tt_cpu = mycpu->gd_cpuid; 1283 TAILQ_INSERT_TAIL( 1284 &tcpcbackq[tp->tt_cpu], 1285 tp, t_outputq); 1286 } 1287 } else { 1288 tp->t_flags |= TF_ACKNOW; 1289 tcp_output(tp); 1290 } 1291 return; 1292 } 1293 } 1294 1295 /* 1296 * Calculate amount of space in receive window, 1297 * and then do TCP input processing. 1298 * Receive window is amount of space in rcv queue, 1299 * but not less than advertised window. 1300 */ 1301 recvwin = sbspace(&so->so_rcv); 1302 if (recvwin < 0) 1303 recvwin = 0; 1304 tp->rcv_wnd = imax(recvwin, (int)(tp->rcv_adv - tp->rcv_nxt)); 1305 1306 switch (tp->t_state) { 1307 /* 1308 * If the state is SYN_RECEIVED: 1309 * if seg contains an ACK, but not for our SYN/ACK, send a RST. 1310 */ 1311 case TCPS_SYN_RECEIVED: 1312 if ((thflags & TH_ACK) && 1313 (SEQ_LEQ(th->th_ack, tp->snd_una) || 1314 SEQ_GT(th->th_ack, tp->snd_max))) { 1315 rstreason = BANDLIM_RST_OPENPORT; 1316 goto dropwithreset; 1317 } 1318 break; 1319 1320 /* 1321 * If the state is SYN_SENT: 1322 * if seg contains an ACK, but not for our SYN, drop the input. 1323 * if seg contains a RST, then drop the connection. 1324 * if seg does not contain SYN, then drop it. 1325 * Otherwise this is an acceptable SYN segment 1326 * initialize tp->rcv_nxt and tp->irs 1327 * if seg contains ack then advance tp->snd_una 1328 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state 1329 * arrange for segment to be acked (eventually) 1330 * continue processing rest of data/controls, beginning with URG 1331 */ 1332 case TCPS_SYN_SENT: 1333 if ((taop = tcp_gettaocache(&inp->inp_inc)) == NULL) { 1334 taop = &tao_noncached; 1335 bzero(taop, sizeof *taop); 1336 } 1337 1338 if ((thflags & TH_ACK) && 1339 (SEQ_LEQ(th->th_ack, tp->iss) || 1340 SEQ_GT(th->th_ack, tp->snd_max))) { 1341 /* 1342 * If we have a cached CCsent for the remote host, 1343 * hence we haven't just crashed and restarted, 1344 * do not send a RST. This may be a retransmission 1345 * from the other side after our earlier ACK was lost. 1346 * Our new SYN, when it arrives, will serve as the 1347 * needed ACK. 1348 */ 1349 if (taop->tao_ccsent != 0) 1350 goto drop; 1351 else { 1352 rstreason = BANDLIM_UNLIMITED; 1353 goto dropwithreset; 1354 } 1355 } 1356 if (thflags & TH_RST) { 1357 if (thflags & TH_ACK) 1358 tp = tcp_drop(tp, ECONNREFUSED); 1359 goto drop; 1360 } 1361 if (!(thflags & TH_SYN)) 1362 goto drop; 1363 tp->snd_wnd = th->th_win; /* initial send window */ 1364 tp->cc_recv = to.to_cc; /* foreign CC */ 1365 1366 tp->irs = th->th_seq; 1367 tcp_rcvseqinit(tp); 1368 if (thflags & TH_ACK) { 1369 /* 1370 * Our SYN was acked. If segment contains CC.ECHO 1371 * option, check it to make sure this segment really 1372 * matches our SYN. If not, just drop it as old 1373 * duplicate, but send an RST if we're still playing 1374 * by the old rules. If no CC.ECHO option, make sure 1375 * we don't get fooled into using T/TCP. 1376 */ 1377 if (to.to_flags & TOF_CCECHO) { 1378 if (tp->cc_send != to.to_ccecho) { 1379 if (taop->tao_ccsent != 0) 1380 goto drop; 1381 else { 1382 rstreason = BANDLIM_UNLIMITED; 1383 goto dropwithreset; 1384 } 1385 } 1386 } else 1387 tp->t_flags &= ~TF_RCVD_CC; 1388 tcpstat.tcps_connects++; 1389 soisconnected(so); 1390 /* Do window scaling on this connection? */ 1391 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 1392 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 1393 tp->snd_scale = tp->requested_s_scale; 1394 tp->rcv_scale = tp->request_r_scale; 1395 } 1396 /* Segment is acceptable, update cache if undefined. */ 1397 if (taop->tao_ccsent == 0) 1398 taop->tao_ccsent = to.to_ccecho; 1399 1400 tp->rcv_adv += tp->rcv_wnd; 1401 tp->snd_una++; /* SYN is acked */ 1402 callout_stop(tp->tt_rexmt); 1403 /* 1404 * If there's data, delay ACK; if there's also a FIN 1405 * ACKNOW will be turned on later. 1406 */ 1407 if (DELAY_ACK(tp) && tlen != 0) 1408 callout_reset(tp->tt_delack, tcp_delacktime, 1409 tcp_timer_delack, tp); 1410 else 1411 tp->t_flags |= TF_ACKNOW; 1412 /* 1413 * Received <SYN,ACK> in SYN_SENT[*] state. 1414 * Transitions: 1415 * SYN_SENT --> ESTABLISHED 1416 * SYN_SENT* --> FIN_WAIT_1 1417 */ 1418 tp->t_starttime = ticks; 1419 if (tp->t_flags & TF_NEEDFIN) { 1420 tp->t_state = TCPS_FIN_WAIT_1; 1421 tp->t_flags &= ~TF_NEEDFIN; 1422 thflags &= ~TH_SYN; 1423 } else { 1424 tp->t_state = TCPS_ESTABLISHED; 1425 callout_reset(tp->tt_keep, tcp_keepidle, 1426 tcp_timer_keep, tp); 1427 } 1428 } else { 1429 /* 1430 * Received initial SYN in SYN-SENT[*] state => 1431 * simultaneous open. If segment contains CC option 1432 * and there is a cached CC, apply TAO test. 1433 * If it succeeds, connection is * half-synchronized. 1434 * Otherwise, do 3-way handshake: 1435 * SYN-SENT -> SYN-RECEIVED 1436 * SYN-SENT* -> SYN-RECEIVED* 1437 * If there was no CC option, clear cached CC value. 1438 */ 1439 tp->t_flags |= TF_ACKNOW; 1440 callout_stop(tp->tt_rexmt); 1441 if (to.to_flags & TOF_CC) { 1442 if (taop->tao_cc != 0 && 1443 CC_GT(to.to_cc, taop->tao_cc)) { 1444 /* 1445 * update cache and make transition: 1446 * SYN-SENT -> ESTABLISHED* 1447 * SYN-SENT* -> FIN-WAIT-1* 1448 */ 1449 taop->tao_cc = to.to_cc; 1450 tp->t_starttime = ticks; 1451 if (tp->t_flags & TF_NEEDFIN) { 1452 tp->t_state = TCPS_FIN_WAIT_1; 1453 tp->t_flags &= ~TF_NEEDFIN; 1454 } else { 1455 tp->t_state = TCPS_ESTABLISHED; 1456 callout_reset(tp->tt_keep, 1457 tcp_keepidle, 1458 tcp_timer_keep, 1459 tp); 1460 } 1461 tp->t_flags |= TF_NEEDSYN; 1462 } else 1463 tp->t_state = TCPS_SYN_RECEIVED; 1464 } else { 1465 /* CC.NEW or no option => invalidate cache */ 1466 taop->tao_cc = 0; 1467 tp->t_state = TCPS_SYN_RECEIVED; 1468 } 1469 } 1470 1471 trimthenstep6: 1472 /* 1473 * Advance th->th_seq to correspond to first data byte. 1474 * If data, trim to stay within window, 1475 * dropping FIN if necessary. 1476 */ 1477 th->th_seq++; 1478 if (tlen > tp->rcv_wnd) { 1479 todrop = tlen - tp->rcv_wnd; 1480 m_adj(m, -todrop); 1481 tlen = tp->rcv_wnd; 1482 thflags &= ~TH_FIN; 1483 tcpstat.tcps_rcvpackafterwin++; 1484 tcpstat.tcps_rcvbyteafterwin += todrop; 1485 } 1486 tp->snd_wl1 = th->th_seq - 1; 1487 tp->rcv_up = th->th_seq; 1488 /* 1489 * Client side of transaction: already sent SYN and data. 1490 * If the remote host used T/TCP to validate the SYN, 1491 * our data will be ACK'd; if so, enter normal data segment 1492 * processing in the middle of step 5, ack processing. 1493 * Otherwise, goto step 6. 1494 */ 1495 if (thflags & TH_ACK) 1496 goto process_ACK; 1497 1498 goto step6; 1499 1500 /* 1501 * If the state is LAST_ACK or CLOSING or TIME_WAIT: 1502 * if segment contains a SYN and CC [not CC.NEW] option: 1503 * if state == TIME_WAIT and connection duration > MSL, 1504 * drop packet and send RST; 1505 * 1506 * if SEG.CC > CCrecv then is new SYN, and can implicitly 1507 * ack the FIN (and data) in retransmission queue. 1508 * Complete close and delete TCPCB. Then reprocess 1509 * segment, hoping to find new TCPCB in LISTEN state; 1510 * 1511 * else must be old SYN; drop it. 1512 * else do normal processing. 1513 */ 1514 case TCPS_LAST_ACK: 1515 case TCPS_CLOSING: 1516 case TCPS_TIME_WAIT: 1517 if ((thflags & TH_SYN) && 1518 (to.to_flags & TOF_CC) && tp->cc_recv != 0) { 1519 if (tp->t_state == TCPS_TIME_WAIT && 1520 (ticks - tp->t_starttime) > tcp_msl) { 1521 rstreason = BANDLIM_UNLIMITED; 1522 goto dropwithreset; 1523 } 1524 if (CC_GT(to.to_cc, tp->cc_recv)) { 1525 tp = tcp_close(tp); 1526 goto findpcb; 1527 } 1528 else 1529 goto drop; 1530 } 1531 break; /* continue normal processing */ 1532 } 1533 1534 /* 1535 * States other than LISTEN or SYN_SENT. 1536 * First check the RST flag and sequence number since reset segments 1537 * are exempt from the timestamp and connection count tests. This 1538 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix 1539 * below which allowed reset segments in half the sequence space 1540 * to fall though and be processed (which gives forged reset 1541 * segments with a random sequence number a 50 percent chance of 1542 * killing a connection). 1543 * Then check timestamp, if present. 1544 * Then check the connection count, if present. 1545 * Then check that at least some bytes of segment are within 1546 * receive window. If segment begins before rcv_nxt, 1547 * drop leading data (and SYN); if nothing left, just ack. 1548 * 1549 * 1550 * If the RST bit is set, check the sequence number to see 1551 * if this is a valid reset segment. 1552 * RFC 793 page 37: 1553 * In all states except SYN-SENT, all reset (RST) segments 1554 * are validated by checking their SEQ-fields. A reset is 1555 * valid if its sequence number is in the window. 1556 * Note: this does not take into account delayed ACKs, so 1557 * we should test against last_ack_sent instead of rcv_nxt. 1558 * The sequence number in the reset segment is normally an 1559 * echo of our outgoing acknowledgement numbers, but some hosts 1560 * send a reset with the sequence number at the rightmost edge 1561 * of our receive window, and we have to handle this case. 1562 * If we have multiple segments in flight, the intial reset 1563 * segment sequence numbers will be to the left of last_ack_sent, 1564 * but they will eventually catch up. 1565 * In any case, it never made sense to trim reset segments to 1566 * fit the receive window since RFC 1122 says: 1567 * 4.2.2.12 RST Segment: RFC-793 Section 3.4 1568 * 1569 * A TCP SHOULD allow a received RST segment to include data. 1570 * 1571 * DISCUSSION 1572 * It has been suggested that a RST segment could contain 1573 * ASCII text that encoded and explained the cause of the 1574 * RST. No standard has yet been established for such 1575 * data. 1576 * 1577 * If the reset segment passes the sequence number test examine 1578 * the state: 1579 * SYN_RECEIVED STATE: 1580 * If passive open, return to LISTEN state. 1581 * If active open, inform user that connection was refused. 1582 * ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES: 1583 * Inform user that connection was reset, and close tcb. 1584 * CLOSING, LAST_ACK STATES: 1585 * Close the tcb. 1586 * TIME_WAIT STATE: 1587 * Drop the segment - see Stevens, vol. 2, p. 964 and 1588 * RFC 1337. 1589 */ 1590 if (thflags & TH_RST) { 1591 if (SEQ_GEQ(th->th_seq, tp->last_ack_sent) && 1592 SEQ_LEQ(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) { 1593 switch (tp->t_state) { 1594 1595 case TCPS_SYN_RECEIVED: 1596 so->so_error = ECONNREFUSED; 1597 goto close; 1598 1599 case TCPS_ESTABLISHED: 1600 case TCPS_FIN_WAIT_1: 1601 case TCPS_FIN_WAIT_2: 1602 case TCPS_CLOSE_WAIT: 1603 so->so_error = ECONNRESET; 1604 close: 1605 tp->t_state = TCPS_CLOSED; 1606 tcpstat.tcps_drops++; 1607 tp = tcp_close(tp); 1608 break; 1609 1610 case TCPS_CLOSING: 1611 case TCPS_LAST_ACK: 1612 tp = tcp_close(tp); 1613 break; 1614 1615 case TCPS_TIME_WAIT: 1616 break; 1617 } 1618 } 1619 goto drop; 1620 } 1621 1622 /* 1623 * RFC 1323 PAWS: If we have a timestamp reply on this segment 1624 * and it's less than ts_recent, drop it. 1625 */ 1626 if ((to.to_flags & TOF_TS) && tp->ts_recent != 0 && 1627 TSTMP_LT(to.to_tsval, tp->ts_recent)) { 1628 1629 /* Check to see if ts_recent is over 24 days old. */ 1630 if ((int)(ticks - tp->ts_recent_age) > TCP_PAWS_IDLE) { 1631 /* 1632 * Invalidate ts_recent. If this segment updates 1633 * ts_recent, the age will be reset later and ts_recent 1634 * will get a valid value. If it does not, setting 1635 * ts_recent to zero will at least satisfy the 1636 * requirement that zero be placed in the timestamp 1637 * echo reply when ts_recent isn't valid. The 1638 * age isn't reset until we get a valid ts_recent 1639 * because we don't want out-of-order segments to be 1640 * dropped when ts_recent is old. 1641 */ 1642 tp->ts_recent = 0; 1643 } else { 1644 tcpstat.tcps_rcvduppack++; 1645 tcpstat.tcps_rcvdupbyte += tlen; 1646 tcpstat.tcps_pawsdrop++; 1647 if (tlen) 1648 goto dropafterack; 1649 goto drop; 1650 } 1651 } 1652 1653 /* 1654 * T/TCP mechanism 1655 * If T/TCP was negotiated and the segment doesn't have CC, 1656 * or if its CC is wrong then drop the segment. 1657 * RST segments do not have to comply with this. 1658 */ 1659 if ((tp->t_flags & (TF_REQ_CC|TF_RCVD_CC)) == (TF_REQ_CC|TF_RCVD_CC) && 1660 (!(to.to_flags & TOF_CC) || tp->cc_recv != to.to_cc)) 1661 goto dropafterack; 1662 1663 /* 1664 * In the SYN-RECEIVED state, validate that the packet belongs to 1665 * this connection before trimming the data to fit the receive 1666 * window. Check the sequence number versus IRS since we know 1667 * the sequence numbers haven't wrapped. This is a partial fix 1668 * for the "LAND" DoS attack. 1669 */ 1670 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) { 1671 rstreason = BANDLIM_RST_OPENPORT; 1672 goto dropwithreset; 1673 } 1674 1675 todrop = tp->rcv_nxt - th->th_seq; 1676 if (todrop > 0) { 1677 if (TCP_DO_SACK(tp)) { 1678 /* Report duplicate segment at head of packet. */ 1679 tp->reportblk.rblk_start = th->th_seq; 1680 tp->reportblk.rblk_end = th->th_seq + tlen; 1681 if (thflags & TH_FIN) 1682 ++tp->reportblk.rblk_end; 1683 if (SEQ_GT(tp->reportblk.rblk_end, tp->rcv_nxt)) 1684 tp->reportblk.rblk_end = tp->rcv_nxt; 1685 tp->t_flags |= (TF_DUPSEG | TF_SACKLEFT | TF_ACKNOW); 1686 } 1687 if (thflags & TH_SYN) { 1688 thflags &= ~TH_SYN; 1689 th->th_seq++; 1690 if (th->th_urp > 1) 1691 th->th_urp--; 1692 else 1693 thflags &= ~TH_URG; 1694 todrop--; 1695 } 1696 /* 1697 * Following if statement from Stevens, vol. 2, p. 960. 1698 */ 1699 if (todrop > tlen || 1700 (todrop == tlen && !(thflags & TH_FIN))) { 1701 /* 1702 * Any valid FIN must be to the left of the window. 1703 * At this point the FIN must be a duplicate or out 1704 * of sequence; drop it. 1705 */ 1706 thflags &= ~TH_FIN; 1707 1708 /* 1709 * Send an ACK to resynchronize and drop any data. 1710 * But keep on processing for RST or ACK. 1711 */ 1712 tp->t_flags |= TF_ACKNOW; 1713 todrop = tlen; 1714 tcpstat.tcps_rcvduppack++; 1715 tcpstat.tcps_rcvdupbyte += todrop; 1716 } else { 1717 tcpstat.tcps_rcvpartduppack++; 1718 tcpstat.tcps_rcvpartdupbyte += todrop; 1719 } 1720 drop_hdrlen += todrop; /* drop from the top afterwards */ 1721 th->th_seq += todrop; 1722 tlen -= todrop; 1723 if (th->th_urp > todrop) 1724 th->th_urp -= todrop; 1725 else { 1726 thflags &= ~TH_URG; 1727 th->th_urp = 0; 1728 } 1729 } 1730 1731 /* 1732 * If new data are received on a connection after the 1733 * user processes are gone, then RST the other end. 1734 */ 1735 if ((so->so_state & SS_NOFDREF) && 1736 tp->t_state > TCPS_CLOSE_WAIT && tlen) { 1737 tp = tcp_close(tp); 1738 tcpstat.tcps_rcvafterclose++; 1739 rstreason = BANDLIM_UNLIMITED; 1740 goto dropwithreset; 1741 } 1742 1743 /* 1744 * If segment ends after window, drop trailing data 1745 * (and PUSH and FIN); if nothing left, just ACK. 1746 */ 1747 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd); 1748 if (todrop > 0) { 1749 tcpstat.tcps_rcvpackafterwin++; 1750 if (todrop >= tlen) { 1751 tcpstat.tcps_rcvbyteafterwin += tlen; 1752 /* 1753 * If a new connection request is received 1754 * while in TIME_WAIT, drop the old connection 1755 * and start over if the sequence numbers 1756 * are above the previous ones. 1757 */ 1758 if (thflags & TH_SYN && 1759 tp->t_state == TCPS_TIME_WAIT && 1760 SEQ_GT(th->th_seq, tp->rcv_nxt)) { 1761 tp = tcp_close(tp); 1762 goto findpcb; 1763 } 1764 /* 1765 * If window is closed can only take segments at 1766 * window edge, and have to drop data and PUSH from 1767 * incoming segments. Continue processing, but 1768 * remember to ack. Otherwise, drop segment 1769 * and ack. 1770 */ 1771 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) { 1772 tp->t_flags |= TF_ACKNOW; 1773 tcpstat.tcps_rcvwinprobe++; 1774 } else 1775 goto dropafterack; 1776 } else 1777 tcpstat.tcps_rcvbyteafterwin += todrop; 1778 m_adj(m, -todrop); 1779 tlen -= todrop; 1780 thflags &= ~(TH_PUSH | TH_FIN); 1781 } 1782 1783 /* 1784 * If last ACK falls within this segment's sequence numbers, 1785 * record its timestamp. 1786 * NOTE: 1787 * 1) That the test incorporates suggestions from the latest 1788 * proposal of the tcplw@cray.com list (Braden 1993/04/26). 1789 * 2) That updating only on newer timestamps interferes with 1790 * our earlier PAWS tests, so this check should be solely 1791 * predicated on the sequence space of this segment. 1792 * 3) That we modify the segment boundary check to be 1793 * Last.ACK.Sent <= SEG.SEQ + SEG.LEN 1794 * instead of RFC1323's 1795 * Last.ACK.Sent < SEG.SEQ + SEG.LEN, 1796 * This modified check allows us to overcome RFC1323's 1797 * limitations as described in Stevens TCP/IP Illustrated 1798 * Vol. 2 p.869. In such cases, we can still calculate the 1799 * RTT correctly when RCV.NXT == Last.ACK.Sent. 1800 */ 1801 if ((to.to_flags & TOF_TS) && SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 1802 SEQ_LEQ(tp->last_ack_sent, (th->th_seq + tlen 1803 + ((thflags & TH_SYN) != 0) 1804 + ((thflags & TH_FIN) != 0)))) { 1805 tp->ts_recent_age = ticks; 1806 tp->ts_recent = to.to_tsval; 1807 } 1808 1809 /* 1810 * If a SYN is in the window, then this is an 1811 * error and we send an RST and drop the connection. 1812 */ 1813 if (thflags & TH_SYN) { 1814 tp = tcp_drop(tp, ECONNRESET); 1815 rstreason = BANDLIM_UNLIMITED; 1816 goto dropwithreset; 1817 } 1818 1819 /* 1820 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN 1821 * flag is on (half-synchronized state), then queue data for 1822 * later processing; else drop segment and return. 1823 */ 1824 if (!(thflags & TH_ACK)) { 1825 if (tp->t_state == TCPS_SYN_RECEIVED || 1826 (tp->t_flags & TF_NEEDSYN)) 1827 goto step6; 1828 else 1829 goto drop; 1830 } 1831 1832 /* 1833 * Ack processing. 1834 */ 1835 switch (tp->t_state) { 1836 /* 1837 * In SYN_RECEIVED state, the ACK acknowledges our SYN, so enter 1838 * ESTABLISHED state and continue processing. 1839 * The ACK was checked above. 1840 */ 1841 case TCPS_SYN_RECEIVED: 1842 1843 tcpstat.tcps_connects++; 1844 soisconnected(so); 1845 /* Do window scaling? */ 1846 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 1847 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 1848 tp->snd_scale = tp->requested_s_scale; 1849 tp->rcv_scale = tp->request_r_scale; 1850 } 1851 /* 1852 * Upon successful completion of 3-way handshake, 1853 * update cache.CC if it was undefined, pass any queued 1854 * data to the user, and advance state appropriately. 1855 */ 1856 if ((taop = tcp_gettaocache(&inp->inp_inc)) != NULL && 1857 taop->tao_cc == 0) 1858 taop->tao_cc = tp->cc_recv; 1859 1860 /* 1861 * Make transitions: 1862 * SYN-RECEIVED -> ESTABLISHED 1863 * SYN-RECEIVED* -> FIN-WAIT-1 1864 */ 1865 tp->t_starttime = ticks; 1866 if (tp->t_flags & TF_NEEDFIN) { 1867 tp->t_state = TCPS_FIN_WAIT_1; 1868 tp->t_flags &= ~TF_NEEDFIN; 1869 } else { 1870 tp->t_state = TCPS_ESTABLISHED; 1871 callout_reset(tp->tt_keep, tcp_keepidle, 1872 tcp_timer_keep, tp); 1873 } 1874 /* 1875 * If segment contains data or ACK, will call tcp_reass() 1876 * later; if not, do so now to pass queued data to user. 1877 */ 1878 if (tlen == 0 && !(thflags & TH_FIN)) 1879 tcp_reass(tp, NULL, NULL, NULL); 1880 /* fall into ... */ 1881 1882 /* 1883 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range 1884 * ACKs. If the ack is in the range 1885 * tp->snd_una < th->th_ack <= tp->snd_max 1886 * then advance tp->snd_una to th->th_ack and drop 1887 * data from the retransmission queue. If this ACK reflects 1888 * more up to date window information we update our window information. 1889 */ 1890 case TCPS_ESTABLISHED: 1891 case TCPS_FIN_WAIT_1: 1892 case TCPS_FIN_WAIT_2: 1893 case TCPS_CLOSE_WAIT: 1894 case TCPS_CLOSING: 1895 case TCPS_LAST_ACK: 1896 case TCPS_TIME_WAIT: 1897 1898 if (SEQ_LEQ(th->th_ack, tp->snd_una)) { 1899 if (TCP_DO_SACK(tp)) 1900 tcp_sack_update_scoreboard(tp, &to); 1901 if (tlen != 0 || tiwin != tp->snd_wnd) { 1902 tp->t_dupacks = 0; 1903 break; 1904 } 1905 tcpstat.tcps_rcvdupack++; 1906 if (!callout_active(tp->tt_rexmt) || 1907 th->th_ack != tp->snd_una) { 1908 tp->t_dupacks = 0; 1909 break; 1910 } 1911 /* 1912 * We have outstanding data (other than 1913 * a window probe), this is a completely 1914 * duplicate ack (ie, window info didn't 1915 * change), the ack is the biggest we've 1916 * seen and we've seen exactly our rexmt 1917 * threshhold of them, so assume a packet 1918 * has been dropped and retransmit it. 1919 * Kludge snd_nxt & the congestion 1920 * window so we send only this one 1921 * packet. 1922 */ 1923 if (IN_FASTRECOVERY(tp)) { 1924 if (TCP_DO_SACK(tp)) { 1925 /* No artifical cwnd inflation. */ 1926 tcp_sack_rexmt(tp, th); 1927 } else { 1928 /* 1929 * Dup acks mean that packets 1930 * have left the network 1931 * (they're now cached at the 1932 * receiver) so bump cwnd by 1933 * the amount in the receiver 1934 * to keep a constant cwnd 1935 * packets in the network. 1936 */ 1937 tp->snd_cwnd += tp->t_maxseg; 1938 tcp_output(tp); 1939 } 1940 } else if (SEQ_LT(th->th_ack, tp->snd_recover)) { 1941 tp->t_dupacks = 0; 1942 break; 1943 } else if (++tp->t_dupacks == tcprexmtthresh) { 1944 tcp_seq old_snd_nxt; 1945 u_int win; 1946 1947 fastretransmit: 1948 if (tcp_do_eifel_detect && 1949 (tp->t_flags & TF_RCVD_TSTMP)) { 1950 tcp_save_congestion_state(tp); 1951 tp->t_flags |= TF_FASTREXMT; 1952 } 1953 /* 1954 * We know we're losing at the current 1955 * window size, so do congestion avoidance: 1956 * set ssthresh to half the current window 1957 * and pull our congestion window back to the 1958 * new ssthresh. 1959 */ 1960 win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / 1961 tp->t_maxseg; 1962 if (win < 2) 1963 win = 2; 1964 tp->snd_ssthresh = win * tp->t_maxseg; 1965 ENTER_FASTRECOVERY(tp); 1966 tp->snd_recover = tp->snd_max; 1967 callout_stop(tp->tt_rexmt); 1968 tp->t_rtttime = 0; 1969 old_snd_nxt = tp->snd_nxt; 1970 tp->snd_nxt = th->th_ack; 1971 tp->snd_cwnd = tp->t_maxseg; 1972 tcp_output(tp); 1973 ++tcpstat.tcps_sndfastrexmit; 1974 tp->snd_cwnd = tp->snd_ssthresh; 1975 tp->rexmt_high = tp->snd_nxt; 1976 if (SEQ_GT(old_snd_nxt, tp->snd_nxt)) 1977 tp->snd_nxt = old_snd_nxt; 1978 KASSERT(tp->snd_limited <= 2, 1979 ("tp->snd_limited too big")); 1980 if (TCP_DO_SACK(tp)) 1981 tcp_sack_rexmt(tp, th); 1982 else 1983 tp->snd_cwnd += tp->t_maxseg * 1984 (tp->t_dupacks - tp->snd_limited); 1985 } else if (tcp_do_limitedtransmit) { 1986 u_long oldcwnd = tp->snd_cwnd; 1987 tcp_seq oldsndmax = tp->snd_max; 1988 tcp_seq oldsndnxt = tp->snd_nxt; 1989 /* outstanding data */ 1990 uint32_t ownd = tp->snd_max - tp->snd_una; 1991 u_int sent; 1992 1993 #define iceildiv(n, d) (((n)+(d)-1) / (d)) 1994 1995 KASSERT(tp->t_dupacks == 1 || 1996 tp->t_dupacks == 2, 1997 ("dupacks not 1 or 2")); 1998 if (tp->t_dupacks == 1) 1999 tp->snd_limited = 0; 2000 tp->snd_nxt = tp->snd_max; 2001 tp->snd_cwnd = ownd + 2002 (tp->t_dupacks - tp->snd_limited) * 2003 tp->t_maxseg; 2004 tcp_output(tp); 2005 if (SEQ_LT(oldsndnxt, oldsndmax)) 2006 tp->snd_nxt = oldsndnxt; 2007 tp->snd_cwnd = oldcwnd; 2008 sent = tp->snd_max - oldsndmax; 2009 if (sent > tp->t_maxseg) { 2010 KASSERT((tp->t_dupacks == 2 && 2011 tp->snd_limited == 0) || 2012 (sent == tp->t_maxseg + 1 && 2013 tp->t_flags & TF_SENTFIN), 2014 ("sent too much")); 2015 KASSERT(sent <= tp->t_maxseg * 2, 2016 ("sent too many segments")); 2017 tp->snd_limited = 2; 2018 tcpstat.tcps_sndlimited += 2; 2019 } else if (sent > 0) { 2020 ++tp->snd_limited; 2021 ++tcpstat.tcps_sndlimited; 2022 } else if (tcp_do_early_retransmit && 2023 (tcp_do_eifel_detect && 2024 (tp->t_flags & TF_RCVD_TSTMP)) && 2025 ownd < 4 * tp->t_maxseg && 2026 tp->t_dupacks + 1 >= 2027 iceildiv(ownd, tp->t_maxseg) && 2028 (!TCP_DO_SACK(tp) || 2029 ownd <= tp->t_maxseg || 2030 tcp_sack_has_sacked(&tp->scb, 2031 ownd - tp->t_maxseg))) { 2032 ++tcpstat.tcps_sndearlyrexmit; 2033 tp->t_flags |= TF_EARLYREXMT; 2034 goto fastretransmit; 2035 } 2036 } 2037 goto drop; 2038 } 2039 2040 KASSERT(SEQ_GT(th->th_ack, tp->snd_una), ("th_ack <= snd_una")); 2041 tp->t_dupacks = 0; 2042 if (SEQ_GT(th->th_ack, tp->snd_max)) { 2043 /* 2044 * Detected optimistic ACK attack. 2045 * Force slow-start to de-synchronize attack. 2046 */ 2047 tp->snd_cwnd = tp->t_maxseg; 2048 tp->snd_wacked = 0; 2049 2050 tcpstat.tcps_rcvacktoomuch++; 2051 goto dropafterack; 2052 } 2053 /* 2054 * If we reach this point, ACK is not a duplicate, 2055 * i.e., it ACKs something we sent. 2056 */ 2057 if (tp->t_flags & TF_NEEDSYN) { 2058 /* 2059 * T/TCP: Connection was half-synchronized, and our 2060 * SYN has been ACK'd (so connection is now fully 2061 * synchronized). Go to non-starred state, 2062 * increment snd_una for ACK of SYN, and check if 2063 * we can do window scaling. 2064 */ 2065 tp->t_flags &= ~TF_NEEDSYN; 2066 tp->snd_una++; 2067 /* Do window scaling? */ 2068 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 2069 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 2070 tp->snd_scale = tp->requested_s_scale; 2071 tp->rcv_scale = tp->request_r_scale; 2072 } 2073 } 2074 2075 process_ACK: 2076 acked = th->th_ack - tp->snd_una; 2077 tcpstat.tcps_rcvackpack++; 2078 tcpstat.tcps_rcvackbyte += acked; 2079 2080 if (tcp_do_eifel_detect && acked > 0 && 2081 (to.to_flags & TOF_TS) && (to.to_tsecr != 0) && 2082 (tp->t_flags & TF_FIRSTACCACK)) { 2083 /* Eifel detection applicable. */ 2084 if (to.to_tsecr < tp->t_rexmtTS) { 2085 ++tcpstat.tcps_eifeldetected; 2086 tcp_revert_congestion_state(tp); 2087 if (tp->t_rxtshift == 1 && 2088 ticks >= tp->t_badrxtwin) 2089 ++tcpstat.tcps_rttcantdetect; 2090 } 2091 } else if (tp->t_rxtshift == 1 && ticks < tp->t_badrxtwin) { 2092 /* 2093 * If we just performed our first retransmit, 2094 * and the ACK arrives within our recovery window, 2095 * then it was a mistake to do the retransmit 2096 * in the first place. Recover our original cwnd 2097 * and ssthresh, and proceed to transmit where we 2098 * left off. 2099 */ 2100 tcp_revert_congestion_state(tp); 2101 ++tcpstat.tcps_rttdetected; 2102 } 2103 2104 /* 2105 * If we have a timestamp reply, update smoothed 2106 * round trip time. If no timestamp is present but 2107 * transmit timer is running and timed sequence 2108 * number was acked, update smoothed round trip time. 2109 * Since we now have an rtt measurement, cancel the 2110 * timer backoff (cf., Phil Karn's retransmit alg.). 2111 * Recompute the initial retransmit timer. 2112 * 2113 * Some machines (certain windows boxes) send broken 2114 * timestamp replies during the SYN+ACK phase, ignore 2115 * timestamps of 0. 2116 */ 2117 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) 2118 tcp_xmit_timer(tp, ticks - to.to_tsecr + 1); 2119 else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) 2120 tcp_xmit_timer(tp, ticks - tp->t_rtttime); 2121 tcp_xmit_bandwidth_limit(tp, th->th_ack); 2122 2123 /* 2124 * If no data (only SYN) was ACK'd, 2125 * skip rest of ACK processing. 2126 */ 2127 if (acked == 0) 2128 goto step6; 2129 2130 /* Stop looking for an acceptable ACK since one was received. */ 2131 tp->t_flags &= ~(TF_FIRSTACCACK | TF_FASTREXMT | TF_EARLYREXMT); 2132 2133 if (acked > so->so_snd.sb_cc) { 2134 tp->snd_wnd -= so->so_snd.sb_cc; 2135 sbdrop(&so->so_snd, (int)so->so_snd.sb_cc); 2136 ourfinisacked = TRUE; 2137 } else { 2138 sbdrop(&so->so_snd, acked); 2139 tp->snd_wnd -= acked; 2140 ourfinisacked = FALSE; 2141 } 2142 sowwakeup(so); 2143 2144 /* 2145 * Update window information. 2146 * Don't look at window if no ACK: 2147 * TAC's send garbage on first SYN. 2148 */ 2149 if (SEQ_LT(tp->snd_wl1, th->th_seq) || 2150 (tp->snd_wl1 == th->th_seq && 2151 (SEQ_LT(tp->snd_wl2, th->th_ack) || 2152 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)))) { 2153 /* keep track of pure window updates */ 2154 if (tlen == 0 && tp->snd_wl2 == th->th_ack && 2155 tiwin > tp->snd_wnd) 2156 tcpstat.tcps_rcvwinupd++; 2157 tp->snd_wnd = tiwin; 2158 tp->snd_wl1 = th->th_seq; 2159 tp->snd_wl2 = th->th_ack; 2160 if (tp->snd_wnd > tp->max_sndwnd) 2161 tp->max_sndwnd = tp->snd_wnd; 2162 needoutput = TRUE; 2163 } 2164 2165 tp->snd_una = th->th_ack; 2166 if (TCP_DO_SACK(tp)) 2167 tcp_sack_update_scoreboard(tp, &to); 2168 if (IN_FASTRECOVERY(tp)) { 2169 if (SEQ_GEQ(th->th_ack, tp->snd_recover)) { 2170 EXIT_FASTRECOVERY(tp); 2171 needoutput = TRUE; 2172 /* 2173 * If the congestion window was inflated 2174 * to account for the other side's 2175 * cached packets, retract it. 2176 */ 2177 if (!TCP_DO_SACK(tp)) 2178 tp->snd_cwnd = tp->snd_ssthresh; 2179 2180 /* 2181 * Window inflation should have left us 2182 * with approximately snd_ssthresh outstanding 2183 * data. But, in case we would be inclined 2184 * to send a burst, better do it using 2185 * slow start. 2186 */ 2187 if (SEQ_GT(th->th_ack + tp->snd_cwnd, 2188 tp->snd_max + 2 * tp->t_maxseg)) 2189 tp->snd_cwnd = 2190 (tp->snd_max - tp->snd_una) + 2191 2 * tp->t_maxseg; 2192 2193 tp->snd_wacked = 0; 2194 } else { 2195 if (TCP_DO_SACK(tp)) { 2196 tp->snd_max_rexmt = tp->snd_max; 2197 tcp_sack_rexmt(tp, th); 2198 } else { 2199 tcp_newreno_partial_ack(tp, th, acked); 2200 } 2201 needoutput = FALSE; 2202 } 2203 } else { 2204 /* 2205 * Open the congestion window. When in slow-start, 2206 * open exponentially: maxseg per packet. Otherwise, 2207 * open linearly: maxseg per window. 2208 */ 2209 if (tp->snd_cwnd <= tp->snd_ssthresh) { 2210 u_int abc_sslimit = 2211 (SEQ_LT(tp->snd_nxt, tp->snd_max) ? 2212 tp->t_maxseg : 2 * tp->t_maxseg); 2213 2214 /* slow-start */ 2215 tp->snd_cwnd += tcp_do_abc ? 2216 min(acked, abc_sslimit) : tp->t_maxseg; 2217 } else { 2218 /* linear increase */ 2219 tp->snd_wacked += tcp_do_abc ? acked : 2220 tp->t_maxseg; 2221 if (tp->snd_wacked >= tp->snd_cwnd) { 2222 tp->snd_wacked -= tp->snd_cwnd; 2223 tp->snd_cwnd += tp->t_maxseg; 2224 } 2225 } 2226 tp->snd_cwnd = min(tp->snd_cwnd, 2227 TCP_MAXWIN << tp->snd_scale); 2228 tp->snd_recover = th->th_ack - 1; 2229 } 2230 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 2231 tp->snd_nxt = tp->snd_una; 2232 2233 /* 2234 * If all outstanding data is acked, stop retransmit 2235 * timer and remember to restart (more output or persist). 2236 * If there is more data to be acked, restart retransmit 2237 * timer, using current (possibly backed-off) value. 2238 */ 2239 if (th->th_ack == tp->snd_max) { 2240 callout_stop(tp->tt_rexmt); 2241 needoutput = TRUE; 2242 } else if (!callout_active(tp->tt_persist)) 2243 callout_reset(tp->tt_rexmt, tp->t_rxtcur, 2244 tcp_timer_rexmt, tp); 2245 2246 switch (tp->t_state) { 2247 /* 2248 * In FIN_WAIT_1 STATE in addition to the processing 2249 * for the ESTABLISHED state if our FIN is now acknowledged 2250 * then enter FIN_WAIT_2. 2251 */ 2252 case TCPS_FIN_WAIT_1: 2253 if (ourfinisacked) { 2254 /* 2255 * If we can't receive any more 2256 * data, then closing user can proceed. 2257 * Starting the timer is contrary to the 2258 * specification, but if we don't get a FIN 2259 * we'll hang forever. 2260 */ 2261 if (so->so_state & SS_CANTRCVMORE) { 2262 soisdisconnected(so); 2263 callout_reset(tp->tt_2msl, tcp_maxidle, 2264 tcp_timer_2msl, tp); 2265 } 2266 tp->t_state = TCPS_FIN_WAIT_2; 2267 } 2268 break; 2269 2270 /* 2271 * In CLOSING STATE in addition to the processing for 2272 * the ESTABLISHED state if the ACK acknowledges our FIN 2273 * then enter the TIME-WAIT state, otherwise ignore 2274 * the segment. 2275 */ 2276 case TCPS_CLOSING: 2277 if (ourfinisacked) { 2278 tp->t_state = TCPS_TIME_WAIT; 2279 tcp_canceltimers(tp); 2280 /* Shorten TIME_WAIT [RFC-1644, p.28] */ 2281 if (tp->cc_recv != 0 && 2282 (ticks - tp->t_starttime) < tcp_msl) 2283 callout_reset(tp->tt_2msl, 2284 tp->t_rxtcur * TCPTV_TWTRUNC, 2285 tcp_timer_2msl, tp); 2286 else 2287 callout_reset(tp->tt_2msl, 2 * tcp_msl, 2288 tcp_timer_2msl, tp); 2289 soisdisconnected(so); 2290 } 2291 break; 2292 2293 /* 2294 * In LAST_ACK, we may still be waiting for data to drain 2295 * and/or to be acked, as well as for the ack of our FIN. 2296 * If our FIN is now acknowledged, delete the TCB, 2297 * enter the closed state and return. 2298 */ 2299 case TCPS_LAST_ACK: 2300 if (ourfinisacked) { 2301 tp = tcp_close(tp); 2302 goto drop; 2303 } 2304 break; 2305 2306 /* 2307 * In TIME_WAIT state the only thing that should arrive 2308 * is a retransmission of the remote FIN. Acknowledge 2309 * it and restart the finack timer. 2310 */ 2311 case TCPS_TIME_WAIT: 2312 callout_reset(tp->tt_2msl, 2 * tcp_msl, 2313 tcp_timer_2msl, tp); 2314 goto dropafterack; 2315 } 2316 } 2317 2318 step6: 2319 /* 2320 * Update window information. 2321 * Don't look at window if no ACK: TAC's send garbage on first SYN. 2322 */ 2323 if ((thflags & TH_ACK) && 2324 acceptable_window_update(tp, th, tiwin)) { 2325 /* keep track of pure window updates */ 2326 if (tlen == 0 && tp->snd_wl2 == th->th_ack && 2327 tiwin > tp->snd_wnd) 2328 tcpstat.tcps_rcvwinupd++; 2329 tp->snd_wnd = tiwin; 2330 tp->snd_wl1 = th->th_seq; 2331 tp->snd_wl2 = th->th_ack; 2332 if (tp->snd_wnd > tp->max_sndwnd) 2333 tp->max_sndwnd = tp->snd_wnd; 2334 needoutput = TRUE; 2335 } 2336 2337 /* 2338 * Process segments with URG. 2339 */ 2340 if ((thflags & TH_URG) && th->th_urp && 2341 !TCPS_HAVERCVDFIN(tp->t_state)) { 2342 /* 2343 * This is a kludge, but if we receive and accept 2344 * random urgent pointers, we'll crash in 2345 * soreceive. It's hard to imagine someone 2346 * actually wanting to send this much urgent data. 2347 */ 2348 if (th->th_urp + so->so_rcv.sb_cc > sb_max) { 2349 th->th_urp = 0; /* XXX */ 2350 thflags &= ~TH_URG; /* XXX */ 2351 goto dodata; /* XXX */ 2352 } 2353 /* 2354 * If this segment advances the known urgent pointer, 2355 * then mark the data stream. This should not happen 2356 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since 2357 * a FIN has been received from the remote side. 2358 * In these states we ignore the URG. 2359 * 2360 * According to RFC961 (Assigned Protocols), 2361 * the urgent pointer points to the last octet 2362 * of urgent data. We continue, however, 2363 * to consider it to indicate the first octet 2364 * of data past the urgent section as the original 2365 * spec states (in one of two places). 2366 */ 2367 if (SEQ_GT(th->th_seq + th->th_urp, tp->rcv_up)) { 2368 tp->rcv_up = th->th_seq + th->th_urp; 2369 so->so_oobmark = so->so_rcv.sb_cc + 2370 (tp->rcv_up - tp->rcv_nxt) - 1; 2371 if (so->so_oobmark == 0) 2372 so->so_state |= SS_RCVATMARK; 2373 sohasoutofband(so); 2374 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA); 2375 } 2376 /* 2377 * Remove out of band data so doesn't get presented to user. 2378 * This can happen independent of advancing the URG pointer, 2379 * but if two URG's are pending at once, some out-of-band 2380 * data may creep in... ick. 2381 */ 2382 if (th->th_urp <= (u_long)tlen && 2383 !(so->so_options & SO_OOBINLINE)) { 2384 /* hdr drop is delayed */ 2385 tcp_pulloutofband(so, th, m, drop_hdrlen); 2386 } 2387 } else { 2388 /* 2389 * If no out of band data is expected, 2390 * pull receive urgent pointer along 2391 * with the receive window. 2392 */ 2393 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up)) 2394 tp->rcv_up = tp->rcv_nxt; 2395 } 2396 2397 dodata: /* XXX */ 2398 /* 2399 * Process the segment text, merging it into the TCP sequencing queue, 2400 * and arranging for acknowledgment of receipt if necessary. 2401 * This process logically involves adjusting tp->rcv_wnd as data 2402 * is presented to the user (this happens in tcp_usrreq.c, 2403 * case PRU_RCVD). If a FIN has already been received on this 2404 * connection then we just ignore the text. 2405 */ 2406 if ((tlen || (thflags & TH_FIN)) && !TCPS_HAVERCVDFIN(tp->t_state)) { 2407 m_adj(m, drop_hdrlen); /* delayed header drop */ 2408 /* 2409 * Insert segment which includes th into TCP reassembly queue 2410 * with control block tp. Set thflags to whether reassembly now 2411 * includes a segment with FIN. This handles the common case 2412 * inline (segment is the next to be received on an established 2413 * connection, and the queue is empty), avoiding linkage into 2414 * and removal from the queue and repetition of various 2415 * conversions. 2416 * Set DELACK for segments received in order, but ack 2417 * immediately when segments are out of order (so 2418 * fast retransmit can work). 2419 */ 2420 if (th->th_seq == tp->rcv_nxt && 2421 LIST_EMPTY(&tp->t_segq) && 2422 TCPS_HAVEESTABLISHED(tp->t_state)) { 2423 if (DELAY_ACK(tp)) 2424 callout_reset(tp->tt_delack, tcp_delacktime, 2425 tcp_timer_delack, tp); 2426 else 2427 tp->t_flags |= TF_ACKNOW; 2428 tp->rcv_nxt += tlen; 2429 thflags = th->th_flags & TH_FIN; 2430 tcpstat.tcps_rcvpack++; 2431 tcpstat.tcps_rcvbyte += tlen; 2432 ND6_HINT(tp); 2433 if (so->so_state & SS_CANTRCVMORE) 2434 m_freem(m); 2435 else 2436 sbappendstream(&so->so_rcv, m); 2437 sorwakeup(so); 2438 } else { 2439 if (!(tp->t_flags & TF_DUPSEG)) { 2440 /* Initialize SACK report block. */ 2441 tp->reportblk.rblk_start = th->th_seq; 2442 tp->reportblk.rblk_end = th->th_seq + tlen + 2443 ((thflags & TH_FIN) != 0); 2444 } 2445 thflags = tcp_reass(tp, th, &tlen, m); 2446 tp->t_flags |= TF_ACKNOW; 2447 } 2448 2449 /* 2450 * Note the amount of data that peer has sent into 2451 * our window, in order to estimate the sender's 2452 * buffer size. 2453 */ 2454 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt); 2455 } else { 2456 m_freem(m); 2457 thflags &= ~TH_FIN; 2458 } 2459 2460 /* 2461 * If FIN is received ACK the FIN and let the user know 2462 * that the connection is closing. 2463 */ 2464 if (thflags & TH_FIN) { 2465 if (!TCPS_HAVERCVDFIN(tp->t_state)) { 2466 socantrcvmore(so); 2467 /* 2468 * If connection is half-synchronized 2469 * (ie NEEDSYN flag on) then delay ACK, 2470 * so it may be piggybacked when SYN is sent. 2471 * Otherwise, since we received a FIN then no 2472 * more input can be expected, send ACK now. 2473 */ 2474 if (DELAY_ACK(tp) && (tp->t_flags & TF_NEEDSYN)) 2475 callout_reset(tp->tt_delack, tcp_delacktime, 2476 tcp_timer_delack, tp); 2477 else 2478 tp->t_flags |= TF_ACKNOW; 2479 tp->rcv_nxt++; 2480 } 2481 2482 switch (tp->t_state) { 2483 /* 2484 * In SYN_RECEIVED and ESTABLISHED STATES 2485 * enter the CLOSE_WAIT state. 2486 */ 2487 case TCPS_SYN_RECEIVED: 2488 tp->t_starttime = ticks; 2489 /*FALLTHROUGH*/ 2490 case TCPS_ESTABLISHED: 2491 tp->t_state = TCPS_CLOSE_WAIT; 2492 break; 2493 2494 /* 2495 * If still in FIN_WAIT_1 STATE FIN has not been acked so 2496 * enter the CLOSING state. 2497 */ 2498 case TCPS_FIN_WAIT_1: 2499 tp->t_state = TCPS_CLOSING; 2500 break; 2501 2502 /* 2503 * In FIN_WAIT_2 state enter the TIME_WAIT state, 2504 * starting the time-wait timer, turning off the other 2505 * standard timers. 2506 */ 2507 case TCPS_FIN_WAIT_2: 2508 tp->t_state = TCPS_TIME_WAIT; 2509 tcp_canceltimers(tp); 2510 /* Shorten TIME_WAIT [RFC-1644, p.28] */ 2511 if (tp->cc_recv != 0 && 2512 (ticks - tp->t_starttime) < tcp_msl) { 2513 callout_reset(tp->tt_2msl, 2514 tp->t_rxtcur * TCPTV_TWTRUNC, 2515 tcp_timer_2msl, tp); 2516 /* For transaction client, force ACK now. */ 2517 tp->t_flags |= TF_ACKNOW; 2518 } 2519 else 2520 callout_reset(tp->tt_2msl, 2 * tcp_msl, 2521 tcp_timer_2msl, tp); 2522 soisdisconnected(so); 2523 break; 2524 2525 /* 2526 * In TIME_WAIT state restart the 2 MSL time_wait timer. 2527 */ 2528 case TCPS_TIME_WAIT: 2529 callout_reset(tp->tt_2msl, 2 * tcp_msl, 2530 tcp_timer_2msl, tp); 2531 break; 2532 } 2533 } 2534 2535 #ifdef TCPDEBUG 2536 if (so->so_options & SO_DEBUG) 2537 tcp_trace(TA_INPUT, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0); 2538 #endif 2539 2540 /* 2541 * Return any desired output. 2542 */ 2543 if (needoutput || (tp->t_flags & TF_ACKNOW)) 2544 tcp_output(tp); 2545 return; 2546 2547 dropafterack: 2548 /* 2549 * Generate an ACK dropping incoming segment if it occupies 2550 * sequence space, where the ACK reflects our state. 2551 * 2552 * We can now skip the test for the RST flag since all 2553 * paths to this code happen after packets containing 2554 * RST have been dropped. 2555 * 2556 * In the SYN-RECEIVED state, don't send an ACK unless the 2557 * segment we received passes the SYN-RECEIVED ACK test. 2558 * If it fails send a RST. This breaks the loop in the 2559 * "LAND" DoS attack, and also prevents an ACK storm 2560 * between two listening ports that have been sent forged 2561 * SYN segments, each with the source address of the other. 2562 */ 2563 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) && 2564 (SEQ_GT(tp->snd_una, th->th_ack) || 2565 SEQ_GT(th->th_ack, tp->snd_max)) ) { 2566 rstreason = BANDLIM_RST_OPENPORT; 2567 goto dropwithreset; 2568 } 2569 #ifdef TCPDEBUG 2570 if (so->so_options & SO_DEBUG) 2571 tcp_trace(TA_DROP, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0); 2572 #endif 2573 m_freem(m); 2574 tp->t_flags |= TF_ACKNOW; 2575 tcp_output(tp); 2576 return; 2577 2578 dropwithreset: 2579 /* 2580 * Generate a RST, dropping incoming segment. 2581 * Make ACK acceptable to originator of segment. 2582 * Don't bother to respond if destination was broadcast/multicast. 2583 */ 2584 if ((thflags & TH_RST) || m->m_flags & (M_BCAST | M_MCAST)) 2585 goto drop; 2586 if (isipv6) { 2587 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || 2588 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) 2589 goto drop; 2590 } else { 2591 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 2592 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || 2593 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || 2594 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) 2595 goto drop; 2596 } 2597 /* IPv6 anycast check is done at tcp6_input() */ 2598 2599 /* 2600 * Perform bandwidth limiting. 2601 */ 2602 #ifdef ICMP_BANDLIM 2603 if (badport_bandlim(rstreason) < 0) 2604 goto drop; 2605 #endif 2606 2607 #ifdef TCPDEBUG 2608 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) 2609 tcp_trace(TA_DROP, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0); 2610 #endif 2611 if (thflags & TH_ACK) 2612 /* mtod() below is safe as long as hdr dropping is delayed */ 2613 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0, th->th_ack, 2614 TH_RST); 2615 else { 2616 if (thflags & TH_SYN) 2617 tlen++; 2618 /* mtod() below is safe as long as hdr dropping is delayed */ 2619 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq + tlen, 2620 (tcp_seq)0, TH_RST | TH_ACK); 2621 } 2622 return; 2623 2624 drop: 2625 /* 2626 * Drop space held by incoming segment and return. 2627 */ 2628 #ifdef TCPDEBUG 2629 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) 2630 tcp_trace(TA_DROP, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0); 2631 #endif 2632 m_freem(m); 2633 return; 2634 } 2635 2636 /* 2637 * Parse TCP options and place in tcpopt. 2638 */ 2639 static void 2640 tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, boolean_t is_syn) 2641 { 2642 int opt, optlen, i; 2643 2644 to->to_flags = 0; 2645 for (; cnt > 0; cnt -= optlen, cp += optlen) { 2646 opt = cp[0]; 2647 if (opt == TCPOPT_EOL) 2648 break; 2649 if (opt == TCPOPT_NOP) 2650 optlen = 1; 2651 else { 2652 if (cnt < 2) 2653 break; 2654 optlen = cp[1]; 2655 if (optlen < 2 || optlen > cnt) 2656 break; 2657 } 2658 switch (opt) { 2659 case TCPOPT_MAXSEG: 2660 if (optlen != TCPOLEN_MAXSEG) 2661 continue; 2662 if (!is_syn) 2663 continue; 2664 to->to_flags |= TOF_MSS; 2665 bcopy(cp + 2, &to->to_mss, sizeof to->to_mss); 2666 to->to_mss = ntohs(to->to_mss); 2667 break; 2668 case TCPOPT_WINDOW: 2669 if (optlen != TCPOLEN_WINDOW) 2670 continue; 2671 if (!is_syn) 2672 continue; 2673 to->to_flags |= TOF_SCALE; 2674 to->to_requested_s_scale = min(cp[2], TCP_MAX_WINSHIFT); 2675 break; 2676 case TCPOPT_TIMESTAMP: 2677 if (optlen != TCPOLEN_TIMESTAMP) 2678 continue; 2679 to->to_flags |= TOF_TS; 2680 bcopy(cp + 2, &to->to_tsval, sizeof to->to_tsval); 2681 to->to_tsval = ntohl(to->to_tsval); 2682 bcopy(cp + 6, &to->to_tsecr, sizeof to->to_tsecr); 2683 to->to_tsecr = ntohl(to->to_tsecr); 2684 /* 2685 * If echoed timestamp is later than the current time, 2686 * fall back to non RFC1323 RTT calculation. 2687 */ 2688 if (to->to_tsecr != 0 && TSTMP_GT(to->to_tsecr, ticks)) 2689 to->to_tsecr = 0; 2690 break; 2691 case TCPOPT_CC: 2692 if (optlen != TCPOLEN_CC) 2693 continue; 2694 to->to_flags |= TOF_CC; 2695 bcopy(cp + 2, &to->to_cc, sizeof to->to_cc); 2696 to->to_cc = ntohl(to->to_cc); 2697 break; 2698 case TCPOPT_CCNEW: 2699 if (optlen != TCPOLEN_CC) 2700 continue; 2701 if (!is_syn) 2702 continue; 2703 to->to_flags |= TOF_CCNEW; 2704 bcopy(cp + 2, &to->to_cc, sizeof to->to_cc); 2705 to->to_cc = ntohl(to->to_cc); 2706 break; 2707 case TCPOPT_CCECHO: 2708 if (optlen != TCPOLEN_CC) 2709 continue; 2710 if (!is_syn) 2711 continue; 2712 to->to_flags |= TOF_CCECHO; 2713 bcopy(cp + 2, &to->to_ccecho, sizeof to->to_ccecho); 2714 to->to_ccecho = ntohl(to->to_ccecho); 2715 break; 2716 case TCPOPT_SACK_PERMITTED: 2717 if (optlen != TCPOLEN_SACK_PERMITTED) 2718 continue; 2719 if (!is_syn) 2720 continue; 2721 to->to_flags |= TOF_SACK_PERMITTED; 2722 break; 2723 case TCPOPT_SACK: 2724 if ((optlen - 2) & 0x07) /* not multiple of 8 */ 2725 continue; 2726 to->to_nsackblocks = (optlen - 2) / 8; 2727 to->to_sackblocks = (struct raw_sackblock *) (cp + 2); 2728 to->to_flags |= TOF_SACK; 2729 for (i = 0; i < to->to_nsackblocks; i++) { 2730 struct raw_sackblock *r = &to->to_sackblocks[i]; 2731 2732 r->rblk_start = ntohl(r->rblk_start); 2733 r->rblk_end = ntohl(r->rblk_end); 2734 } 2735 break; 2736 default: 2737 continue; 2738 } 2739 } 2740 } 2741 2742 /* 2743 * Pull out of band byte out of a segment so 2744 * it doesn't appear in the user's data queue. 2745 * It is still reflected in the segment length for 2746 * sequencing purposes. 2747 * "off" is the delayed to be dropped hdrlen. 2748 */ 2749 static void 2750 tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m, int off) 2751 { 2752 int cnt = off + th->th_urp - 1; 2753 2754 while (cnt >= 0) { 2755 if (m->m_len > cnt) { 2756 char *cp = mtod(m, caddr_t) + cnt; 2757 struct tcpcb *tp = sototcpcb(so); 2758 2759 tp->t_iobc = *cp; 2760 tp->t_oobflags |= TCPOOB_HAVEDATA; 2761 bcopy(cp + 1, cp, m->m_len - cnt - 1); 2762 m->m_len--; 2763 if (m->m_flags & M_PKTHDR) 2764 m->m_pkthdr.len--; 2765 return; 2766 } 2767 cnt -= m->m_len; 2768 m = m->m_next; 2769 if (m == 0) 2770 break; 2771 } 2772 panic("tcp_pulloutofband"); 2773 } 2774 2775 /* 2776 * Collect new round-trip time estimate 2777 * and update averages and current timeout. 2778 */ 2779 static void 2780 tcp_xmit_timer(struct tcpcb *tp, int rtt) 2781 { 2782 int delta; 2783 2784 tcpstat.tcps_rttupdated++; 2785 tp->t_rttupdated++; 2786 if (tp->t_srtt != 0) { 2787 /* 2788 * srtt is stored as fixed point with 5 bits after the 2789 * binary point (i.e., scaled by 8). The following magic 2790 * is equivalent to the smoothing algorithm in rfc793 with 2791 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed 2792 * point). Adjust rtt to origin 0. 2793 */ 2794 delta = ((rtt - 1) << TCP_DELTA_SHIFT) 2795 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT)); 2796 2797 if ((tp->t_srtt += delta) <= 0) 2798 tp->t_srtt = 1; 2799 2800 /* 2801 * We accumulate a smoothed rtt variance (actually, a 2802 * smoothed mean difference), then set the retransmit 2803 * timer to smoothed rtt + 4 times the smoothed variance. 2804 * rttvar is stored as fixed point with 4 bits after the 2805 * binary point (scaled by 16). The following is 2806 * equivalent to rfc793 smoothing with an alpha of .75 2807 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces 2808 * rfc793's wired-in beta. 2809 */ 2810 if (delta < 0) 2811 delta = -delta; 2812 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT); 2813 if ((tp->t_rttvar += delta) <= 0) 2814 tp->t_rttvar = 1; 2815 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar) 2816 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 2817 } else { 2818 /* 2819 * No rtt measurement yet - use the unsmoothed rtt. 2820 * Set the variance to half the rtt (so our first 2821 * retransmit happens at 3*rtt). 2822 */ 2823 tp->t_srtt = rtt << TCP_RTT_SHIFT; 2824 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1); 2825 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 2826 } 2827 tp->t_rtttime = 0; 2828 tp->t_rxtshift = 0; 2829 2830 /* 2831 * the retransmit should happen at rtt + 4 * rttvar. 2832 * Because of the way we do the smoothing, srtt and rttvar 2833 * will each average +1/2 tick of bias. When we compute 2834 * the retransmit timer, we want 1/2 tick of rounding and 2835 * 1 extra tick because of +-1/2 tick uncertainty in the 2836 * firing of the timer. The bias will give us exactly the 2837 * 1.5 tick we need. But, because the bias is 2838 * statistical, we have to test that we don't drop below 2839 * the minimum feasible timer (which is 2 ticks). 2840 */ 2841 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp), 2842 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX); 2843 2844 /* 2845 * We received an ack for a packet that wasn't retransmitted; 2846 * it is probably safe to discard any error indications we've 2847 * received recently. This isn't quite right, but close enough 2848 * for now (a route might have failed after we sent a segment, 2849 * and the return path might not be symmetrical). 2850 */ 2851 tp->t_softerror = 0; 2852 } 2853 2854 /* 2855 * Determine a reasonable value for maxseg size. 2856 * If the route is known, check route for mtu. 2857 * If none, use an mss that can be handled on the outgoing 2858 * interface without forcing IP to fragment; if bigger than 2859 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES 2860 * to utilize large mbufs. If no route is found, route has no mtu, 2861 * or the destination isn't local, use a default, hopefully conservative 2862 * size (usually 512 or the default IP max size, but no more than the mtu 2863 * of the interface), as we can't discover anything about intervening 2864 * gateways or networks. We also initialize the congestion/slow start 2865 * window to be a single segment if the destination isn't local. 2866 * While looking at the routing entry, we also initialize other path-dependent 2867 * parameters from pre-set or cached values in the routing entry. 2868 * 2869 * Also take into account the space needed for options that we 2870 * send regularly. Make maxseg shorter by that amount to assure 2871 * that we can send maxseg amount of data even when the options 2872 * are present. Store the upper limit of the length of options plus 2873 * data in maxopd. 2874 * 2875 * NOTE that this routine is only called when we process an incoming 2876 * segment, for outgoing segments only tcp_mssopt is called. 2877 * 2878 * In case of T/TCP, we call this routine during implicit connection 2879 * setup as well (offer = -1), to initialize maxseg from the cached 2880 * MSS of our peer. 2881 */ 2882 void 2883 tcp_mss(struct tcpcb *tp, int offer) 2884 { 2885 struct rtentry *rt; 2886 struct ifnet *ifp; 2887 int rtt, mss; 2888 u_long bufsize; 2889 struct inpcb *inp = tp->t_inpcb; 2890 struct socket *so; 2891 struct rmxp_tao *taop; 2892 int origoffer = offer; 2893 #ifdef INET6 2894 boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) ? TRUE : FALSE); 2895 size_t min_protoh = isipv6 ? 2896 sizeof(struct ip6_hdr) + sizeof(struct tcphdr) : 2897 sizeof(struct tcpiphdr); 2898 #else 2899 const boolean_t isipv6 = FALSE; 2900 const size_t min_protoh = sizeof(struct tcpiphdr); 2901 #endif 2902 2903 if (isipv6) 2904 rt = tcp_rtlookup6(&inp->inp_inc); 2905 else 2906 rt = tcp_rtlookup(&inp->inp_inc); 2907 if (rt == NULL) { 2908 tp->t_maxopd = tp->t_maxseg = 2909 (isipv6 ? tcp_v6mssdflt : tcp_mssdflt); 2910 return; 2911 } 2912 ifp = rt->rt_ifp; 2913 so = inp->inp_socket; 2914 2915 taop = rmx_taop(rt->rt_rmx); 2916 /* 2917 * Offer == -1 means that we didn't receive SYN yet, 2918 * use cached value in that case; 2919 */ 2920 if (offer == -1) 2921 offer = taop->tao_mssopt; 2922 /* 2923 * Offer == 0 means that there was no MSS on the SYN segment, 2924 * in this case we use tcp_mssdflt. 2925 */ 2926 if (offer == 0) 2927 offer = (isipv6 ? tcp_v6mssdflt : tcp_mssdflt); 2928 else 2929 /* 2930 * Sanity check: make sure that maxopd will be large 2931 * enough to allow some data on segments even is the 2932 * all the option space is used (40bytes). Otherwise 2933 * funny things may happen in tcp_output. 2934 */ 2935 offer = max(offer, 64); 2936 taop->tao_mssopt = offer; 2937 2938 /* 2939 * While we're here, check if there's an initial rtt 2940 * or rttvar. Convert from the route-table units 2941 * to scaled multiples of the slow timeout timer. 2942 */ 2943 if (tp->t_srtt == 0 && (rtt = rt->rt_rmx.rmx_rtt)) { 2944 /* 2945 * XXX the lock bit for RTT indicates that the value 2946 * is also a minimum value; this is subject to time. 2947 */ 2948 if (rt->rt_rmx.rmx_locks & RTV_RTT) 2949 tp->t_rttmin = rtt / (RTM_RTTUNIT / hz); 2950 tp->t_srtt = rtt / (RTM_RTTUNIT / (hz * TCP_RTT_SCALE)); 2951 tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE; 2952 tcpstat.tcps_usedrtt++; 2953 if (rt->rt_rmx.rmx_rttvar) { 2954 tp->t_rttvar = rt->rt_rmx.rmx_rttvar / 2955 (RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE)); 2956 tcpstat.tcps_usedrttvar++; 2957 } else { 2958 /* default variation is +- 1 rtt */ 2959 tp->t_rttvar = 2960 tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE; 2961 } 2962 TCPT_RANGESET(tp->t_rxtcur, 2963 ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1, 2964 tp->t_rttmin, TCPTV_REXMTMAX); 2965 } 2966 /* 2967 * if there's an mtu associated with the route, use it 2968 * else, use the link mtu. 2969 */ 2970 if (rt->rt_rmx.rmx_mtu) 2971 mss = rt->rt_rmx.rmx_mtu - min_protoh; 2972 else { 2973 if (isipv6) { 2974 mss = ND_IFINFO(rt->rt_ifp)->linkmtu - min_protoh; 2975 if (!in6_localaddr(&inp->in6p_faddr)) 2976 mss = min(mss, tcp_v6mssdflt); 2977 } else { 2978 mss = ifp->if_mtu - min_protoh; 2979 if (!in_localaddr(inp->inp_faddr)) 2980 mss = min(mss, tcp_mssdflt); 2981 } 2982 } 2983 mss = min(mss, offer); 2984 /* 2985 * maxopd stores the maximum length of data AND options 2986 * in a segment; maxseg is the amount of data in a normal 2987 * segment. We need to store this value (maxopd) apart 2988 * from maxseg, because now every segment carries options 2989 * and thus we normally have somewhat less data in segments. 2990 */ 2991 tp->t_maxopd = mss; 2992 2993 /* 2994 * In case of T/TCP, origoffer==-1 indicates, that no segments 2995 * were received yet. In this case we just guess, otherwise 2996 * we do the same as before T/TCP. 2997 */ 2998 if ((tp->t_flags & (TF_REQ_TSTMP | TF_NOOPT)) == TF_REQ_TSTMP && 2999 (origoffer == -1 || 3000 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)) 3001 mss -= TCPOLEN_TSTAMP_APPA; 3002 if ((tp->t_flags & (TF_REQ_CC | TF_NOOPT)) == TF_REQ_CC && 3003 (origoffer == -1 || 3004 (tp->t_flags & TF_RCVD_CC) == TF_RCVD_CC)) 3005 mss -= TCPOLEN_CC_APPA; 3006 3007 #if (MCLBYTES & (MCLBYTES - 1)) == 0 3008 if (mss > MCLBYTES) 3009 mss &= ~(MCLBYTES-1); 3010 #else 3011 if (mss > MCLBYTES) 3012 mss = mss / MCLBYTES * MCLBYTES; 3013 #endif 3014 /* 3015 * If there's a pipesize, change the socket buffer 3016 * to that size. Make the socket buffers an integral 3017 * number of mss units; if the mss is larger than 3018 * the socket buffer, decrease the mss. 3019 */ 3020 #ifdef RTV_SPIPE 3021 if ((bufsize = rt->rt_rmx.rmx_sendpipe) == 0) 3022 #endif 3023 bufsize = so->so_snd.sb_hiwat; 3024 if (bufsize < mss) 3025 mss = bufsize; 3026 else { 3027 bufsize = roundup(bufsize, mss); 3028 if (bufsize > sb_max) 3029 bufsize = sb_max; 3030 if (bufsize > so->so_snd.sb_hiwat) 3031 sbreserve(&so->so_snd, bufsize, so, NULL); 3032 } 3033 tp->t_maxseg = mss; 3034 3035 #ifdef RTV_RPIPE 3036 if ((bufsize = rt->rt_rmx.rmx_recvpipe) == 0) 3037 #endif 3038 bufsize = so->so_rcv.sb_hiwat; 3039 if (bufsize > mss) { 3040 bufsize = roundup(bufsize, mss); 3041 if (bufsize > sb_max) 3042 bufsize = sb_max; 3043 if (bufsize > so->so_rcv.sb_hiwat) 3044 sbreserve(&so->so_rcv, bufsize, so, NULL); 3045 } 3046 3047 /* 3048 * Set the slow-start flight size depending on whether this 3049 * is a local network or not. 3050 */ 3051 if (tcp_do_rfc3390) 3052 tp->snd_cwnd = min(4 * mss, max(2 * mss, 4380)); 3053 else 3054 tp->snd_cwnd = mss; 3055 3056 if (rt->rt_rmx.rmx_ssthresh) { 3057 /* 3058 * There's some sort of gateway or interface 3059 * buffer limit on the path. Use this to set 3060 * the slow start threshhold, but set the 3061 * threshold to no less than 2*mss. 3062 */ 3063 tp->snd_ssthresh = max(2 * mss, rt->rt_rmx.rmx_ssthresh); 3064 tcpstat.tcps_usedssthresh++; 3065 } 3066 } 3067 3068 /* 3069 * Determine the MSS option to send on an outgoing SYN. 3070 */ 3071 int 3072 tcp_mssopt(struct tcpcb *tp) 3073 { 3074 struct rtentry *rt; 3075 #ifdef INET6 3076 boolean_t isipv6 = 3077 ((tp->t_inpcb->inp_vflag & INP_IPV6) ? TRUE : FALSE); 3078 int min_protoh = isipv6 ? 3079 sizeof(struct ip6_hdr) + sizeof(struct tcphdr) : 3080 sizeof(struct tcpiphdr); 3081 #else 3082 const boolean_t isipv6 = FALSE; 3083 const size_t min_protoh = sizeof(struct tcpiphdr); 3084 #endif 3085 3086 if (isipv6) 3087 rt = tcp_rtlookup6(&tp->t_inpcb->inp_inc); 3088 else 3089 rt = tcp_rtlookup(&tp->t_inpcb->inp_inc); 3090 if (rt == NULL) 3091 return (isipv6 ? tcp_v6mssdflt : tcp_mssdflt); 3092 3093 return (rt->rt_ifp->if_mtu - min_protoh); 3094 } 3095 3096 /* 3097 * When a partial ack arrives, force the retransmission of the 3098 * next unacknowledged segment. Do not exit Fast Recovery. 3099 * 3100 * Implement the Slow-but-Steady variant of NewReno by restarting the 3101 * the retransmission timer. Turn it off here so it can be restarted 3102 * later in tcp_output(). 3103 */ 3104 static void 3105 tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th, int acked) 3106 { 3107 tcp_seq old_snd_nxt = tp->snd_nxt; 3108 u_long ocwnd = tp->snd_cwnd; 3109 3110 callout_stop(tp->tt_rexmt); 3111 tp->t_rtttime = 0; 3112 tp->snd_nxt = th->th_ack; 3113 /* Set snd_cwnd to one segment beyond acknowledged offset. */ 3114 tp->snd_cwnd = tp->t_maxseg; 3115 tp->t_flags |= TF_ACKNOW; 3116 tcp_output(tp); 3117 if (SEQ_GT(old_snd_nxt, tp->snd_nxt)) 3118 tp->snd_nxt = old_snd_nxt; 3119 /* partial window deflation */ 3120 if (ocwnd > acked) 3121 tp->snd_cwnd = ocwnd - acked + tp->t_maxseg; 3122 else 3123 tp->snd_cwnd = tp->t_maxseg; 3124 } 3125 3126 /* 3127 * In contrast to the Slow-but-Steady NewReno variant, 3128 * we do not reset the retransmission timer for SACK retransmissions, 3129 * except when retransmitting snd_una. 3130 */ 3131 static void 3132 tcp_sack_rexmt(struct tcpcb *tp, struct tcphdr *th) 3133 { 3134 uint32_t pipe, seglen; 3135 tcp_seq nextrexmt; 3136 boolean_t lostdup; 3137 tcp_seq old_snd_nxt = tp->snd_nxt; 3138 u_long ocwnd = tp->snd_cwnd; 3139 int nseg = 0; /* consecutive new segments */ 3140 #define MAXBURST 4 /* limit burst of new packets on partial ack */ 3141 3142 tp->t_rtttime = 0; 3143 pipe = tcp_sack_compute_pipe(tp); 3144 while ((tcp_seq_diff_t)(ocwnd - pipe) >= (tcp_seq_diff_t)tp->t_maxseg && 3145 (!tcp_do_smartsack || nseg < MAXBURST) && 3146 tcp_sack_nextseg(tp, &nextrexmt, &seglen, &lostdup)) { 3147 uint32_t sent; 3148 tcp_seq old_snd_max; 3149 int error; 3150 3151 if (nextrexmt == tp->snd_max) ++nseg; 3152 tp->snd_nxt = nextrexmt; 3153 tp->snd_cwnd = nextrexmt - tp->snd_una + seglen; 3154 old_snd_max = tp->snd_max; 3155 if (nextrexmt == tp->snd_una) 3156 callout_stop(tp->tt_rexmt); 3157 error = tcp_output(tp); 3158 if (error != 0) 3159 break; 3160 sent = tp->snd_nxt - nextrexmt; 3161 if (sent <= 0) 3162 break; 3163 if (!lostdup) 3164 pipe += sent; 3165 tcpstat.tcps_sndsackpack++; 3166 tcpstat.tcps_sndsackbyte += sent; 3167 if (SEQ_LT(nextrexmt, old_snd_max) && 3168 SEQ_LT(tp->rexmt_high, tp->snd_nxt)) 3169 tp->rexmt_high = seq_min(tp->snd_nxt, old_snd_max); 3170 } 3171 if (SEQ_GT(old_snd_nxt, tp->snd_nxt)) 3172 tp->snd_nxt = old_snd_nxt; 3173 tp->snd_cwnd = ocwnd; 3174 } 3175