/*
 * Copyright (c) 2002, 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2002, 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)tcp_input.c 8.12 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_input.c,v 1.107.2.38 2003/05/21 04:46:41 cjc Exp $
 * $DragonFly: src/sys/netinet/tcp_input.c,v 1.68 2008/08/22 09:14:17 sephe Exp $
 */

#include "opt_ipfw.h"           /* for ipfw_fwd */
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"
#include "opt_tcp_input.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>           /* for proc0 declaration */
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/syslog.h>
#include <sys/in_cksum.h>

#include <sys/socketvar2.h>

#include <machine/cpu.h>        /* before tcp_seq.h, for tcp_random18() */
#include <machine/stdarg.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>    /* for ICMP_BANDLIM */
#include <netinet/in_var.h>
#include <netinet/icmp_var.h>   /* for ICMP_BANDLIM */
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_timer2.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>

#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>

u_char tcp_saveipgen[40];    /* the size must be of max ip header, now IPv6 */
struct tcphdr tcp_savetcp;
#endif

#ifdef FAST_IPSEC
#include <netproto/ipsec/ipsec.h>
#include <netproto/ipsec/ipsec6.h>
#endif

#ifdef IPSEC
#include <netinet6/ipsec.h>
#include <netinet6/ipsec6.h>
#include <netproto/key/key.h>
#endif

MALLOC_DEFINE(M_TSEGQ, "tseg_qent", "TCP segment queue entry");

static int log_in_vain = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW,
    &log_in_vain, 0, "Log all incoming TCP connections");

static int blackhole = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW,
    &blackhole, 0, "Do not send RST when dropping refused connections");

int tcp_delack_enabled = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW,
    &tcp_delack_enabled, 0,
    "Delay ACK to try and piggyback it onto a data packet");

#ifdef TCP_DROP_SYNFIN
static int drop_synfin = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_RW,
    &drop_synfin, 0, "Drop TCP packets with SYN+FIN set");
#endif
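
/*
 * A note on the knobs above and below: each SYSCTL_INT() entry is
 * published under the net.inet.tcp node, so the settings can be
 * inspected and (for the CTLFLAG_RW ones) changed at runtime with
 * sysctl(8), e.g.:
 *
 *      sysctl net.inet.tcp.delayed_ack=0       # disable delayed ACKs
 *      sysctl net.inet.tcp.sack                # read the SACK toggle
 *
 * Mind the warning at tcp_do_sack below about flipping SACK while
 * flows are active.
 */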

static int tcp_do_limitedtransmit = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, limitedtransmit, CTLFLAG_RW,
    &tcp_do_limitedtransmit, 0, "Enable RFC 3042 (Limited Transmit)");

static int tcp_do_early_retransmit = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, earlyretransmit, CTLFLAG_RW,
    &tcp_do_early_retransmit, 0, "Early retransmit");

int tcp_aggregate_acks = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, aggregate_acks, CTLFLAG_RW,
    &tcp_aggregate_acks, 0, "Aggregate built-up acks into one ack");

int tcp_do_rfc3390 = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_RW,
    &tcp_do_rfc3390, 0,
    "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");

static int tcp_do_eifel_detect = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, eifel, CTLFLAG_RW,
    &tcp_do_eifel_detect, 0, "Eifel detection algorithm (RFC 3522)");

static int tcp_do_abc = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, abc, CTLFLAG_RW,
    &tcp_do_abc, 0,
    "TCP Appropriate Byte Counting (RFC 3465)");

/*
 * Define as tunable for easy testing with SACK on and off.
 * Warning: do not change setting in the middle of an existing active TCP flow,
 * else strange things might happen to that flow.
 */
int tcp_do_sack = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sack, CTLFLAG_RW,
    &tcp_do_sack, 0, "Enable SACK Algorithms");

int tcp_do_smartsack = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, smartsack, CTLFLAG_RW,
    &tcp_do_smartsack, 0, "Enable Smart SACK Algorithms");

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, reass, CTLFLAG_RW, 0,
    "TCP Segment Reassembly Queue");

int tcp_reass_maxseg = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, maxsegments, CTLFLAG_RD,
    &tcp_reass_maxseg, 0,
    "Global maximum number of TCP Segments in Reassembly Queue");

int tcp_reass_qsize = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, cursegments, CTLFLAG_RD,
    &tcp_reass_qsize, 0,
    "Global number of TCP Segments currently in Reassembly Queue");

static int tcp_reass_overflows = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, overflows, CTLFLAG_RD,
    &tcp_reass_overflows, 0,
    "Global number of TCP Segment Reassembly Queue Overflows");

int tcp_do_autorcvbuf = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_RW,
    &tcp_do_autorcvbuf, 0, "Enable automatic receive buffer sizing");

int tcp_autorcvbuf_inc = 16*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_RW,
    &tcp_autorcvbuf_inc, 0,
    "Incrementor step size of automatic receive buffer");

int tcp_autorcvbuf_max = 2*1024*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_RW,
    &tcp_autorcvbuf_max, 0, "Max size of automatic receive buffer");

int tcp_sosnd_agglim = 2;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sosnd_agglim, CTLFLAG_RW,
    &tcp_sosnd_agglim, 0, "TCP sosend mbuf aggregation limit");

int tcp_sosnd_async = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sosnd_async, CTLFLAG_RW,
    &tcp_sosnd_async, 0, "TCP asynchronized pru_send");

static void      tcp_dooptions(struct tcpopt *, u_char *, int, boolean_t);
static void      tcp_pulloutofband(struct socket *,
                     struct tcphdr *, struct mbuf *, int);
static int       tcp_reass(struct tcpcb *, struct tcphdr *, int *,
                     struct mbuf *);
static void      tcp_xmit_timer(struct tcpcb *, int);
static void      tcp_newreno_partial_ack(struct tcpcb *, struct tcphdr *, int);
static void      tcp_sack_rexmt(struct tcpcb *, struct tcphdr *);
static int       tcp_rmx_msl(const struct tcpcb *);

/* Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. */
#ifdef INET6
#define ND6_HINT(tp) \
do { \
        if ((tp) && (tp)->t_inpcb && \
            ((tp)->t_inpcb->inp_vflag & INP_IPV6) && \
            (tp)->t_inpcb->in6p_route.ro_rt) \
                nd6_nud_hint((tp)->t_inpcb->in6p_route.ro_rt, NULL, 0); \
} while (0)
#else
#define ND6_HINT(tp)
#endif

/*
 * Indicate whether this ack should be delayed.  We can delay the ack if
 *      - delayed acks are enabled and
 *      - there is no delayed ack timer in progress and
 *      - our last ack wasn't a 0-sized window.  We never want to delay
 *        the ack that opens up a 0-sized window.
 */
#define DELAY_ACK(tp) \
        (tcp_delack_enabled && !tcp_callout_pending(tp, tp->tt_delack) && \
         !(tp->t_flags & TF_RXWIN0SENT))

#define acceptable_window_update(tp, th, tiwin) \
        (SEQ_LT(tp->snd_wl1, th->th_seq) || \
         (tp->snd_wl1 == th->th_seq && \
          (SEQ_LT(tp->snd_wl2, th->th_ack) || \
           (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))

static int
tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m)
{
        struct tseg_qent *q;
        struct tseg_qent *p = NULL;
        struct tseg_qent *te;
        struct socket *so = tp->t_inpcb->inp_socket;
        int flags;

        /*
         * Call with th == NULL after becoming established to
         * force pre-ESTABLISHED data up to user socket.
         */
        if (th == NULL)
                goto present;

        /*
         * Limit the number of segments in the reassembly queue to prevent
         * holding on to too many segments (and thus running out of mbufs).
         * Make sure to let the missing segment through which caused this
         * queue.  Always keep one global queue entry spare to be able to
         * process the missing segment.
         */
        if (th->th_seq != tp->rcv_nxt &&
            tcp_reass_qsize + 1 >= tcp_reass_maxseg) {
                tcp_reass_overflows++;
                tcpstat.tcps_rcvmemdrop++;
                m_freem(m);
                /* no SACK block to report */
                tp->reportblk.rblk_start = tp->reportblk.rblk_end;
                return (0);
        }

        /* Allocate a new queue entry. */
        MALLOC(te, struct tseg_qent *, sizeof(struct tseg_qent), M_TSEGQ,
               M_INTWAIT | M_NULLOK);
        if (te == NULL) {
                tcpstat.tcps_rcvmemdrop++;
                m_freem(m);
                /* no SACK block to report */
                tp->reportblk.rblk_start = tp->reportblk.rblk_end;
                return (0);
        }
        atomic_add_int(&tcp_reass_qsize, 1);

        /*
         * Find a segment which begins after this one does.
         */
        LIST_FOREACH(q, &tp->t_segq, tqe_q) {
                if (SEQ_GT(q->tqe_th->th_seq, th->th_seq))
                        break;
                p = q;
        }

        /*
         * If there is a preceding segment, it may provide some of
         * our data already.  If so, drop the data from the incoming
         * segment.  If it provides all of our data, drop us.
         */
        if (p != NULL) {
                tcp_seq_diff_t i;

                /* conversion to int (in i) handles seq wraparound */
                i = p->tqe_th->th_seq + p->tqe_len - th->th_seq;
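
                /*
                 * A worked example of the wraparound case: if the
                 * preceding segment has th_seq = 0xfffffff0 and
                 * tqe_len = 0x30, the unsigned sum above wraps to an
                 * end of 0x20; for an incoming th_seq of 0x10 the
                 * signed difference i is then 0x10, i.e. the first 16
                 * bytes of the incoming segment are already queued.
                 * A plain unsigned compare would get this wrong.
                 */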
                if (i > 0) {            /* overlaps preceding segment */
                        tp->t_flags |= (TF_DUPSEG | TF_ENCLOSESEG);
                        /* enclosing block starts w/ preceding segment */
                        tp->encloseblk.rblk_start = p->tqe_th->th_seq;
                        if (i >= *tlenp) {
                                /* preceding encloses incoming segment */
                                tp->encloseblk.rblk_end = p->tqe_th->th_seq +
                                    p->tqe_len;
                                tcpstat.tcps_rcvduppack++;
                                tcpstat.tcps_rcvdupbyte += *tlenp;
                                m_freem(m);
                                kfree(te, M_TSEGQ);
                                atomic_add_int(&tcp_reass_qsize, -1);
                                /*
                                 * Try to present any queued data
                                 * at the left window edge to the user.
                                 * This is needed after the 3-WHS
                                 * completes.
                                 */
                                goto present;   /* ??? */
                        }
                        m_adj(m, i);
                        *tlenp -= i;
                        th->th_seq += i;
                        /* incoming segment end is enclosing block end */
                        tp->encloseblk.rblk_end = th->th_seq + *tlenp +
                            ((th->th_flags & TH_FIN) != 0);
                        /* trim end of reported D-SACK block */
                        tp->reportblk.rblk_end = th->th_seq;
                }
        }
        tcpstat.tcps_rcvoopack++;
        tcpstat.tcps_rcvoobyte += *tlenp;

        /*
         * While we overlap succeeding segments trim them or,
         * if they are completely covered, dequeue them.
         */
        while (q) {
                tcp_seq_diff_t i = (th->th_seq + *tlenp) - q->tqe_th->th_seq;
                tcp_seq qend = q->tqe_th->th_seq + q->tqe_len;
                struct tseg_qent *nq;

                if (i <= 0)
                        break;
                if (!(tp->t_flags & TF_DUPSEG)) {       /* first time through */
                        tp->t_flags |= (TF_DUPSEG | TF_ENCLOSESEG);
                        tp->encloseblk = tp->reportblk;
                        /* report trailing duplicate D-SACK segment */
                        tp->reportblk.rblk_start = q->tqe_th->th_seq;
                }
                if ((tp->t_flags & TF_ENCLOSESEG) &&
                    SEQ_GT(qend, tp->encloseblk.rblk_end)) {
                        /* extend enclosing block if one exists */
                        tp->encloseblk.rblk_end = qend;
                }
                if (i < q->tqe_len) {
                        q->tqe_th->th_seq += i;
                        q->tqe_len -= i;
                        m_adj(q->tqe_m, i);
                        break;
                }

                nq = LIST_NEXT(q, tqe_q);
                LIST_REMOVE(q, tqe_q);
                m_freem(q->tqe_m);
                kfree(q, M_TSEGQ);
                atomic_add_int(&tcp_reass_qsize, -1);
                q = nq;
        }

        /* Insert the new segment queue entry into place. */
        te->tqe_m = m;
        te->tqe_th = th;
        te->tqe_len = *tlenp;

        /* check if can coalesce with following segment */
        if (q != NULL && (th->th_seq + *tlenp == q->tqe_th->th_seq)) {
                tcp_seq tend = te->tqe_th->th_seq + te->tqe_len;

                te->tqe_len += q->tqe_len;
                if (q->tqe_th->th_flags & TH_FIN)
                        te->tqe_th->th_flags |= TH_FIN;
                m_cat(te->tqe_m, q->tqe_m);
                tp->encloseblk.rblk_end = tend;
                /*
                 * When not reporting a duplicate segment, use
                 * the larger enclosing block as the SACK block.
                 */
                if (!(tp->t_flags & TF_DUPSEG))
                        tp->reportblk.rblk_end = tend;
                LIST_REMOVE(q, tqe_q);
                kfree(q, M_TSEGQ);
                atomic_add_int(&tcp_reass_qsize, -1);
        }

        if (p == NULL) {
                LIST_INSERT_HEAD(&tp->t_segq, te, tqe_q);
        } else {
                /* check if can coalesce with preceding segment */
                if (p->tqe_th->th_seq + p->tqe_len == th->th_seq) {
                        p->tqe_len += te->tqe_len;
                        m_cat(p->tqe_m, te->tqe_m);
                        tp->encloseblk.rblk_start = p->tqe_th->th_seq;
                        /*
                         * When not reporting a duplicate segment, use
                         * the larger enclosing block as the SACK block.
                         */
                        if (!(tp->t_flags & TF_DUPSEG))
                                tp->reportblk.rblk_start = p->tqe_th->th_seq;
                        kfree(te, M_TSEGQ);
                        atomic_add_int(&tcp_reass_qsize, -1);
                } else {
                        LIST_INSERT_AFTER(p, te, tqe_q);
                }
        }
463 */ 464 if (!TCPS_HAVEESTABLISHED(tp->t_state)) 465 return (0); 466 q = LIST_FIRST(&tp->t_segq); 467 if (q == NULL || q->tqe_th->th_seq != tp->rcv_nxt) 468 return (0); 469 tp->rcv_nxt += q->tqe_len; 470 if (!(tp->t_flags & TF_DUPSEG)) { 471 /* no SACK block to report since ACK advanced */ 472 tp->reportblk.rblk_start = tp->reportblk.rblk_end; 473 } 474 /* no enclosing block to report since ACK advanced */ 475 tp->t_flags &= ~TF_ENCLOSESEG; 476 flags = q->tqe_th->th_flags & TH_FIN; 477 LIST_REMOVE(q, tqe_q); 478 KASSERT(LIST_EMPTY(&tp->t_segq) || 479 LIST_FIRST(&tp->t_segq)->tqe_th->th_seq != tp->rcv_nxt, 480 ("segment not coalesced")); 481 if (so->so_state & SS_CANTRCVMORE) { 482 m_freem(q->tqe_m); 483 } else { 484 lwkt_gettoken(&so->so_rcv.ssb_token); 485 ssb_appendstream(&so->so_rcv, q->tqe_m); 486 lwkt_reltoken(&so->so_rcv.ssb_token); 487 } 488 kfree(q, M_TSEGQ); 489 atomic_add_int(&tcp_reass_qsize, -1); 490 ND6_HINT(tp); 491 sorwakeup(so); 492 return (flags); 493 } 494 495 /* 496 * TCP input routine, follows pages 65-76 of the 497 * protocol specification dated September, 1981 very closely. 498 */ 499 #ifdef INET6 500 int 501 tcp6_input(struct mbuf **mp, int *offp, int proto) 502 { 503 struct mbuf *m = *mp; 504 struct in6_ifaddr *ia6; 505 506 IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE); 507 508 /* 509 * draft-itojun-ipv6-tcp-to-anycast 510 * better place to put this in? 511 */ 512 ia6 = ip6_getdstifaddr(m); 513 if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) { 514 struct ip6_hdr *ip6; 515 516 ip6 = mtod(m, struct ip6_hdr *); 517 icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR, 518 offsetof(struct ip6_hdr, ip6_dst)); 519 return (IPPROTO_DONE); 520 } 521 522 tcp_input(mp, offp, proto); 523 return (IPPROTO_DONE); 524 } 525 #endif 526 527 int 528 tcp_input(struct mbuf **mp, int *offp, int proto) 529 { 530 int off0; 531 struct tcphdr *th; 532 struct ip *ip = NULL; 533 struct ipovly *ipov; 534 struct inpcb *inp = NULL; 535 u_char *optp = NULL; 536 int optlen = 0; 537 int tlen, off; 538 int len = 0; 539 int drop_hdrlen; 540 struct tcpcb *tp = NULL; 541 int thflags; 542 struct socket *so = 0; 543 int todrop, acked; 544 boolean_t ourfinisacked, needoutput = FALSE; 545 u_long tiwin; 546 int recvwin; 547 struct tcpopt to; /* options in this segment */ 548 struct sockaddr_in *next_hop = NULL; 549 int rstreason; /* For badport_bandlim accounting purposes */ 550 int cpu; 551 struct ip6_hdr *ip6 = NULL; 552 struct mbuf *m; 553 #ifdef INET6 554 boolean_t isipv6; 555 #else 556 const boolean_t isipv6 = FALSE; 557 #endif 558 #ifdef TCPDEBUG 559 short ostate = 0; 560 #endif 561 562 off0 = *offp; 563 m = *mp; 564 *mp = NULL; 565 566 tcpstat.tcps_rcvtotal++; 567 568 if (m->m_pkthdr.fw_flags & IPFORWARD_MBUF_TAGGED) { 569 struct m_tag *mtag; 570 571 mtag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL); 572 KKASSERT(mtag != NULL); 573 next_hop = m_tag_data(mtag); 574 } 575 576 #ifdef INET6 577 isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? TRUE : FALSE; 578 #endif 579 580 if (isipv6) { 581 /* IP6_EXTHDR_CHECK() is already done at tcp6_input() */ 582 ip6 = mtod(m, struct ip6_hdr *); 583 tlen = (sizeof *ip6) + ntohs(ip6->ip6_plen) - off0; 584 if (in6_cksum(m, IPPROTO_TCP, off0, tlen)) { 585 tcpstat.tcps_rcvbadsum++; 586 goto drop; 587 } 588 th = (struct tcphdr *)((caddr_t)ip6 + off0); 589 590 /* 591 * Be proactive about unspecified IPv6 address in source. 592 * As we use all-zero to indicate unbounded/unconnected pcb, 593 * unspecified IPv6 address can be used to confuse us. 
594 * 595 * Note that packets with unspecified IPv6 destination is 596 * already dropped in ip6_input. 597 */ 598 if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) { 599 /* XXX stat */ 600 goto drop; 601 } 602 } else { 603 /* 604 * Get IP and TCP header together in first mbuf. 605 * Note: IP leaves IP header in first mbuf. 606 */ 607 if (off0 > sizeof(struct ip)) { 608 ip_stripoptions(m); 609 off0 = sizeof(struct ip); 610 } 611 /* already checked and pulled up in ip_demux() */ 612 KASSERT(m->m_len >= sizeof(struct tcpiphdr), 613 ("TCP header not in one mbuf: m->m_len %d", m->m_len)); 614 ip = mtod(m, struct ip *); 615 ipov = (struct ipovly *)ip; 616 th = (struct tcphdr *)((caddr_t)ip + off0); 617 tlen = ip->ip_len; 618 619 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) { 620 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) 621 th->th_sum = m->m_pkthdr.csum_data; 622 else 623 th->th_sum = in_pseudo(ip->ip_src.s_addr, 624 ip->ip_dst.s_addr, 625 htonl(m->m_pkthdr.csum_data + 626 ip->ip_len + 627 IPPROTO_TCP)); 628 th->th_sum ^= 0xffff; 629 } else { 630 /* 631 * Checksum extended TCP header and data. 632 */ 633 len = sizeof(struct ip) + tlen; 634 bzero(ipov->ih_x1, sizeof ipov->ih_x1); 635 ipov->ih_len = (u_short)tlen; 636 ipov->ih_len = htons(ipov->ih_len); 637 th->th_sum = in_cksum(m, len); 638 } 639 if (th->th_sum) { 640 tcpstat.tcps_rcvbadsum++; 641 goto drop; 642 } 643 #ifdef INET6 644 /* Re-initialization for later version check */ 645 ip->ip_v = IPVERSION; 646 #endif 647 } 648 649 /* 650 * Check that TCP offset makes sense, 651 * pull out TCP options and adjust length. XXX 652 */ 653 off = th->th_off << 2; 654 /* already checked and pulled up in ip_demux() */ 655 KASSERT(off >= sizeof(struct tcphdr) && off <= tlen, 656 ("bad TCP data offset %d (tlen %d)", off, tlen)); 657 tlen -= off; /* tlen is used instead of ti->ti_len */ 658 if (off > sizeof(struct tcphdr)) { 659 if (isipv6) { 660 IP6_EXTHDR_CHECK(m, off0, off, IPPROTO_DONE); 661 ip6 = mtod(m, struct ip6_hdr *); 662 th = (struct tcphdr *)((caddr_t)ip6 + off0); 663 } else { 664 /* already pulled up in ip_demux() */ 665 KASSERT(m->m_len >= sizeof(struct ip) + off, 666 ("TCP header and options not in one mbuf: " 667 "m_len %d, off %d", m->m_len, off)); 668 } 669 optlen = off - sizeof(struct tcphdr); 670 optp = (u_char *)(th + 1); 671 } 672 thflags = th->th_flags; 673 674 #ifdef TCP_DROP_SYNFIN 675 /* 676 * If the drop_synfin option is enabled, drop all packets with 677 * both the SYN and FIN bits set. This prevents e.g. nmap from 678 * identifying the TCP/IP stack. 679 * 680 * This is a violation of the TCP specification. 681 */ 682 if (drop_synfin && (thflags & (TH_SYN | TH_FIN)) == (TH_SYN | TH_FIN)) 683 goto drop; 684 #endif 685 686 /* 687 * Convert TCP protocol specific fields to host format. 688 */ 689 th->th_seq = ntohl(th->th_seq); 690 th->th_ack = ntohl(th->th_ack); 691 th->th_win = ntohs(th->th_win); 692 th->th_urp = ntohs(th->th_urp); 693 694 /* 695 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options, 696 * until after ip6_savecontrol() is called and before other functions 697 * which don't want those proto headers. 698 * Because ip6_savecontrol() is going to parse the mbuf to 699 * search for data to be passed up to user-land, it wants mbuf 700 * parameters to be unchanged. 701 * XXX: the call of ip6_savecontrol() has been obsoleted based on 702 * latest version of the advanced API (20020110). 703 */ 704 drop_hdrlen = off0 + off; 705 706 /* 707 * Locate pcb for segment. 
708 */ 709 findpcb: 710 /* IPFIREWALL_FORWARD section */ 711 if (next_hop != NULL && !isipv6) { /* IPv6 support is not there yet */ 712 /* 713 * Transparently forwarded. Pretend to be the destination. 714 * already got one like this? 715 */ 716 cpu = mycpu->gd_cpuid; 717 inp = in_pcblookup_hash(&tcbinfo[cpu], 718 ip->ip_src, th->th_sport, 719 ip->ip_dst, th->th_dport, 720 0, m->m_pkthdr.rcvif); 721 if (!inp) { 722 /* 723 * It's new. Try to find the ambushing socket. 724 */ 725 726 /* 727 * The rest of the ipfw code stores the port in 728 * host order. XXX 729 * (The IP address is still in network order.) 730 */ 731 in_port_t dport = next_hop->sin_port ? 732 htons(next_hop->sin_port) : 733 th->th_dport; 734 735 cpu = tcp_addrcpu(ip->ip_src.s_addr, th->th_sport, 736 next_hop->sin_addr.s_addr, dport); 737 inp = in_pcblookup_hash(&tcbinfo[cpu], 738 ip->ip_src, th->th_sport, 739 next_hop->sin_addr, dport, 740 1, m->m_pkthdr.rcvif); 741 } 742 } else { 743 if (isipv6) { 744 inp = in6_pcblookup_hash(&tcbinfo[0], 745 &ip6->ip6_src, th->th_sport, 746 &ip6->ip6_dst, th->th_dport, 747 1, m->m_pkthdr.rcvif); 748 } else { 749 cpu = mycpu->gd_cpuid; 750 inp = in_pcblookup_hash(&tcbinfo[cpu], 751 ip->ip_src, th->th_sport, 752 ip->ip_dst, th->th_dport, 753 1, m->m_pkthdr.rcvif); 754 } 755 } 756 757 /* 758 * If the state is CLOSED (i.e., TCB does not exist) then 759 * all data in the incoming segment is discarded. 760 * If the TCB exists but is in CLOSED state, it is embryonic, 761 * but should either do a listen or a connect soon. 762 */ 763 if (inp == NULL) { 764 if (log_in_vain) { 765 #ifdef INET6 766 char dbuf[INET6_ADDRSTRLEN+2], sbuf[INET6_ADDRSTRLEN+2]; 767 #else 768 char dbuf[sizeof "aaa.bbb.ccc.ddd"]; 769 char sbuf[sizeof "aaa.bbb.ccc.ddd"]; 770 #endif 771 if (isipv6) { 772 strcpy(dbuf, "["); 773 strcat(dbuf, ip6_sprintf(&ip6->ip6_dst)); 774 strcat(dbuf, "]"); 775 strcpy(sbuf, "["); 776 strcat(sbuf, ip6_sprintf(&ip6->ip6_src)); 777 strcat(sbuf, "]"); 778 } else { 779 strcpy(dbuf, inet_ntoa(ip->ip_dst)); 780 strcpy(sbuf, inet_ntoa(ip->ip_src)); 781 } 782 switch (log_in_vain) { 783 case 1: 784 if (!(thflags & TH_SYN)) 785 break; 786 case 2: 787 log(LOG_INFO, 788 "Connection attempt to TCP %s:%d " 789 "from %s:%d flags:0x%02x\n", 790 dbuf, ntohs(th->th_dport), sbuf, 791 ntohs(th->th_sport), thflags); 792 break; 793 default: 794 break; 795 } 796 } 797 if (blackhole) { 798 switch (blackhole) { 799 case 1: 800 if (thflags & TH_SYN) 801 goto drop; 802 break; 803 case 2: 804 goto drop; 805 default: 806 goto drop; 807 } 808 } 809 rstreason = BANDLIM_RST_CLOSEDPORT; 810 goto dropwithreset; 811 } 812 813 #ifdef IPSEC 814 if (isipv6) { 815 if (ipsec6_in_reject_so(m, inp->inp_socket)) { 816 ipsec6stat.in_polvio++; 817 goto drop; 818 } 819 } else { 820 if (ipsec4_in_reject_so(m, inp->inp_socket)) { 821 ipsecstat.in_polvio++; 822 goto drop; 823 } 824 } 825 #endif 826 #ifdef FAST_IPSEC 827 if (isipv6) { 828 if (ipsec6_in_reject(m, inp)) 829 goto drop; 830 } else { 831 if (ipsec4_in_reject(m, inp)) 832 goto drop; 833 } 834 #endif 835 /* Check the minimum TTL for socket. */ 836 #ifdef INET6 837 if ((isipv6 ? ip6->ip6_hlim : ip->ip_ttl) < inp->inp_ip_minttl) 838 goto drop; 839 #endif 840 841 tp = intotcpcb(inp); 842 if (tp == NULL) { 843 rstreason = BANDLIM_RST_CLOSEDPORT; 844 goto dropwithreset; 845 } 846 if (tp->t_state <= TCPS_CLOSED) 847 goto drop; 848 849 /* Unscale the window into a 32-bit value. 
        if (!(thflags & TH_SYN))
                tiwin = th->th_win << tp->snd_scale;
        else
                tiwin = th->th_win;

        so = inp->inp_socket;

#ifdef TCPDEBUG
        if (so->so_options & SO_DEBUG) {
                ostate = tp->t_state;
                if (isipv6)
                        bcopy(ip6, tcp_saveipgen, sizeof(*ip6));
                else
                        bcopy(ip, tcp_saveipgen, sizeof(*ip));
                tcp_savetcp = *th;
        }
#endif

        bzero(&to, sizeof to);

        if (so->so_options & SO_ACCEPTCONN) {
                struct in_conninfo inc;

#ifdef INET6
                inc.inc_isipv6 = (isipv6 == TRUE);
#endif
                if (isipv6) {
                        inc.inc6_faddr = ip6->ip6_src;
                        inc.inc6_laddr = ip6->ip6_dst;
                        inc.inc6_route.ro_rt = NULL;            /* XXX */
                } else {
                        inc.inc_faddr = ip->ip_src;
                        inc.inc_laddr = ip->ip_dst;
                        inc.inc_route.ro_rt = NULL;             /* XXX */
                }
                inc.inc_fport = th->th_sport;
                inc.inc_lport = th->th_dport;

                /*
                 * If the state is LISTEN then ignore segment if it contains
                 * a RST.  If the segment contains an ACK then it is bad and
                 * send a RST.  If it does not contain a SYN then it is not
                 * interesting; drop it.
                 *
                 * If the state is SYN_RECEIVED (syncache) and seg contains
                 * an ACK, but not for our SYN/ACK, send a RST.  If the seg
                 * contains a RST, check the sequence number to see if it
                 * is a valid reset segment.
                 */
                if ((thflags & (TH_RST | TH_ACK | TH_SYN)) != TH_SYN) {
                        if ((thflags & (TH_RST | TH_ACK | TH_SYN)) == TH_ACK) {
                                if (!syncache_expand(&inc, th, &so, m)) {
                                        /*
                                         * No syncache entry, or ACK was not
                                         * for our SYN/ACK.  Send a RST.
                                         */
                                        tcpstat.tcps_badsyn++;
                                        rstreason = BANDLIM_RST_OPENPORT;
                                        goto dropwithreset;
                                }

                                /*
                                 * Could not complete 3-way handshake,
                                 * connection is being closed down, and
                                 * syncache will free mbuf.
                                 */
                                if (so == NULL)
                                        return(IPPROTO_DONE);

                                /*
                                 * We must be in the correct protocol thread
                                 * for this connection.
                                 */
                                KKASSERT(so->so_port ==
                                    &curthread->td_msgport);

                                /*
                                 * Socket is created in state SYN_RECEIVED.
                                 * Continue processing segment.
                                 */
                                inp = so->so_pcb;
                                tp = intotcpcb(inp);
                                /*
                                 * This is what would have happened in
                                 * tcp_output() when the SYN,ACK was sent.
                                 */
                                tp->snd_up = tp->snd_una;
                                tp->snd_max = tp->snd_nxt = tp->iss + 1;
                                tp->last_ack_sent = tp->rcv_nxt;
                                /*
                                 * XXX possible bug - it doesn't appear that
                                 * tp->snd_wnd is unscaled until the _second_
                                 * ACK is received:
                                 *    rcv SYN (set wscale opts) --> send
                                 *        SYN/ACK, set snd_wnd = window.
                                 *    rcv ACK, calculate tiwin --> process
                                 *        SYN_RECEIVED, determine wscale,
                                 *        move to ESTAB, set snd_wnd to tiwin.
                                 */
                                tp->snd_wnd = tiwin;    /* unscaled */
                                goto after_listen;
                        }
                        if (thflags & TH_RST) {
                                syncache_chkrst(&inc, th);
                                goto drop;
                        }
                        if (thflags & TH_ACK) {
                                syncache_badack(&inc);
                                tcpstat.tcps_badsyn++;
                                rstreason = BANDLIM_RST_OPENPORT;
                                goto dropwithreset;
                        }
                        goto drop;
                }

                /*
                 * Segment's flags are (SYN) or (SYN | FIN).
                 */
#ifdef INET6
                /*
                 * If deprecated address is forbidden,
                 * we do not accept SYN to deprecated interface
                 * address to prevent any new inbound connection from
                 * getting established.
                 * When we do not accept SYN, we send a TCP RST,
                 * with deprecated source address (instead of dropping
                 * it).  We compromise as it is much better for the peer
                 * to receive a RST; the RST will be the final packet
                 * for the exchange.
975 * 976 * If we do not forbid deprecated addresses, we accept 977 * the SYN packet. RFC2462 does not suggest dropping 978 * SYN in this case. 979 * If we decipher RFC2462 5.5.4, it says like this: 980 * 1. use of deprecated addr with existing 981 * communication is okay - "SHOULD continue to be 982 * used" 983 * 2. use of it with new communication: 984 * (2a) "SHOULD NOT be used if alternate address 985 * with sufficient scope is available" 986 * (2b) nothing mentioned otherwise. 987 * Here we fall into (2b) case as we have no choice in 988 * our source address selection - we must obey the peer. 989 * 990 * The wording in RFC2462 is confusing, and there are 991 * multiple description text for deprecated address 992 * handling - worse, they are not exactly the same. 993 * I believe 5.5.4 is the best one, so we follow 5.5.4. 994 */ 995 if (isipv6 && !ip6_use_deprecated) { 996 struct in6_ifaddr *ia6; 997 998 if ((ia6 = ip6_getdstifaddr(m)) && 999 (ia6->ia6_flags & IN6_IFF_DEPRECATED)) { 1000 tp = NULL; 1001 rstreason = BANDLIM_RST_OPENPORT; 1002 goto dropwithreset; 1003 } 1004 } 1005 #endif 1006 /* 1007 * If it is from this socket, drop it, it must be forged. 1008 * Don't bother responding if the destination was a broadcast. 1009 */ 1010 if (th->th_dport == th->th_sport) { 1011 if (isipv6) { 1012 if (IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, 1013 &ip6->ip6_src)) 1014 goto drop; 1015 } else { 1016 if (ip->ip_dst.s_addr == ip->ip_src.s_addr) 1017 goto drop; 1018 } 1019 } 1020 /* 1021 * RFC1122 4.2.3.10, p. 104: discard bcast/mcast SYN 1022 * 1023 * Note that it is quite possible to receive unicast 1024 * link-layer packets with a broadcast IP address. Use 1025 * in_broadcast() to find them. 1026 */ 1027 if (m->m_flags & (M_BCAST | M_MCAST)) 1028 goto drop; 1029 if (isipv6) { 1030 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || 1031 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) 1032 goto drop; 1033 } else { 1034 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 1035 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || 1036 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || 1037 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) 1038 goto drop; 1039 } 1040 /* 1041 * SYN appears to be valid; create compressed TCP state 1042 * for syncache, or perform t/tcp connection. 1043 */ 1044 if (so->so_qlen <= so->so_qlimit) { 1045 tcp_dooptions(&to, optp, optlen, TRUE); 1046 if (!syncache_add(&inc, &to, th, &so, m)) 1047 goto drop; 1048 1049 /* 1050 * Entry added to syncache, mbuf used to 1051 * send SYN,ACK packet. 1052 */ 1053 if (so == NULL) 1054 return(IPPROTO_DONE); 1055 1056 /* 1057 * We must be in the correct protocol thread for 1058 * this connection. 1059 */ 1060 KKASSERT(so->so_port == &curthread->td_msgport); 1061 1062 inp = so->so_pcb; 1063 tp = intotcpcb(inp); 1064 tp->snd_wnd = tiwin; 1065 tp->t_starttime = ticks; 1066 tp->t_state = TCPS_ESTABLISHED; 1067 1068 /* 1069 * If there is a FIN, or if there is data and the 1070 * connection is local, then delay SYN,ACK(SYN) in 1071 * the hope of piggy-backing it on a response 1072 * segment. Otherwise must send ACK now in case 1073 * the other side is slow starting. 
1074 */ 1075 if (DELAY_ACK(tp) && 1076 ((thflags & TH_FIN) || 1077 (tlen != 0 && 1078 ((isipv6 && in6_localaddr(&inp->in6p_faddr)) || 1079 (!isipv6 && in_localaddr(inp->inp_faddr)))))) { 1080 tcp_callout_reset(tp, tp->tt_delack, 1081 tcp_delacktime, tcp_timer_delack); 1082 tp->t_flags |= TF_NEEDSYN; 1083 } else { 1084 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN); 1085 } 1086 1087 tcpstat.tcps_connects++; 1088 soisconnected(so); 1089 goto trimthenstep6; 1090 } 1091 goto drop; 1092 } 1093 1094 after_listen: 1095 /* 1096 * Should not happen - syncache should pick up these connections. 1097 * 1098 * Once we are past handling listen sockets we must be in the 1099 * correct protocol processing thread. 1100 */ 1101 KASSERT(tp->t_state != TCPS_LISTEN, ("tcp_input: TCPS_LISTEN state")); 1102 KKASSERT(so->so_port == &curthread->td_msgport); 1103 1104 /* 1105 * This is the second part of the MSS DoS prevention code (after 1106 * minmss on the sending side) and it deals with too many too small 1107 * tcp packets in a too short timeframe (1 second). 1108 * 1109 * XXX Removed. This code was crap. It does not scale to network 1110 * speed, and default values break NFS. Gone. 1111 */ 1112 /* REMOVED */ 1113 1114 /* 1115 * Segment received on connection. 1116 * 1117 * Reset idle time and keep-alive timer. Don't waste time if less 1118 * then a second has elapsed. 1119 */ 1120 if ((int)(ticks - tp->t_rcvtime) > hz) 1121 tcp_timer_keep_activity(tp, thflags); 1122 1123 /* 1124 * Process options. 1125 * XXX this is tradtitional behavior, may need to be cleaned up. 1126 */ 1127 tcp_dooptions(&to, optp, optlen, (thflags & TH_SYN) != 0); 1128 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 1129 if (to.to_flags & TOF_SCALE) { 1130 tp->t_flags |= TF_RCVD_SCALE; 1131 tp->requested_s_scale = to.to_requested_s_scale; 1132 } 1133 if (to.to_flags & TOF_TS) { 1134 tp->t_flags |= TF_RCVD_TSTMP; 1135 tp->ts_recent = to.to_tsval; 1136 tp->ts_recent_age = ticks; 1137 } 1138 if (to.to_flags & TOF_MSS) 1139 tcp_mss(tp, to.to_mss); 1140 /* 1141 * Only set the TF_SACK_PERMITTED per-connection flag 1142 * if we got a SACK_PERMITTED option from the other side 1143 * and the global tcp_do_sack variable is true. 1144 */ 1145 if (tcp_do_sack && (to.to_flags & TOF_SACK_PERMITTED)) 1146 tp->t_flags |= TF_SACK_PERMITTED; 1147 } 1148 1149 /* 1150 * Header prediction: check for the two common cases 1151 * of a uni-directional data xfer. If the packet has 1152 * no control flags, is in-sequence, the window didn't 1153 * change and we're not retransmitting, it's a 1154 * candidate. If the length is zero and the ack moved 1155 * forward, we're the sender side of the xfer. Just 1156 * free the data acked & wake any higher level process 1157 * that was blocked waiting for space. If the length 1158 * is non-zero and the ack didn't move, we're the 1159 * receiver side. If we're getting packets in-order 1160 * (the reassembly queue is empty), add the data to 1161 * the socket buffer and note that we need a delayed ack. 1162 * Make sure that the hidden state-flags are also off. 1163 * Since we check for TCPS_ESTABLISHED above, it can only 1164 * be TH_NEEDSYN. 
1165 */ 1166 if (tp->t_state == TCPS_ESTABLISHED && 1167 (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK && 1168 !(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)) && 1169 (!(to.to_flags & TOF_TS) || 1170 TSTMP_GEQ(to.to_tsval, tp->ts_recent)) && 1171 th->th_seq == tp->rcv_nxt && 1172 tp->snd_nxt == tp->snd_max) { 1173 1174 /* 1175 * If last ACK falls within this segment's sequence numbers, 1176 * record the timestamp. 1177 * NOTE that the test is modified according to the latest 1178 * proposal of the tcplw@cray.com list (Braden 1993/04/26). 1179 */ 1180 if ((to.to_flags & TOF_TS) && 1181 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 1182 tp->ts_recent_age = ticks; 1183 tp->ts_recent = to.to_tsval; 1184 } 1185 1186 if (tlen == 0) { 1187 if (SEQ_GT(th->th_ack, tp->snd_una) && 1188 SEQ_LEQ(th->th_ack, tp->snd_max) && 1189 tp->snd_cwnd >= tp->snd_wnd && 1190 !IN_FASTRECOVERY(tp)) { 1191 /* 1192 * This is a pure ack for outstanding data. 1193 */ 1194 ++tcpstat.tcps_predack; 1195 /* 1196 * "bad retransmit" recovery 1197 * 1198 * If Eifel detection applies, then 1199 * it is deterministic, so use it 1200 * unconditionally over the old heuristic. 1201 * Otherwise, fall back to the old heuristic. 1202 */ 1203 if (tcp_do_eifel_detect && 1204 (to.to_flags & TOF_TS) && to.to_tsecr && 1205 (tp->t_flags & TF_FIRSTACCACK)) { 1206 /* Eifel detection applicable. */ 1207 if (to.to_tsecr < tp->t_rexmtTS) { 1208 tcp_revert_congestion_state(tp); 1209 ++tcpstat.tcps_eifeldetected; 1210 } 1211 } else if (tp->t_rxtshift == 1 && 1212 ticks < tp->t_badrxtwin) { 1213 tcp_revert_congestion_state(tp); 1214 ++tcpstat.tcps_rttdetected; 1215 } 1216 tp->t_flags &= ~(TF_FIRSTACCACK | 1217 TF_FASTREXMT | TF_EARLYREXMT); 1218 /* 1219 * Recalculate the retransmit timer / rtt. 1220 * 1221 * Some machines (certain windows boxes) 1222 * send broken timestamp replies during the 1223 * SYN+ACK phase, ignore timestamps of 0. 1224 */ 1225 if ((to.to_flags & TOF_TS) && to.to_tsecr) { 1226 tcp_xmit_timer(tp, 1227 ticks - to.to_tsecr + 1); 1228 } else if (tp->t_rtttime && 1229 SEQ_GT(th->th_ack, tp->t_rtseq)) { 1230 tcp_xmit_timer(tp, 1231 ticks - tp->t_rtttime); 1232 } 1233 tcp_xmit_bandwidth_limit(tp, th->th_ack); 1234 acked = th->th_ack - tp->snd_una; 1235 tcpstat.tcps_rcvackpack++; 1236 tcpstat.tcps_rcvackbyte += acked; 1237 sbdrop(&so->so_snd.sb, acked); 1238 tp->snd_recover = th->th_ack - 1; 1239 tp->snd_una = th->th_ack; 1240 tp->t_dupacks = 0; 1241 /* 1242 * Update window information. 1243 */ 1244 if (tiwin != tp->snd_wnd && 1245 acceptable_window_update(tp, th, tiwin)) { 1246 /* keep track of pure window updates */ 1247 if (tp->snd_wl2 == th->th_ack && 1248 tiwin > tp->snd_wnd) 1249 tcpstat.tcps_rcvwinupd++; 1250 tp->snd_wnd = tiwin; 1251 tp->snd_wl1 = th->th_seq; 1252 tp->snd_wl2 = th->th_ack; 1253 if (tp->snd_wnd > tp->max_sndwnd) 1254 tp->max_sndwnd = tp->snd_wnd; 1255 } 1256 m_freem(m); 1257 ND6_HINT(tp); /* some progress has been done */ 1258 /* 1259 * If all outstanding data are acked, stop 1260 * retransmit timer, otherwise restart timer 1261 * using current (possibly backed-off) value. 1262 * If process is waiting for space, 1263 * wakeup/selwakeup/signal. If data 1264 * are ready to send, let tcp_output 1265 * decide between more output or persist. 
1266 */ 1267 if (tp->snd_una == tp->snd_max) { 1268 tcp_callout_stop(tp, tp->tt_rexmt); 1269 } else if (!tcp_callout_active(tp, 1270 tp->tt_persist)) { 1271 tcp_callout_reset(tp, tp->tt_rexmt, 1272 tp->t_rxtcur, tcp_timer_rexmt); 1273 } 1274 sowwakeup(so); 1275 if (so->so_snd.ssb_cc > 0) 1276 tcp_output(tp); 1277 return(IPPROTO_DONE); 1278 } 1279 } else if (tiwin == tp->snd_wnd && 1280 th->th_ack == tp->snd_una && 1281 LIST_EMPTY(&tp->t_segq) && 1282 tlen <= ssb_space(&so->so_rcv)) { 1283 u_long newsize = 0; /* automatic sockbuf scaling */ 1284 /* 1285 * This is a pure, in-sequence data packet 1286 * with nothing on the reassembly queue and 1287 * we have enough buffer space to take it. 1288 */ 1289 ++tcpstat.tcps_preddat; 1290 tp->rcv_nxt += tlen; 1291 tcpstat.tcps_rcvpack++; 1292 tcpstat.tcps_rcvbyte += tlen; 1293 ND6_HINT(tp); /* some progress has been done */ 1294 /* 1295 * Automatic sizing of receive socket buffer. Often the send 1296 * buffer size is not optimally adjusted to the actual network 1297 * conditions at hand (delay bandwidth product). Setting the 1298 * buffer size too small limits throughput on links with high 1299 * bandwidth and high delay (eg. trans-continental/oceanic links). 1300 * 1301 * On the receive side the socket buffer memory is only rarely 1302 * used to any significant extent. This allows us to be much 1303 * more aggressive in scaling the receive socket buffer. For 1304 * the case that the buffer space is actually used to a large 1305 * extent and we run out of kernel memory we can simply drop 1306 * the new segments; TCP on the sender will just retransmit it 1307 * later. Setting the buffer size too big may only consume too 1308 * much kernel memory if the application doesn't read() from 1309 * the socket or packet loss or reordering makes use of the 1310 * reassembly queue. 1311 * 1312 * The criteria to step up the receive buffer one notch are: 1313 * 1. the number of bytes received during the time it takes 1314 * one timestamp to be reflected back to us (the RTT); 1315 * 2. received bytes per RTT is within seven eighth of the 1316 * current socket buffer size; 1317 * 3. receive buffer size has not hit maximal automatic size; 1318 * 1319 * This algorithm does one step per RTT at most and only if 1320 * we receive a bulk stream w/o packet losses or reorderings. 1321 * Shrinking the buffer during idle times is not necessary as 1322 * it doesn't consume any memory when idle. 1323 * 1324 * TODO: Only step up if the application is actually serving 1325 * the buffer to better manage the socket buffer resources. 1326 */ 1327 if (tcp_do_autorcvbuf && 1328 to.to_tsecr && 1329 (so->so_rcv.ssb_flags & SSB_AUTOSIZE)) { 1330 if (to.to_tsecr > tp->rfbuf_ts && 1331 to.to_tsecr - tp->rfbuf_ts < hz) { 1332 if (tp->rfbuf_cnt > 1333 (so->so_rcv.ssb_hiwat / 8 * 7) && 1334 so->so_rcv.ssb_hiwat < 1335 tcp_autorcvbuf_max) { 1336 newsize = 1337 ulmin(so->so_rcv.ssb_hiwat + 1338 tcp_autorcvbuf_inc, 1339 tcp_autorcvbuf_max); 1340 } 1341 /* Start over with next RTT. */ 1342 tp->rfbuf_ts = 0; 1343 tp->rfbuf_cnt = 0; 1344 } else 1345 tp->rfbuf_cnt += tlen; /* add up */ 1346 } 1347 /* 1348 * Add data to socket buffer. 1349 */ 1350 if (so->so_state & SS_CANTRCVMORE) { 1351 m_freem(m); 1352 } else { 1353 /* 1354 * Set new socket buffer size, give up when 1355 * limit is reached. 1356 * 1357 * Adjusting the size can mess up ACK 1358 * sequencing when pure window updates are 1359 * being avoided (which is the default), 1360 * so force an ack. 
1361 */ 1362 lwkt_gettoken(&so->so_rcv.ssb_token); 1363 if (newsize) { 1364 tp->t_flags |= TF_RXRESIZED; 1365 if (!ssb_reserve(&so->so_rcv, newsize, 1366 so, NULL)) { 1367 atomic_clear_int(&so->so_rcv.ssb_flags, SSB_AUTOSIZE); 1368 } 1369 if (newsize >= 1370 (TCP_MAXWIN << tp->rcv_scale)) { 1371 atomic_clear_int(&so->so_rcv.ssb_flags, SSB_AUTOSIZE); 1372 } 1373 } 1374 m_adj(m, drop_hdrlen); /* delayed header drop */ 1375 ssb_appendstream(&so->so_rcv, m); 1376 lwkt_reltoken(&so->so_rcv.ssb_token); 1377 } 1378 sorwakeup(so); 1379 /* 1380 * This code is responsible for most of the ACKs 1381 * the TCP stack sends back after receiving a data 1382 * packet. Note that the DELAY_ACK check fails if 1383 * the delack timer is already running, which results 1384 * in an ack being sent every other packet (which is 1385 * what we want). 1386 * 1387 * We then further aggregate acks by not actually 1388 * sending one until the protocol thread has completed 1389 * processing the current backlog of packets. This 1390 * does not delay the ack any further, but allows us 1391 * to take advantage of the packet aggregation that 1392 * high speed NICs do (usually blocks of 8-10 packets) 1393 * to send a single ack rather then four or five acks, 1394 * greatly reducing the ack rate, the return channel 1395 * bandwidth, and the protocol overhead on both ends. 1396 * 1397 * Since this also has the effect of slowing down 1398 * the exponential slow-start ramp-up, systems with 1399 * very large bandwidth-delay products might want 1400 * to turn the feature off. 1401 */ 1402 if (DELAY_ACK(tp)) { 1403 tcp_callout_reset(tp, tp->tt_delack, 1404 tcp_delacktime, tcp_timer_delack); 1405 } else if (tcp_aggregate_acks) { 1406 tp->t_flags |= TF_ACKNOW; 1407 if (!(tp->t_flags & TF_ONOUTPUTQ)) { 1408 tp->t_flags |= TF_ONOUTPUTQ; 1409 tp->tt_cpu = mycpu->gd_cpuid; 1410 TAILQ_INSERT_TAIL( 1411 &tcpcbackq[tp->tt_cpu], 1412 tp, t_outputq); 1413 } 1414 } else { 1415 tp->t_flags |= TF_ACKNOW; 1416 tcp_output(tp); 1417 } 1418 return(IPPROTO_DONE); 1419 } 1420 } 1421 1422 /* 1423 * Calculate amount of space in receive window, 1424 * and then do TCP input processing. 1425 * Receive window is amount of space in rcv queue, 1426 * but not less than advertised window. 1427 */ 1428 recvwin = ssb_space(&so->so_rcv); 1429 if (recvwin < 0) 1430 recvwin = 0; 1431 tp->rcv_wnd = imax(recvwin, (int)(tp->rcv_adv - tp->rcv_nxt)); 1432 1433 /* Reset receive buffer auto scaling when not in bulk receive mode. */ 1434 tp->rfbuf_ts = 0; 1435 tp->rfbuf_cnt = 0; 1436 1437 switch (tp->t_state) { 1438 /* 1439 * If the state is SYN_RECEIVED: 1440 * if seg contains an ACK, but not for our SYN/ACK, send a RST. 1441 */ 1442 case TCPS_SYN_RECEIVED: 1443 if ((thflags & TH_ACK) && 1444 (SEQ_LEQ(th->th_ack, tp->snd_una) || 1445 SEQ_GT(th->th_ack, tp->snd_max))) { 1446 rstreason = BANDLIM_RST_OPENPORT; 1447 goto dropwithreset; 1448 } 1449 break; 1450 1451 /* 1452 * If the state is SYN_SENT: 1453 * if seg contains an ACK, but not for our SYN, drop the input. 1454 * if seg contains a RST, then drop the connection. 1455 * if seg does not contain SYN, then drop it. 

        switch (tp->t_state) {
        /*
         * If the state is SYN_RECEIVED:
         *      if seg contains an ACK, but not for our SYN/ACK, send a RST.
         */
        case TCPS_SYN_RECEIVED:
                if ((thflags & TH_ACK) &&
                    (SEQ_LEQ(th->th_ack, tp->snd_una) ||
                     SEQ_GT(th->th_ack, tp->snd_max))) {
                        rstreason = BANDLIM_RST_OPENPORT;
                        goto dropwithreset;
                }
                break;

        /*
         * If the state is SYN_SENT:
         *      if seg contains an ACK, but not for our SYN, drop the input.
         *      if seg contains a RST, then drop the connection.
         *      if seg does not contain SYN, then drop it.
         *      Otherwise this is an acceptable SYN segment
         *              initialize tp->rcv_nxt and tp->irs
         *              if seg contains ack then advance tp->snd_una
         *              if SYN has been acked change to ESTABLISHED else
         *                  SYN_RCVD state
         *              arrange for segment to be acked (eventually)
         *              continue processing rest of data/controls,
         *                  beginning with URG
         */
        case TCPS_SYN_SENT:
                if ((thflags & TH_ACK) &&
                    (SEQ_LEQ(th->th_ack, tp->iss) ||
                     SEQ_GT(th->th_ack, tp->snd_max))) {
                        rstreason = BANDLIM_UNLIMITED;
                        goto dropwithreset;
                }
                if (thflags & TH_RST) {
                        if (thflags & TH_ACK)
                                tp = tcp_drop(tp, ECONNREFUSED);
                        goto drop;
                }
                if (!(thflags & TH_SYN))
                        goto drop;
                tp->snd_wnd = th->th_win;       /* initial send window */

                tp->irs = th->th_seq;
                tcp_rcvseqinit(tp);
                if (thflags & TH_ACK) {
                        /* Our SYN was acked. */
                        tcpstat.tcps_connects++;
                        soisconnected(so);
                        /* Do window scaling on this connection? */
                        if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
                            (TF_RCVD_SCALE | TF_REQ_SCALE)) {
                                tp->snd_scale = tp->requested_s_scale;
                                tp->rcv_scale = tp->request_r_scale;
                        }
                        tp->rcv_adv += tp->rcv_wnd;
                        tp->snd_una++;          /* SYN is acked */
                        tcp_callout_stop(tp, tp->tt_rexmt);
                        /*
                         * If there's data, delay ACK; if there's also a FIN
                         * ACKNOW will be turned on later.
                         */
                        if (DELAY_ACK(tp) && tlen != 0) {
                                tcp_callout_reset(tp, tp->tt_delack,
                                    tcp_delacktime, tcp_timer_delack);
                        } else {
                                tp->t_flags |= TF_ACKNOW;
                        }
                        /*
                         * Received <SYN,ACK> in SYN_SENT[*] state.
                         * Transitions:
                         *      SYN_SENT  --> ESTABLISHED
                         *      SYN_SENT* --> FIN_WAIT_1
                         */
                        tp->t_starttime = ticks;
                        if (tp->t_flags & TF_NEEDFIN) {
                                tp->t_state = TCPS_FIN_WAIT_1;
                                tp->t_flags &= ~TF_NEEDFIN;
                                thflags &= ~TH_SYN;
                        } else {
                                tp->t_state = TCPS_ESTABLISHED;
                                tcp_callout_reset(tp, tp->tt_keep,
                                    tcp_getkeepidle(tp),
                                    tcp_timer_keep);
                        }
                } else {
                        /*
                         * Received initial SYN in SYN-SENT[*] state =>
                         * simultaneous open.
                         * Do 3-way handshake:
                         *      SYN-SENT  -> SYN-RECEIVED
                         *      SYN-SENT* -> SYN-RECEIVED*
                         */
                        tp->t_flags |= TF_ACKNOW;
                        tcp_callout_stop(tp, tp->tt_rexmt);
                        tp->t_state = TCPS_SYN_RECEIVED;
                }

trimthenstep6:
                /*
                 * Advance th->th_seq to correspond to first data byte.
                 * If data, trim to stay within window,
                 * dropping FIN if necessary.
                 */
                th->th_seq++;
                if (tlen > tp->rcv_wnd) {
                        todrop = tlen - tp->rcv_wnd;
                        m_adj(m, -todrop);
                        tlen = tp->rcv_wnd;
                        thflags &= ~TH_FIN;
                        tcpstat.tcps_rcvpackafterwin++;
                        tcpstat.tcps_rcvbyteafterwin += todrop;
                }
                tp->snd_wl1 = th->th_seq - 1;
                tp->rcv_up = th->th_seq;
                /*
                 * Client side of transaction: already sent SYN and data.
                 * If the remote host used T/TCP to validate the SYN,
                 * our data will be ACK'd; if so, enter normal data segment
                 * processing in the middle of step 5, ack processing.
                 * Otherwise, goto step 6.
                 */
                if (thflags & TH_ACK)
                        goto process_ACK;

                goto step6;

        /*
         * If the state is LAST_ACK or CLOSING or TIME_WAIT:
         *      do normal processing (we no longer bother with T/TCP).
         */
        case TCPS_LAST_ACK:
        case TCPS_CLOSING:
        case TCPS_TIME_WAIT:
                break;                  /* continue normal processing */
        }
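
        /*
         * To illustrate the trimthenstep6 trimming above: a SYN
         * carrying 1460 bytes of data while rcv_wnd is 1024 has th_seq
         * advanced past the SYN, its last 436 bytes (and any FIN)
         * chopped off by m_adj(), and tlen clipped to 1024.
         */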

        /*
         * States other than LISTEN or SYN_SENT.
         * First check the RST flag and sequence number since reset segments
         * are exempt from the timestamp and connection count tests.  This
         * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix
         * below which allowed reset segments in half the sequence space
         * to fall through and be processed (which gives forged reset
         * segments with a random sequence number a 50 percent chance of
         * killing a connection).
         * Then check timestamp, if present.
         * Then check the connection count, if present.
         * Then check that at least some bytes of segment are within
         * receive window.  If segment begins before rcv_nxt,
         * drop leading data (and SYN); if nothing left, just ack.
         *
         *
         * If the RST bit is set, check the sequence number to see
         * if this is a valid reset segment.
         * RFC 793 page 37:
         *      In all states except SYN-SENT, all reset (RST) segments
         *      are validated by checking their SEQ-fields.  A reset is
         *      valid if its sequence number is in the window.
         * Note: this does not take into account delayed ACKs, so
         *      we should test against last_ack_sent instead of rcv_nxt.
         *      The sequence number in the reset segment is normally an
         *      echo of our outgoing acknowledgement numbers, but some
         *      hosts send a reset with the sequence number at the
         *      rightmost edge of our receive window, and we have to
         *      handle this case.
         * If we have multiple segments in flight, the initial reset
         * segment sequence numbers will be to the left of last_ack_sent,
         * but they will eventually catch up.
         * In any case, it never made sense to trim reset segments to
         * fit the receive window since RFC 1122 says:
         *   4.2.2.12  RST Segment: RFC-793 Section 3.4
         *
         *     A TCP SHOULD allow a received RST segment to include data.
         *
         *     DISCUSSION
         *          It has been suggested that a RST segment could contain
         *          ASCII text that encoded and explained the cause of the
         *          RST.  No standard has yet been established for such
         *          data.
         *
         * If the reset segment passes the sequence number test examine
         * the state:
         *    SYN_RECEIVED STATE:
         *      If passive open, return to LISTEN state.
         *      If active open, inform user that connection was refused.
         *    ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES:
         *      Inform user that connection was reset, and close tcb.
         *    CLOSING, LAST_ACK STATES:
         *      Close the tcb.
         *    TIME_WAIT STATE:
         *      Drop the segment - see Stevens, vol. 2, p. 964 and
         *      RFC 1337.
         */
        if (thflags & TH_RST) {
                if (SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
                    SEQ_LEQ(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
                        switch (tp->t_state) {

                        case TCPS_SYN_RECEIVED:
                                so->so_error = ECONNREFUSED;
                                goto close;

                        case TCPS_ESTABLISHED:
                        case TCPS_FIN_WAIT_1:
                        case TCPS_FIN_WAIT_2:
                        case TCPS_CLOSE_WAIT:
                                so->so_error = ECONNRESET;
                        close:
                                tp->t_state = TCPS_CLOSED;
                                tcpstat.tcps_drops++;
                                tp = tcp_close(tp);
                                break;

                        case TCPS_CLOSING:
                        case TCPS_LAST_ACK:
                                tp = tcp_close(tp);
                                break;

                        case TCPS_TIME_WAIT:
                                break;
                        }
                }
                goto drop;
        }

        /*
         * RFC 1323 PAWS: If we have a timestamp reply on this segment
         * and it's less than ts_recent, drop it.
         */
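
        /*
         * PAWS example: with ts_recent = 5000, a segment carrying
         * to_tsval = 4990 satisfies TSTMP_LT() and is dropped as a
         * duplicate, unless ts_recent is more than TCP_PAWS_IDLE
         * (24 days) old, in which case it is merely invalidated below.
         */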
1664 */ 1665 if ((to.to_flags & TOF_TS) && tp->ts_recent != 0 && 1666 TSTMP_LT(to.to_tsval, tp->ts_recent)) { 1667 1668 /* Check to see if ts_recent is over 24 days old. */ 1669 if ((int)(ticks - tp->ts_recent_age) > TCP_PAWS_IDLE) { 1670 /* 1671 * Invalidate ts_recent. If this segment updates 1672 * ts_recent, the age will be reset later and ts_recent 1673 * will get a valid value. If it does not, setting 1674 * ts_recent to zero will at least satisfy the 1675 * requirement that zero be placed in the timestamp 1676 * echo reply when ts_recent isn't valid. The 1677 * age isn't reset until we get a valid ts_recent 1678 * because we don't want out-of-order segments to be 1679 * dropped when ts_recent is old. 1680 */ 1681 tp->ts_recent = 0; 1682 } else { 1683 tcpstat.tcps_rcvduppack++; 1684 tcpstat.tcps_rcvdupbyte += tlen; 1685 tcpstat.tcps_pawsdrop++; 1686 if (tlen) 1687 goto dropafterack; 1688 goto drop; 1689 } 1690 } 1691 1692 /* 1693 * In the SYN-RECEIVED state, validate that the packet belongs to 1694 * this connection before trimming the data to fit the receive 1695 * window. Check the sequence number versus IRS since we know 1696 * the sequence numbers haven't wrapped. This is a partial fix 1697 * for the "LAND" DoS attack. 1698 */ 1699 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) { 1700 rstreason = BANDLIM_RST_OPENPORT; 1701 goto dropwithreset; 1702 } 1703 1704 todrop = tp->rcv_nxt - th->th_seq; 1705 if (todrop > 0) { 1706 if (TCP_DO_SACK(tp)) { 1707 /* Report duplicate segment at head of packet. */ 1708 tp->reportblk.rblk_start = th->th_seq; 1709 tp->reportblk.rblk_end = th->th_seq + tlen; 1710 if (thflags & TH_FIN) 1711 ++tp->reportblk.rblk_end; 1712 if (SEQ_GT(tp->reportblk.rblk_end, tp->rcv_nxt)) 1713 tp->reportblk.rblk_end = tp->rcv_nxt; 1714 tp->t_flags |= (TF_DUPSEG | TF_SACKLEFT | TF_ACKNOW); 1715 } 1716 if (thflags & TH_SYN) { 1717 thflags &= ~TH_SYN; 1718 th->th_seq++; 1719 if (th->th_urp > 1) 1720 th->th_urp--; 1721 else 1722 thflags &= ~TH_URG; 1723 todrop--; 1724 } 1725 /* 1726 * Following if statement from Stevens, vol. 2, p. 960. 1727 */ 1728 if (todrop > tlen || 1729 (todrop == tlen && !(thflags & TH_FIN))) { 1730 /* 1731 * Any valid FIN must be to the left of the window. 1732 * At this point the FIN must be a duplicate or out 1733 * of sequence; drop it. 1734 */ 1735 thflags &= ~TH_FIN; 1736 1737 /* 1738 * Send an ACK to resynchronize and drop any data. 1739 * But keep on processing for RST or ACK. 1740 */ 1741 tp->t_flags |= TF_ACKNOW; 1742 todrop = tlen; 1743 tcpstat.tcps_rcvduppack++; 1744 tcpstat.tcps_rcvdupbyte += todrop; 1745 } else { 1746 tcpstat.tcps_rcvpartduppack++; 1747 tcpstat.tcps_rcvpartdupbyte += todrop; 1748 } 1749 drop_hdrlen += todrop; /* drop from the top afterwards */ 1750 th->th_seq += todrop; 1751 tlen -= todrop; 1752 if (th->th_urp > todrop) 1753 th->th_urp -= todrop; 1754 else { 1755 thflags &= ~TH_URG; 1756 th->th_urp = 0; 1757 } 1758 } 1759 1760 /* 1761 * If new data are received on a connection after the 1762 * user processes are gone, then RST the other end. 1763 */ 1764 if ((so->so_state & SS_NOFDREF) && 1765 tp->t_state > TCPS_CLOSE_WAIT && tlen) { 1766 tp = tcp_close(tp); 1767 tcpstat.tcps_rcvafterclose++; 1768 rstreason = BANDLIM_UNLIMITED; 1769 goto dropwithreset; 1770 } 1771 1772 /* 1773 * If segment ends after window, drop trailing data 1774 * (and PUSH and FIN); if nothing left, just ACK. 
	 */
	todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd);
	if (todrop > 0) {
		tcpstat.tcps_rcvpackafterwin++;
		if (todrop >= tlen) {
			tcpstat.tcps_rcvbyteafterwin += tlen;
			/*
			 * If a new connection request is received
			 * while in TIME_WAIT, drop the old connection
			 * and start over if the sequence numbers
			 * are above the previous ones.
			 */
			if (thflags & TH_SYN &&
			    tp->t_state == TCPS_TIME_WAIT &&
			    SEQ_GT(th->th_seq, tp->rcv_nxt)) {
				tp = tcp_close(tp);
				goto findpcb;
			}
			/*
			 * If window is closed can only take segments at
			 * window edge, and have to drop data and PUSH from
			 * incoming segments.  Continue processing, but
			 * remember to ack.  Otherwise, drop segment
			 * and ack.
			 */
			if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
				tp->t_flags |= TF_ACKNOW;
				tcpstat.tcps_rcvwinprobe++;
			} else
				goto dropafterack;
		} else
			tcpstat.tcps_rcvbyteafterwin += todrop;
		m_adj(m, -todrop);
		tlen -= todrop;
		thflags &= ~(TH_PUSH | TH_FIN);
	}

	/*
	 * If last ACK falls within this segment's sequence numbers,
	 * record its timestamp.
	 * NOTE:
	 * 1) That the test incorporates suggestions from the latest
	 *    proposal of the tcplw@cray.com list (Braden 1993/04/26).
	 * 2) That updating only on newer timestamps interferes with
	 *    our earlier PAWS tests, so this check should be solely
	 *    predicated on the sequence space of this segment.
	 * 3) That we modify the segment boundary check to be
	 *	Last.ACK.Sent <= SEG.SEQ + SEG.LEN
	 *    instead of RFC1323's
	 *	Last.ACK.Sent < SEG.SEQ + SEG.LEN.
	 *    This modified check allows us to overcome RFC1323's
	 *    limitations as described in Stevens TCP/IP Illustrated
	 *    Vol. 2 p.869.  In such cases, we can still calculate the
	 *    RTT correctly when RCV.NXT == Last.ACK.Sent.
	 */
	if ((to.to_flags & TOF_TS) && SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
	    SEQ_LEQ(tp->last_ack_sent, (th->th_seq + tlen
					+ ((thflags & TH_SYN) != 0)
					+ ((thflags & TH_FIN) != 0)))) {
		tp->ts_recent_age = ticks;
		tp->ts_recent = to.to_tsval;
	}

	/*
	 * If a SYN is in the window, then this is an
	 * error and we send an RST and drop the connection.
	 */
	if (thflags & TH_SYN) {
		tp = tcp_drop(tp, ECONNRESET);
		rstreason = BANDLIM_UNLIMITED;
		goto dropwithreset;
	}

	/*
	 * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN
	 * flag is on (half-synchronized state), then queue data for
	 * later processing; else drop segment and return.
	 */
	if (!(thflags & TH_ACK)) {
		if (tp->t_state == TCPS_SYN_RECEIVED ||
		    (tp->t_flags & TF_NEEDSYN))
			goto step6;
		else
			goto drop;
	}

	/*
	 * Ack processing.
	 */
	switch (tp->t_state) {
	/*
	 * In SYN_RECEIVED state, the ACK acknowledges our SYN, so enter
	 * ESTABLISHED state and continue processing.
	 * The ACK was checked above.
	 */
	case TCPS_SYN_RECEIVED:

		tcpstat.tcps_connects++;
		soisconnected(so);
		/*
		 * Do window scaling?
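		 *
		 * Scaling only takes effect when both sides sent the
		 * option on their SYNs (TF_RCVD_SCALE and TF_REQ_SCALE
		 * both set).  For illustration (hypothetical values):
		 * with snd_scale = 2, a 16-bit window field of 8192
		 * advertised by the peer is interpreted as
		 * 8192 << 2 = 32768 bytes.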
		 */
		if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
		    (TF_RCVD_SCALE | TF_REQ_SCALE)) {
			tp->snd_scale = tp->requested_s_scale;
			tp->rcv_scale = tp->request_r_scale;
		}
		/*
		 * Make transitions:
		 *      SYN-RECEIVED  -> ESTABLISHED
		 *      SYN-RECEIVED* -> FIN-WAIT-1
		 */
		tp->t_starttime = ticks;
		if (tp->t_flags & TF_NEEDFIN) {
			tp->t_state = TCPS_FIN_WAIT_1;
			tp->t_flags &= ~TF_NEEDFIN;
		} else {
			tp->t_state = TCPS_ESTABLISHED;
			tcp_callout_reset(tp, tp->tt_keep,
					  tcp_getkeepidle(tp),
					  tcp_timer_keep);
		}
		/*
		 * If segment contains data or ACK, will call tcp_reass()
		 * later; if not, do so now to pass queued data to user.
		 */
		if (tlen == 0 && !(thflags & TH_FIN))
			tcp_reass(tp, NULL, NULL, NULL);
		/* fall into ... */

	/*
	 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
	 * ACKs.  If the ack is in the range
	 *	tp->snd_una < th->th_ack <= tp->snd_max
	 * then advance tp->snd_una to th->th_ack and drop
	 * data from the retransmission queue.  If this ACK reflects
	 * more up to date window information we update our window information.
	 */
	case TCPS_ESTABLISHED:
	case TCPS_FIN_WAIT_1:
	case TCPS_FIN_WAIT_2:
	case TCPS_CLOSE_WAIT:
	case TCPS_CLOSING:
	case TCPS_LAST_ACK:
	case TCPS_TIME_WAIT:

		if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
			if (TCP_DO_SACK(tp))
				tcp_sack_update_scoreboard(tp, &to);
			if (tlen != 0 || tiwin != tp->snd_wnd) {
				tp->t_dupacks = 0;
				break;
			}
			tcpstat.tcps_rcvdupack++;
			if (!tcp_callout_active(tp, tp->tt_rexmt) ||
			    th->th_ack != tp->snd_una) {
				tp->t_dupacks = 0;
				break;
			}
			/*
			 * We have outstanding data (other than
			 * a window probe), this is a completely
			 * duplicate ack (ie, window info didn't
			 * change), the ack is the biggest we've
			 * seen and we've seen exactly our rexmt
			 * threshold of them, so assume a packet
			 * has been dropped and retransmit it.
			 * Kludge snd_nxt & the congestion
			 * window so we send only this one
			 * packet.
			 */
			if (IN_FASTRECOVERY(tp)) {
				if (TCP_DO_SACK(tp)) {
					/* No artificial cwnd inflation. */
					tcp_sack_rexmt(tp, th);
				} else {
					/*
					 * Dup acks mean that packets
					 * have left the network
					 * (they're now cached at the
					 * receiver) so bump cwnd by
					 * the amount in the receiver
					 * to keep a constant cwnd
					 * packets in the network.
					 */
					tp->snd_cwnd += tp->t_maxseg;
					tcp_output(tp);
				}
			} else if (SEQ_LT(th->th_ack, tp->snd_recover)) {
				tp->t_dupacks = 0;
				break;
			} else if (++tp->t_dupacks == tcprexmtthresh) {
				tcp_seq old_snd_nxt;
				u_int win;

fastretransmit:
				if (tcp_do_eifel_detect &&
				    (tp->t_flags & TF_RCVD_TSTMP)) {
					tcp_save_congestion_state(tp);
					tp->t_flags |= TF_FASTREXMT;
				}
				/*
				 * We know we're losing at the current
				 * window size, so do congestion avoidance:
				 * set ssthresh to half the current window
				 * and pull our congestion window back to the
				 * new ssthresh.
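				 *
				 * For illustration (hypothetical values):
				 * with snd_wnd = 64000, snd_cwnd = 32120
				 * and t_maxseg = 1460, win becomes
				 * 32120 / 2 / 1460 = 11 segments, so
				 * snd_ssthresh is set to 11 * 1460 =
				 * 16060 bytes (never less than 2 segments).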
				 */
				win = min(tp->snd_wnd, tp->snd_cwnd) / 2 /
				    tp->t_maxseg;
				if (win < 2)
					win = 2;
				tp->snd_ssthresh = win * tp->t_maxseg;
				ENTER_FASTRECOVERY(tp);
				tp->snd_recover = tp->snd_max;
				tcp_callout_stop(tp, tp->tt_rexmt);
				tp->t_rtttime = 0;
				old_snd_nxt = tp->snd_nxt;
				tp->snd_nxt = th->th_ack;
				tp->snd_cwnd = tp->t_maxseg;
				tcp_output(tp);
				++tcpstat.tcps_sndfastrexmit;
				tp->snd_cwnd = tp->snd_ssthresh;
				tp->rexmt_high = tp->snd_nxt;
				if (SEQ_GT(old_snd_nxt, tp->snd_nxt))
					tp->snd_nxt = old_snd_nxt;
				KASSERT(tp->snd_limited <= 2,
				    ("tp->snd_limited too big"));
				if (TCP_DO_SACK(tp))
					tcp_sack_rexmt(tp, th);
				else
					tp->snd_cwnd += tp->t_maxseg *
					    (tp->t_dupacks - tp->snd_limited);
			} else if (tcp_do_limitedtransmit) {
				u_long oldcwnd = tp->snd_cwnd;
				tcp_seq oldsndmax = tp->snd_max;
				tcp_seq oldsndnxt = tp->snd_nxt;
				/* outstanding data */
				uint32_t ownd = tp->snd_max - tp->snd_una;
				u_int sent;

#define iceildiv(n, d)		(((n)+(d)-1) / (d))

				KASSERT(tp->t_dupacks == 1 ||
				    tp->t_dupacks == 2,
				    ("dupacks not 1 or 2"));
				if (tp->t_dupacks == 1)
					tp->snd_limited = 0;
				tp->snd_nxt = tp->snd_max;
				tp->snd_cwnd = ownd +
				    (tp->t_dupacks - tp->snd_limited) *
				    tp->t_maxseg;
				tcp_output(tp);

				/*
				 * Other acks may have been processed,
				 * snd_nxt cannot be reset to a value less
				 * than snd_una.
				 */
				if (SEQ_LT(oldsndnxt, oldsndmax)) {
					if (SEQ_GT(oldsndnxt, tp->snd_una))
						tp->snd_nxt = oldsndnxt;
					else
						tp->snd_nxt = tp->snd_una;
				}
				tp->snd_cwnd = oldcwnd;
				sent = tp->snd_max - oldsndmax;
				if (sent > tp->t_maxseg) {
					KASSERT((tp->t_dupacks == 2 &&
					    tp->snd_limited == 0) ||
					    (sent == tp->t_maxseg + 1 &&
					    tp->t_flags & TF_SENTFIN),
					    ("sent too much"));
					KASSERT(sent <= tp->t_maxseg * 2,
					    ("sent too many segments"));
					tp->snd_limited = 2;
					tcpstat.tcps_sndlimited += 2;
				} else if (sent > 0) {
					++tp->snd_limited;
					++tcpstat.tcps_sndlimited;
				} else if (tcp_do_early_retransmit &&
				    (tcp_do_eifel_detect &&
				     (tp->t_flags & TF_RCVD_TSTMP)) &&
				    ownd < 4 * tp->t_maxseg &&
				    tp->t_dupacks + 1 >=
				    iceildiv(ownd, tp->t_maxseg) &&
				    (!TCP_DO_SACK(tp) ||
				     ownd <= tp->t_maxseg ||
				     tcp_sack_has_sacked(&tp->scb,
				     ownd - tp->t_maxseg))) {
					++tcpstat.tcps_sndearlyrexmit;
					tp->t_flags |= TF_EARLYREXMT;
					goto fastretransmit;
				}
			}
			goto drop;
		}

		KASSERT(SEQ_GT(th->th_ack, tp->snd_una), ("th_ack <= snd_una"));
		tp->t_dupacks = 0;
		if (SEQ_GT(th->th_ack, tp->snd_max)) {
			/*
			 * Detected optimistic ACK attack.
			 * Force slow-start to de-synchronize attack.
			 */
			tp->snd_cwnd = tp->t_maxseg;
			tp->snd_wacked = 0;

			tcpstat.tcps_rcvacktoomuch++;
			goto dropafterack;
		}
		/*
		 * If we reach this point, ACK is not a duplicate,
		 * i.e., it ACKs something we sent.
		 */
		if (tp->t_flags & TF_NEEDSYN) {
			/*
			 * T/TCP: Connection was half-synchronized, and our
			 * SYN has been ACK'd (so connection is now fully
			 * synchronized).  Go to non-starred state,
			 * increment snd_una for ACK of SYN, and check if
			 * we can do window scaling.
			 */
			tp->t_flags &= ~TF_NEEDSYN;
			tp->snd_una++;
			/* Do window scaling? */
			if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
			    (TF_RCVD_SCALE | TF_REQ_SCALE)) {
				tp->snd_scale = tp->requested_s_scale;
				tp->rcv_scale = tp->request_r_scale;
			}
		}

process_ACK:
		acked = th->th_ack - tp->snd_una;
		tcpstat.tcps_rcvackpack++;
		tcpstat.tcps_rcvackbyte += acked;

		if (tcp_do_eifel_detect && acked > 0 &&
		    (to.to_flags & TOF_TS) && (to.to_tsecr != 0) &&
		    (tp->t_flags & TF_FIRSTACCACK)) {
			/* Eifel detection applicable. */
			if (to.to_tsecr < tp->t_rexmtTS) {
				++tcpstat.tcps_eifeldetected;
				tcp_revert_congestion_state(tp);
				if (tp->t_rxtshift == 1 &&
				    ticks >= tp->t_badrxtwin)
					++tcpstat.tcps_rttcantdetect;
			}
		} else if (tp->t_rxtshift == 1 && ticks < tp->t_badrxtwin) {
			/*
			 * If we just performed our first retransmit,
			 * and the ACK arrives within our recovery window,
			 * then it was a mistake to do the retransmit
			 * in the first place.  Recover our original cwnd
			 * and ssthresh, and proceed to transmit where we
			 * left off.
			 */
			tcp_revert_congestion_state(tp);
			++tcpstat.tcps_rttdetected;
		}

		/*
		 * If we have a timestamp reply, update smoothed
		 * round trip time.  If no timestamp is present but
		 * transmit timer is running and timed sequence
		 * number was acked, update smoothed round trip time.
		 * Since we now have an rtt measurement, cancel the
		 * timer backoff (cf., Phil Karn's retransmit alg.).
		 * Recompute the initial retransmit timer.
		 *
		 * Some machines (certain Windows boxes) send broken
		 * timestamp replies during the SYN+ACK phase; ignore
		 * timestamps of 0.
		 */
		if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0))
			tcp_xmit_timer(tp, ticks - to.to_tsecr + 1);
		else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq))
			tcp_xmit_timer(tp, ticks - tp->t_rtttime);
		tcp_xmit_bandwidth_limit(tp, th->th_ack);

		/*
		 * If no data (only SYN) was ACK'd,
		 * skip rest of ACK processing.
		 */
		if (acked == 0)
			goto step6;

		/* Stop looking for an acceptable ACK since one was received. */
		tp->t_flags &= ~(TF_FIRSTACCACK | TF_FASTREXMT | TF_EARLYREXMT);

		if (acked > so->so_snd.ssb_cc) {
			tp->snd_wnd -= so->so_snd.ssb_cc;
			sbdrop(&so->so_snd.sb, (int)so->so_snd.ssb_cc);
			ourfinisacked = TRUE;
		} else {
			sbdrop(&so->so_snd.sb, acked);
			tp->snd_wnd -= acked;
			ourfinisacked = FALSE;
		}
		sowwakeup(so);

		/*
		 * Update window information.
		 * Don't look at window if no ACK:
		 * TAC's send garbage on first SYN.
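		 *
		 * The test below follows the RFC 793 rule: take the
		 * window from a segment only if SND.WL1 < SEG.SEQ, or
		 * SND.WL1 == SEG.SEQ and SND.WL2 <= SEG.ACK, i.e. only
		 * if the segment is at least as recent as the one that
		 * last updated the window, so old reordered segments
		 * cannot shrink it.  The extra tiwin > snd_wnd clause
		 * admits a pure window update on an otherwise duplicate
		 * segment.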
		 */
		if (SEQ_LT(tp->snd_wl1, th->th_seq) ||
		    (tp->snd_wl1 == th->th_seq &&
		     (SEQ_LT(tp->snd_wl2, th->th_ack) ||
		      (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)))) {
			/* keep track of pure window updates */
			if (tlen == 0 && tp->snd_wl2 == th->th_ack &&
			    tiwin > tp->snd_wnd)
				tcpstat.tcps_rcvwinupd++;
			tp->snd_wnd = tiwin;
			tp->snd_wl1 = th->th_seq;
			tp->snd_wl2 = th->th_ack;
			if (tp->snd_wnd > tp->max_sndwnd)
				tp->max_sndwnd = tp->snd_wnd;
			needoutput = TRUE;
		}

		tp->snd_una = th->th_ack;
		if (TCP_DO_SACK(tp))
			tcp_sack_update_scoreboard(tp, &to);
		if (IN_FASTRECOVERY(tp)) {
			if (SEQ_GEQ(th->th_ack, tp->snd_recover)) {
				EXIT_FASTRECOVERY(tp);
				needoutput = TRUE;
				/*
				 * If the congestion window was inflated
				 * to account for the other side's
				 * cached packets, retract it.
				 */
				if (!TCP_DO_SACK(tp))
					tp->snd_cwnd = tp->snd_ssthresh;

				/*
				 * Window inflation should have left us
				 * with approximately snd_ssthresh outstanding
				 * data.  But, in case we would be inclined
				 * to send a burst, better do it using
				 * slow start.
				 */
				if (SEQ_GT(th->th_ack + tp->snd_cwnd,
					   tp->snd_max + 2 * tp->t_maxseg))
					tp->snd_cwnd =
					    (tp->snd_max - tp->snd_una) +
					    2 * tp->t_maxseg;

				tp->snd_wacked = 0;
			} else {
				if (TCP_DO_SACK(tp)) {
					tp->snd_max_rexmt = tp->snd_max;
					tcp_sack_rexmt(tp, th);
				} else {
					tcp_newreno_partial_ack(tp, th, acked);
				}
				needoutput = FALSE;
			}
		} else {
			/*
			 * Open the congestion window.  When in slow-start,
			 * open exponentially: maxseg per packet.  Otherwise,
			 * open linearly: maxseg per window.
			 */
			if (tp->snd_cwnd <= tp->snd_ssthresh) {
				u_int abc_sslimit =
				    (SEQ_LT(tp->snd_nxt, tp->snd_max) ?
				     tp->t_maxseg : 2 * tp->t_maxseg);

				/* slow-start */
				tp->snd_cwnd += tcp_do_abc ?
				    min(acked, abc_sslimit) : tp->t_maxseg;
			} else {
				/* linear increase */
				tp->snd_wacked += tcp_do_abc ? acked :
				    tp->t_maxseg;
				if (tp->snd_wacked >= tp->snd_cwnd) {
					tp->snd_wacked -= tp->snd_cwnd;
					tp->snd_cwnd += tp->t_maxseg;
				}
			}
			tp->snd_cwnd = min(tp->snd_cwnd,
					   TCP_MAXWIN << tp->snd_scale);
			tp->snd_recover = th->th_ack - 1;
		}
		if (SEQ_LT(tp->snd_nxt, tp->snd_una))
			tp->snd_nxt = tp->snd_una;

		/*
		 * If all outstanding data is acked, stop retransmit
		 * timer and remember to restart (more output or persist).
		 * If there is more data to be acked, restart retransmit
		 * timer, using current (possibly backed-off) value.
		 */
		if (th->th_ack == tp->snd_max) {
			tcp_callout_stop(tp, tp->tt_rexmt);
			needoutput = TRUE;
		} else if (!tcp_callout_active(tp, tp->tt_persist)) {
			tcp_callout_reset(tp, tp->tt_rexmt, tp->t_rxtcur,
					  tcp_timer_rexmt);
		}

		switch (tp->t_state) {
		/*
		 * In FIN_WAIT_1 STATE, in addition to the processing
		 * for the ESTABLISHED state, if our FIN is now acknowledged
		 * then enter FIN_WAIT_2.
		 */
		case TCPS_FIN_WAIT_1:
			if (ourfinisacked) {
				/*
				 * If we can't receive any more
				 * data, then closing user can proceed.
				 * Starting the timer is contrary to the
				 * specification, but if we don't get a FIN
				 * we'll hang forever.
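				 *
				 * Note the tt_2msl callout is reused here
				 * as a FIN_WAIT_2 idle timeout: it is armed
				 * with t_maxidle rather than 2*MSL, so a
				 * peer that never sends its FIN cannot pin
				 * this connection (and its buffers) forever
				 * once we can no longer receive.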
				 */
				if (so->so_state & SS_CANTRCVMORE) {
					soisdisconnected(so);
					tcp_callout_reset(tp, tp->tt_2msl,
					    tp->t_maxidle, tcp_timer_2msl);
				}
				tp->t_state = TCPS_FIN_WAIT_2;
			}
			break;

		/*
		 * In CLOSING STATE, in addition to the processing for
		 * the ESTABLISHED state, if the ACK acknowledges our FIN
		 * then enter the TIME-WAIT state, otherwise ignore
		 * the segment.
		 */
		case TCPS_CLOSING:
			if (ourfinisacked) {
				tp->t_state = TCPS_TIME_WAIT;
				tcp_canceltimers(tp);
				tcp_callout_reset(tp, tp->tt_2msl,
				    2 * tcp_rmx_msl(tp),
				    tcp_timer_2msl);
				soisdisconnected(so);
			}
			break;

		/*
		 * In LAST_ACK, we may still be waiting for data to drain
		 * and/or to be acked, as well as for the ack of our FIN.
		 * If our FIN is now acknowledged, delete the TCB,
		 * enter the closed state and return.
		 */
		case TCPS_LAST_ACK:
			if (ourfinisacked) {
				tp = tcp_close(tp);
				goto drop;
			}
			break;

		/*
		 * In TIME_WAIT state the only thing that should arrive
		 * is a retransmission of the remote FIN.  Acknowledge
		 * it and restart the finack timer.
		 */
		case TCPS_TIME_WAIT:
			tcp_callout_reset(tp, tp->tt_2msl, 2 * tcp_rmx_msl(tp),
					  tcp_timer_2msl);
			goto dropafterack;
		}
	}

step6:
	/*
	 * Update window information.
	 * Don't look at window if no ACK: TAC's send garbage on first SYN.
	 */
	if ((thflags & TH_ACK) &&
	    acceptable_window_update(tp, th, tiwin)) {
		/* keep track of pure window updates */
		if (tlen == 0 && tp->snd_wl2 == th->th_ack &&
		    tiwin > tp->snd_wnd)
			tcpstat.tcps_rcvwinupd++;
		tp->snd_wnd = tiwin;
		tp->snd_wl1 = th->th_seq;
		tp->snd_wl2 = th->th_ack;
		if (tp->snd_wnd > tp->max_sndwnd)
			tp->max_sndwnd = tp->snd_wnd;
		needoutput = TRUE;
	}

	/*
	 * Process segments with URG.
	 */
	if ((thflags & TH_URG) && th->th_urp &&
	    !TCPS_HAVERCVDFIN(tp->t_state)) {
		/*
		 * This is a kludge, but if we receive and accept
		 * random urgent pointers, we'll crash in
		 * soreceive.  It's hard to imagine someone
		 * actually wanting to send this much urgent data.
		 */
		if (th->th_urp + so->so_rcv.ssb_cc > sb_max) {
			th->th_urp = 0;	/* XXX */
			thflags &= ~TH_URG;	/* XXX */
			goto dodata;	/* XXX */
		}
		/*
		 * If this segment advances the known urgent pointer,
		 * then mark the data stream.  This should not happen
		 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
		 * a FIN has been received from the remote side.
		 * In these states we ignore the URG.
		 *
		 * According to RFC961 (Assigned Protocols),
		 * the urgent pointer points to the last octet
		 * of urgent data.  We continue, however,
		 * to consider it to indicate the first octet
		 * of data past the urgent section as the original
		 * spec states (in one of two places).
		 */
		if (SEQ_GT(th->th_seq + th->th_urp, tp->rcv_up)) {
			tp->rcv_up = th->th_seq + th->th_urp;
			so->so_oobmark = so->so_rcv.ssb_cc +
			    (tp->rcv_up - tp->rcv_nxt) - 1;
			if (so->so_oobmark == 0)
				sosetstate(so, SS_RCVATMARK);
			sohasoutofband(so);
			tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
		}
		/*
		 * Remove out of band data so it doesn't get presented
		 * to the user.
		 * This can happen independent of advancing the URG pointer,
		 * but if two URG's are pending at once, some out-of-band
		 * data may creep in... ick.
		 */
		if (th->th_urp <= (u_long)tlen &&
		    !(so->so_options & SO_OOBINLINE)) {
			/* hdr drop is delayed */
			tcp_pulloutofband(so, th, m, drop_hdrlen);
		}
	} else {
		/*
		 * If no out of band data is expected,
		 * pull receive urgent pointer along
		 * with the receive window.
		 */
		if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
			tp->rcv_up = tp->rcv_nxt;
	}

dodata:	/* XXX */
	/*
	 * Process the segment text, merging it into the TCP sequencing queue,
	 * and arranging for acknowledgment of receipt if necessary.
	 * This process logically involves adjusting tp->rcv_wnd as data
	 * is presented to the user (this happens in tcp_usrreq.c,
	 * case PRU_RCVD).  If a FIN has already been received on this
	 * connection then we just ignore the text.
	 */
	if ((tlen || (thflags & TH_FIN)) && !TCPS_HAVERCVDFIN(tp->t_state)) {
		m_adj(m, drop_hdrlen);	/* delayed header drop */
		/*
		 * Insert segment which includes th into TCP reassembly queue
		 * with control block tp.  Set thflags to whether reassembly now
		 * includes a segment with FIN.  This handles the common case
		 * inline (segment is the next to be received on an established
		 * connection, and the queue is empty), avoiding linkage into
		 * and removal from the queue and repetition of various
		 * conversions.
		 * Set DELACK for segments received in order, but ack
		 * immediately when segments are out of order (so
		 * fast retransmit can work).
		 */
		if (th->th_seq == tp->rcv_nxt &&
		    LIST_EMPTY(&tp->t_segq) &&
		    TCPS_HAVEESTABLISHED(tp->t_state)) {
			if (DELAY_ACK(tp)) {
				tcp_callout_reset(tp, tp->tt_delack,
				    tcp_delacktime, tcp_timer_delack);
			} else {
				tp->t_flags |= TF_ACKNOW;
			}
			tp->rcv_nxt += tlen;
			thflags = th->th_flags & TH_FIN;
			tcpstat.tcps_rcvpack++;
			tcpstat.tcps_rcvbyte += tlen;
			ND6_HINT(tp);
			if (so->so_state & SS_CANTRCVMORE) {
				m_freem(m);
			} else {
				lwkt_gettoken(&so->so_rcv.ssb_token);
				ssb_appendstream(&so->so_rcv, m);
				lwkt_reltoken(&so->so_rcv.ssb_token);
			}
			sorwakeup(so);
		} else {
			if (!(tp->t_flags & TF_DUPSEG)) {
				/* Initialize SACK report block. */
				tp->reportblk.rblk_start = th->th_seq;
				tp->reportblk.rblk_end = th->th_seq + tlen +
				    ((thflags & TH_FIN) != 0);
			}
			thflags = tcp_reass(tp, th, &tlen, m);
			tp->t_flags |= TF_ACKNOW;
		}

		/*
		 * Note the amount of data that peer has sent into
		 * our window, in order to estimate the sender's
		 * buffer size.
		 */
		len = so->so_rcv.ssb_hiwat - (tp->rcv_adv - tp->rcv_nxt);
	} else {
		m_freem(m);
		thflags &= ~TH_FIN;
	}

	/*
	 * If FIN is received ACK the FIN and let the user know
	 * that the connection is closing.
	 */
	if (thflags & TH_FIN) {
		if (!TCPS_HAVERCVDFIN(tp->t_state)) {
			socantrcvmore(so);
			/*
			 * If connection is half-synchronized
			 * (i.e., NEEDSYN flag on) then delay ACK,
			 * so it may be piggybacked when SYN is sent.
			 * Otherwise, since we received a FIN then no
			 * more input can be expected, send ACK now.
			 */
			if (DELAY_ACK(tp) && (tp->t_flags & TF_NEEDSYN)) {
				tcp_callout_reset(tp, tp->tt_delack,
				    tcp_delacktime, tcp_timer_delack);
			} else {
				tp->t_flags |= TF_ACKNOW;
			}
			tp->rcv_nxt++;
		}

		switch (tp->t_state) {
		/*
		 * In SYN_RECEIVED and ESTABLISHED STATES
		 * enter the CLOSE_WAIT state.
		 */
		case TCPS_SYN_RECEIVED:
			tp->t_starttime = ticks;
			/*FALLTHROUGH*/
		case TCPS_ESTABLISHED:
			tp->t_state = TCPS_CLOSE_WAIT;
			break;

		/*
		 * If still in FIN_WAIT_1 STATE, our FIN has not been acked so
		 * enter the CLOSING state.
		 */
		case TCPS_FIN_WAIT_1:
			tp->t_state = TCPS_CLOSING;
			break;

		/*
		 * In FIN_WAIT_2 state enter the TIME_WAIT state,
		 * starting the time-wait timer, turning off the other
		 * standard timers.
		 */
		case TCPS_FIN_WAIT_2:
			tp->t_state = TCPS_TIME_WAIT;
			tcp_canceltimers(tp);
			tcp_callout_reset(tp, tp->tt_2msl, 2 * tcp_rmx_msl(tp),
					  tcp_timer_2msl);
			soisdisconnected(so);
			break;

		/*
		 * In TIME_WAIT state restart the 2 MSL time_wait timer.
		 */
		case TCPS_TIME_WAIT:
			tcp_callout_reset(tp, tp->tt_2msl, 2 * tcp_rmx_msl(tp),
					  tcp_timer_2msl);
			break;
		}
	}

#ifdef TCPDEBUG
	if (so->so_options & SO_DEBUG)
		tcp_trace(TA_INPUT, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0);
#endif

	/*
	 * Return any desired output.
	 */
	if (needoutput || (tp->t_flags & TF_ACKNOW))
		tcp_output(tp);
	return(IPPROTO_DONE);

dropafterack:
	/*
	 * Generate an ACK dropping incoming segment if it occupies
	 * sequence space, where the ACK reflects our state.
	 *
	 * We can now skip the test for the RST flag since all
	 * paths to this code happen after packets containing
	 * RST have been dropped.
	 *
	 * In the SYN-RECEIVED state, don't send an ACK unless the
	 * segment we received passes the SYN-RECEIVED ACK test.
	 * If it fails send a RST.  This breaks the loop in the
	 * "LAND" DoS attack, and also prevents an ACK storm
	 * between two listening ports that have been sent forged
	 * SYN segments, each with the source address of the other.
	 */
	if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
	    (SEQ_GT(tp->snd_una, th->th_ack) ||
	     SEQ_GT(th->th_ack, tp->snd_max))) {
		rstreason = BANDLIM_RST_OPENPORT;
		goto dropwithreset;
	}
#ifdef TCPDEBUG
	if (so->so_options & SO_DEBUG)
		tcp_trace(TA_DROP, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0);
#endif
	m_freem(m);
	tp->t_flags |= TF_ACKNOW;
	tcp_output(tp);
	return(IPPROTO_DONE);

dropwithreset:
	/*
	 * Generate a RST, dropping incoming segment.
	 * Make ACK acceptable to originator of segment.
	 * Don't bother to respond if destination was broadcast/multicast.
	 */
	if ((thflags & TH_RST) || m->m_flags & (M_BCAST | M_MCAST))
		goto drop;
	if (isipv6) {
		if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
		    IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
			goto drop;
	} else {
		if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
		    IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
		    ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
		    in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
			goto drop;
	}
	/* IPv6 anycast check is done at tcp6_input() */

	/*
	 * Perform bandwidth limiting.
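	 *
	 * badport_bandlim() rate-limits the RSTs generated here, using
	 * rstreason to select the limiting class; a negative return means
	 * the budget for that class is exhausted, so we silently drop
	 * instead of responding.  This keeps floods of segments aimed at
	 * closed or refused ports from turning this host into an RST
	 * amplifier.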
	 */
#ifdef ICMP_BANDLIM
	if (badport_bandlim(rstreason) < 0)
		goto drop;
#endif

#ifdef TCPDEBUG
	if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_DROP, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0);
#endif
	if (thflags & TH_ACK)
		/* mtod() below is safe as long as hdr dropping is delayed */
		tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0, th->th_ack,
			    TH_RST);
	else {
		if (thflags & TH_SYN)
			tlen++;
		/* mtod() below is safe as long as hdr dropping is delayed */
		tcp_respond(tp, mtod(m, void *), th, m, th->th_seq + tlen,
			    (tcp_seq)0, TH_RST | TH_ACK);
	}
	return(IPPROTO_DONE);

drop:
	/*
	 * Drop space held by incoming segment and return.
	 */
#ifdef TCPDEBUG
	if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_DROP, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0);
#endif
	m_freem(m);
	return(IPPROTO_DONE);
}

/*
 * Parse TCP options and place in tcpopt.
 */
static void
tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, boolean_t is_syn)
{
	int opt, optlen, i;

	to->to_flags = 0;
	for (; cnt > 0; cnt -= optlen, cp += optlen) {
		opt = cp[0];
		if (opt == TCPOPT_EOL)
			break;
		if (opt == TCPOPT_NOP)
			optlen = 1;
		else {
			if (cnt < 2)
				break;
			optlen = cp[1];
			if (optlen < 2 || optlen > cnt)
				break;
		}
		switch (opt) {
		case TCPOPT_MAXSEG:
			if (optlen != TCPOLEN_MAXSEG)
				continue;
			if (!is_syn)
				continue;
			to->to_flags |= TOF_MSS;
			bcopy(cp + 2, &to->to_mss, sizeof to->to_mss);
			to->to_mss = ntohs(to->to_mss);
			break;
		case TCPOPT_WINDOW:
			if (optlen != TCPOLEN_WINDOW)
				continue;
			if (!is_syn)
				continue;
			to->to_flags |= TOF_SCALE;
			to->to_requested_s_scale = min(cp[2], TCP_MAX_WINSHIFT);
			break;
		case TCPOPT_TIMESTAMP:
			if (optlen != TCPOLEN_TIMESTAMP)
				continue;
			to->to_flags |= TOF_TS;
			bcopy(cp + 2, &to->to_tsval, sizeof to->to_tsval);
			to->to_tsval = ntohl(to->to_tsval);
			bcopy(cp + 6, &to->to_tsecr, sizeof to->to_tsecr);
			to->to_tsecr = ntohl(to->to_tsecr);
			/*
			 * If echoed timestamp is later than the current time,
			 * fall back to non RFC1323 RTT calculation.
			 */
			if (to->to_tsecr != 0 && TSTMP_GT(to->to_tsecr, ticks))
				to->to_tsecr = 0;
			break;
		case TCPOPT_SACK_PERMITTED:
			if (optlen != TCPOLEN_SACK_PERMITTED)
				continue;
			if (!is_syn)
				continue;
			to->to_flags |= TOF_SACK_PERMITTED;
			break;
		case TCPOPT_SACK:
			if ((optlen - 2) & 0x07)	/* not multiple of 8 */
				continue;
			to->to_nsackblocks = (optlen - 2) / 8;
			to->to_sackblocks = (struct raw_sackblock *)(cp + 2);
			to->to_flags |= TOF_SACK;
			for (i = 0; i < to->to_nsackblocks; i++) {
				struct raw_sackblock *r = &to->to_sackblocks[i];

				r->rblk_start = ntohl(r->rblk_start);
				r->rblk_end = ntohl(r->rblk_end);
			}
			break;
#ifdef TCP_SIGNATURE
		/*
		 * XXX In order to reply to a host which has set the
		 * TCP_SIGNATURE option in its initial SYN, we have to
		 * record the fact that the option was observed here
		 * for the syncache code to perform the correct response.
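		 *
		 * (TCP_SIGNATURE is the TCP-MD5 signature option defined
		 * by RFC 2385, commonly used to protect BGP sessions;
		 * only its presence is recorded at this point.)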
		 */
		case TCPOPT_SIGNATURE:
			if (optlen != TCPOLEN_SIGNATURE)
				continue;
			to->to_flags |= (TOF_SIGNATURE | TOF_SIGLEN);
			break;
#endif /* TCP_SIGNATURE */
		default:
			continue;
		}
	}
}

/*
 * Pull out of band byte out of a segment so
 * it doesn't appear in the user's data queue.
 * It is still reflected in the segment length for
 * sequencing purposes.
 * "off" is the header length whose dropping has been delayed.
 */
static void
tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m, int off)
{
	int cnt = off + th->th_urp - 1;

	while (cnt >= 0) {
		if (m->m_len > cnt) {
			char *cp = mtod(m, caddr_t) + cnt;
			struct tcpcb *tp = sototcpcb(so);

			tp->t_iobc = *cp;
			tp->t_oobflags |= TCPOOB_HAVEDATA;
			bcopy(cp + 1, cp, m->m_len - cnt - 1);
			m->m_len--;
			if (m->m_flags & M_PKTHDR)
				m->m_pkthdr.len--;
			return;
		}
		cnt -= m->m_len;
		m = m->m_next;
		if (m == NULL)
			break;
	}
	panic("tcp_pulloutofband");
}

/*
 * Collect new round-trip time estimate
 * and update averages and current timeout.
 */
static void
tcp_xmit_timer(struct tcpcb *tp, int rtt)
{
	int delta;

	tcpstat.tcps_rttupdated++;
	tp->t_rttupdated++;
	if (tp->t_srtt != 0) {
		/*
		 * srtt is stored as fixed point with 5 bits after the
		 * binary point (i.e., scaled by 32).  The following magic
		 * is equivalent to the smoothing algorithm in rfc793 with
		 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
		 * point).  Adjust rtt to origin 0.
		 */
		delta = ((rtt - 1) << TCP_DELTA_SHIFT)
			- (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));

		if ((tp->t_srtt += delta) <= 0)
			tp->t_srtt = 1;

		/*
		 * We accumulate a smoothed rtt variance (actually, a
		 * smoothed mean difference), then set the retransmit
		 * timer to smoothed rtt + 4 times the smoothed variance.
		 * rttvar is stored as fixed point with 4 bits after the
		 * binary point (scaled by 16).  The following is
		 * equivalent to rfc793 smoothing with an alpha of .75
		 * (rttvar = rttvar*3/4 + |delta| / 4).  This replaces
		 * rfc793's wired-in beta.
		 */
		if (delta < 0)
			delta = -delta;
		delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
		if ((tp->t_rttvar += delta) <= 0)
			tp->t_rttvar = 1;
		if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
			tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
	} else {
		/*
		 * No rtt measurement yet - use the unsmoothed rtt.
		 * Set the variance to half the rtt (so our first
		 * retransmit happens at 3*rtt).
		 */
		tp->t_srtt = rtt << TCP_RTT_SHIFT;
		tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
		tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
	}
	tp->t_rtttime = 0;
	tp->t_rxtshift = 0;

	/*
	 * the retransmit should happen at rtt + 4 * rttvar.
	 * Because of the way we do the smoothing, srtt and rttvar
	 * will each average +1/2 tick of bias.  When we compute
	 * the retransmit timer, we want 1/2 tick of rounding and
	 * 1 extra tick because of +-1/2 tick uncertainty in the
	 * firing of the timer.  The bias will give us exactly the
	 * 1.5 tick we need.  But, because the bias is
	 * statistical, we have to test that we don't drop below
	 * the minimum feasible timer (which is 2 ticks).
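	 *
	 * Worked example (hypothetical first measurement): with
	 * rtt = 9 ticks, the else-branch above sets t_srtt = 9 << 5 = 288
	 * (i.e. 9 ticks at scale 32) and t_rttvar = 9 << 3 = 72 (4.5
	 * ticks at scale 16), so srtt + 4*rttvar = 9 + 18 = 27 ticks,
	 * i.e. the first retransmit indeed fires at about 3 * rtt, as
	 * promised above.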
	 */
	TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
		      max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX);

	/*
	 * We received an ack for a packet that wasn't retransmitted;
	 * it is probably safe to discard any error indications we've
	 * received recently.  This isn't quite right, but close enough
	 * for now (a route might have failed after we sent a segment,
	 * and the return path might not be symmetrical).
	 */
	tp->t_softerror = 0;
}

/*
 * Determine a reasonable value for maxseg size.
 * If the route is known, check route for mtu.
 * If none, use an mss that can be handled on the outgoing
 * interface without forcing IP to fragment; if bigger than
 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES
 * to utilize large mbufs.  If no route is found, route has no mtu,
 * or the destination isn't local, use a default, hopefully conservative
 * size (usually 512 or the default IP max size, but no more than the mtu
 * of the interface), as we can't discover anything about intervening
 * gateways or networks.  We also initialize the congestion/slow start
 * window to be a single segment if the destination isn't local.
 * While looking at the routing entry, we also initialize other path-dependent
 * parameters from pre-set or cached values in the routing entry.
 *
 * Also take into account the space needed for options that we
 * send regularly.  Make maxseg shorter by that amount to assure
 * that we can send maxseg amount of data even when the options
 * are present.  Store the upper limit of the length of options plus
 * data in maxopd.
 *
 * NOTE that this routine is only called when we process an incoming
 * segment, for outgoing segments only tcp_mssopt is called.
 */
void
tcp_mss(struct tcpcb *tp, int offer)
{
	struct rtentry *rt;
	struct ifnet *ifp;
	int rtt, mss;
	u_long bufsize;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so;
#ifdef INET6
	boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) ? TRUE : FALSE);
	size_t min_protoh = isipv6 ?
			    sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
			    sizeof(struct tcpiphdr);
#else
	const boolean_t isipv6 = FALSE;
	const size_t min_protoh = sizeof(struct tcpiphdr);
#endif

	if (isipv6)
		rt = tcp_rtlookup6(&inp->inp_inc);
	else
		rt = tcp_rtlookup(&inp->inp_inc);
	if (rt == NULL) {
		tp->t_maxopd = tp->t_maxseg =
		    (isipv6 ? tcp_v6mssdflt : tcp_mssdflt);
		return;
	}
	ifp = rt->rt_ifp;
	so = inp->inp_socket;

	/*
	 * Offer == 0 means that there was no MSS on the SYN segment,
	 * in this case we use either the interface mtu or tcp_mssdflt.
	 *
	 * An offer which is too large will be cut down later.
	 */
	if (offer == 0) {
		if (isipv6) {
			if (in6_localaddr(&inp->in6p_faddr)) {
				offer = ND_IFINFO(rt->rt_ifp)->linkmtu -
					min_protoh;
			} else {
				offer = tcp_v6mssdflt;
			}
		} else {
			if (in_localaddr(inp->inp_faddr))
				offer = ifp->if_mtu - min_protoh;
			else
				offer = tcp_mssdflt;
		}
	}

	/*
	 * Prevent DoS attack with too small MSS.  Round up
	 * to at least minmss.
	 *
	 * Sanity check: make sure that maxopd will be large
	 * enough to allow some data on segments even if all
	 * the option space is used (40 bytes).  Otherwise
	 * funny things may happen in tcp_output.
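	 *
	 * For example, with the clamp to at least 64 below, even the
	 * worst case of 40 bytes of TCP options per segment still
	 * leaves room for a minimum of 24 data bytes in each segment.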
	 */
	offer = max(offer, tcp_minmss);
	offer = max(offer, 64);

	rt->rt_rmx.rmx_mssopt = offer;

	/*
	 * While we're here, check if there's an initial rtt
	 * or rttvar.  Convert from the route-table units
	 * to scaled multiples of the slow timeout timer.
	 */
	if (tp->t_srtt == 0 && (rtt = rt->rt_rmx.rmx_rtt)) {
		/*
		 * XXX the lock bit for RTT indicates that the value
		 * is also a minimum value; this is subject to time.
		 */
		if (rt->rt_rmx.rmx_locks & RTV_RTT)
			tp->t_rttmin = rtt / (RTM_RTTUNIT / hz);
		tp->t_srtt = rtt / (RTM_RTTUNIT / (hz * TCP_RTT_SCALE));
		tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE;
		tcpstat.tcps_usedrtt++;
		if (rt->rt_rmx.rmx_rttvar) {
			tp->t_rttvar = rt->rt_rmx.rmx_rttvar /
				       (RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE));
			tcpstat.tcps_usedrttvar++;
		} else {
			/* default variation is +- 1 rtt */
			tp->t_rttvar =
			    tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
		}
		TCPT_RANGESET(tp->t_rxtcur,
			      ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
			      tp->t_rttmin, TCPTV_REXMTMAX);
	}

	/*
	 * If there's an mtu associated with the route, use it;
	 * else, use the link mtu.  Take the smaller of mss or offer
	 * as our final mss.
	 */
	if (rt->rt_rmx.rmx_mtu) {
		mss = rt->rt_rmx.rmx_mtu - min_protoh;
	} else {
		if (isipv6)
			mss = ND_IFINFO(rt->rt_ifp)->linkmtu - min_protoh;
		else
			mss = ifp->if_mtu - min_protoh;
	}
	mss = min(mss, offer);

	/*
	 * maxopd stores the maximum length of data AND options
	 * in a segment; maxseg is the amount of data in a normal
	 * segment.  We need to store this value (maxopd) apart
	 * from maxseg, because now every segment carries options
	 * and thus we normally have somewhat less data in segments.
	 */
	tp->t_maxopd = mss;

	if ((tp->t_flags & (TF_REQ_TSTMP | TF_NOOPT)) == TF_REQ_TSTMP &&
	    ((tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP))
		mss -= TCPOLEN_TSTAMP_APPA;

#if (MCLBYTES & (MCLBYTES - 1)) == 0
	if (mss > MCLBYTES)
		mss &= ~(MCLBYTES-1);
#else
	if (mss > MCLBYTES)
		mss = mss / MCLBYTES * MCLBYTES;
#endif
	/*
	 * If there's a pipesize, change the socket buffer
	 * to that size.  Make the socket buffers an integral
	 * number of mss units; if the mss is larger than
	 * the socket buffer, decrease the mss.
	 */
#ifdef RTV_SPIPE
	if ((bufsize = rt->rt_rmx.rmx_sendpipe) == 0)
#endif
		bufsize = so->so_snd.ssb_hiwat;
	if (bufsize < mss)
		mss = bufsize;
	else {
		bufsize = roundup(bufsize, mss);
		if (bufsize > sb_max)
			bufsize = sb_max;
		if (bufsize > so->so_snd.ssb_hiwat)
			ssb_reserve(&so->so_snd, bufsize, so, NULL);
	}
	tp->t_maxseg = mss;

#ifdef RTV_RPIPE
	if ((bufsize = rt->rt_rmx.rmx_recvpipe) == 0)
#endif
		bufsize = so->so_rcv.ssb_hiwat;
	if (bufsize > mss) {
		bufsize = roundup(bufsize, mss);
		if (bufsize > sb_max)
			bufsize = sb_max;
		if (bufsize > so->so_rcv.ssb_hiwat) {
			lwkt_gettoken(&so->so_rcv.ssb_token);
			ssb_reserve(&so->so_rcv, bufsize, so, NULL);
			lwkt_reltoken(&so->so_rcv.ssb_token);
		}
	}

	/*
	 * Set the slow-start flight size depending on whether this
	 * is a local network or not.
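	 *
	 * With RFC 3390 enabled the initial window is
	 * min(4 * mss, max(2 * mss, 4380)) bytes; e.g. for a common
	 * mss of 1460 this is min(5840, 4380) = 4380 bytes, i.e. three
	 * segments, instead of the single segment used otherwise.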
	 */
	if (tcp_do_rfc3390)
		tp->snd_cwnd = min(4 * mss, max(2 * mss, 4380));
	else
		tp->snd_cwnd = mss;

	if (rt->rt_rmx.rmx_ssthresh) {
		/*
		 * There's some sort of gateway or interface
		 * buffer limit on the path.  Use this to set
		 * the slow start threshold, but set the
		 * threshold to no less than 2*mss.
		 */
		tp->snd_ssthresh = max(2 * mss, rt->rt_rmx.rmx_ssthresh);
		tcpstat.tcps_usedssthresh++;
	}
}

/*
 * Determine the MSS option to send on an outgoing SYN.
 */
int
tcp_mssopt(struct tcpcb *tp)
{
	struct rtentry *rt;
#ifdef INET6
	boolean_t isipv6 =
	    ((tp->t_inpcb->inp_vflag & INP_IPV6) ? TRUE : FALSE);
	int min_protoh = isipv6 ?
			     sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
			     sizeof(struct tcpiphdr);
#else
	const boolean_t isipv6 = FALSE;
	const size_t min_protoh = sizeof(struct tcpiphdr);
#endif

	if (isipv6)
		rt = tcp_rtlookup6(&tp->t_inpcb->inp_inc);
	else
		rt = tcp_rtlookup(&tp->t_inpcb->inp_inc);
	if (rt == NULL)
		return (isipv6 ? tcp_v6mssdflt : tcp_mssdflt);

	return (rt->rt_ifp->if_mtu - min_protoh);
}

/*
 * When a partial ack arrives, force the retransmission of the
 * next unacknowledged segment.  Do not exit Fast Recovery.
 *
 * Implement the Slow-but-Steady variant of NewReno by restarting the
 * retransmission timer.  Turn it off here so it can be restarted
 * later in tcp_output().
 */
static void
tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th, int acked)
{
	tcp_seq old_snd_nxt = tp->snd_nxt;
	u_long ocwnd = tp->snd_cwnd;

	tcp_callout_stop(tp, tp->tt_rexmt);
	tp->t_rtttime = 0;
	tp->snd_nxt = th->th_ack;
	/* Set snd_cwnd to one segment beyond acknowledged offset. */
	tp->snd_cwnd = tp->t_maxseg;
	tp->t_flags |= TF_ACKNOW;
	tcp_output(tp);
	if (SEQ_GT(old_snd_nxt, tp->snd_nxt))
		tp->snd_nxt = old_snd_nxt;
	/* partial window deflation */
	if (ocwnd > acked)
		tp->snd_cwnd = ocwnd - acked + tp->t_maxseg;
	else
		tp->snd_cwnd = tp->t_maxseg;
}

/*
 * In contrast to the Slow-but-Steady NewReno variant,
 * we do not reset the retransmission timer for SACK retransmissions,
 * except when retransmitting snd_una.
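 *
 * Restarting the timer only when snd_una itself is retransmitted
 * keeps it timing the oldest outstanding data; resetting it on
 * every SACK-driven retransmission would keep pushing the timeout
 * into the future and could delay recovery from a lost
 * retransmission.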
 */
static void
tcp_sack_rexmt(struct tcpcb *tp, struct tcphdr *th)
{
	uint32_t pipe, seglen;
	tcp_seq nextrexmt;
	boolean_t lostdup;
	tcp_seq old_snd_nxt = tp->snd_nxt;
	u_long ocwnd = tp->snd_cwnd;
	int nseg = 0;		/* consecutive new segments */
#define MAXBURST 4		/* limit burst of new packets on partial ack */

	tp->t_rtttime = 0;
	pipe = tcp_sack_compute_pipe(tp);
	while ((tcp_seq_diff_t)(ocwnd - pipe) >= (tcp_seq_diff_t)tp->t_maxseg &&
	    (!tcp_do_smartsack || nseg < MAXBURST) &&
	    tcp_sack_nextseg(tp, &nextrexmt, &seglen, &lostdup)) {
		uint32_t sent;
		tcp_seq old_snd_max;
		int error;

		if (nextrexmt == tp->snd_max)
			++nseg;
		tp->snd_nxt = nextrexmt;
		tp->snd_cwnd = nextrexmt - tp->snd_una + seglen;
		old_snd_max = tp->snd_max;
		if (nextrexmt == tp->snd_una)
			tcp_callout_stop(tp, tp->tt_rexmt);
		error = tcp_output(tp);
		if (error != 0)
			break;
		sent = tp->snd_nxt - nextrexmt;
		if (sent <= 0)
			break;
		if (!lostdup)
			pipe += sent;
		tcpstat.tcps_sndsackpack++;
		tcpstat.tcps_sndsackbyte += sent;
		if (SEQ_LT(nextrexmt, old_snd_max) &&
		    SEQ_LT(tp->rexmt_high, tp->snd_nxt))
			tp->rexmt_high = seq_min(tp->snd_nxt, old_snd_max);
	}
	if (SEQ_GT(old_snd_nxt, tp->snd_nxt))
		tp->snd_nxt = old_snd_nxt;
	tp->snd_cwnd = ocwnd;
}

/*
 * Reset idle time and keep-alive timer, typically called when a valid
 * tcp packet is received but may also be called when FASTKEEP is set
 * to prevent the previously scheduled long timeout from maturing into
 * a drop.
 *
 * Only update t_rcvtime for non-SYN packets.
 *
 * Handle the case where one side thinks the connection is established
 * but the other side has, say, rebooted without cleaning out the
 * connection.  The SYNs could be construed as an attack and wind
 * up ignored, but in case it isn't an attack we can validate the
 * connection by forcing a keepalive.
 */
void
tcp_timer_keep_activity(struct tcpcb *tp, int thflags)
{
	if (TCPS_HAVEESTABLISHED(tp->t_state)) {
		if ((thflags & (TH_SYN | TH_ACK)) == TH_SYN) {
			tp->t_flags |= TF_KEEPALIVE;
			tcp_callout_reset(tp, tp->tt_keep, hz / 2,
					  tcp_timer_keep);
		} else {
			tp->t_rcvtime = ticks;
			tp->t_flags &= ~TF_KEEPALIVE;
			tcp_callout_reset(tp, tp->tt_keep,
					  tcp_getkeepidle(tp),
					  tcp_timer_keep);
		}
	}
}

static int
tcp_rmx_msl(const struct tcpcb *tp)
{
	struct rtentry *rt;
	struct inpcb *inp = tp->t_inpcb;
	int msl;
#ifdef INET6
	boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) ? TRUE : FALSE);
#else
	const boolean_t isipv6 = FALSE;
#endif

	if (isipv6)
		rt = tcp_rtlookup6(&inp->inp_inc);
	else
		rt = tcp_rtlookup(&inp->inp_inc);
	if (rt == NULL || rt->rt_rmx.rmx_msl == 0)
		return tcp_msl;

	msl = (rt->rt_rmx.rmx_msl * hz) / 1000;
	if (msl == 0)
		msl = 1;

	return msl;
}