1 /* $NetBSD: tcp_subr.c,v 1.262 2015/05/19 17:33:43 kefren Exp $ */ 2 3 /* 4 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. Neither the name of the project nor the names of its contributors 16 * may be used to endorse or promote products derived from this software 17 * without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 30 */ 31 32 /*- 33 * Copyright (c) 1997, 1998, 2000, 2001, 2008 The NetBSD Foundation, Inc. 34 * All rights reserved. 35 * 36 * This code is derived from software contributed to The NetBSD Foundation 37 * by Jason R. Thorpe and Kevin M. Lahey of the Numerical Aerospace Simulation 38 * Facility, NASA Ames Research Center. 39 * 40 * Redistribution and use in source and binary forms, with or without 41 * modification, are permitted provided that the following conditions 42 * are met: 43 * 1. Redistributions of source code must retain the above copyright 44 * notice, this list of conditions and the following disclaimer. 45 * 2. Redistributions in binary form must reproduce the above copyright 46 * notice, this list of conditions and the following disclaimer in the 47 * documentation and/or other materials provided with the distribution. 48 * 49 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 50 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 51 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 52 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 53 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 54 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 55 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 56 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 57 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 58 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 59 * POSSIBILITY OF SUCH DAMAGE. 60 */ 61 62 /* 63 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995 64 * The Regents of the University of California. All rights reserved. 
65 * 66 * Redistribution and use in source and binary forms, with or without 67 * modification, are permitted provided that the following conditions 68 * are met: 69 * 1. Redistributions of source code must retain the above copyright 70 * notice, this list of conditions and the following disclaimer. 71 * 2. Redistributions in binary form must reproduce the above copyright 72 * notice, this list of conditions and the following disclaimer in the 73 * documentation and/or other materials provided with the distribution. 74 * 3. Neither the name of the University nor the names of its contributors 75 * may be used to endorse or promote products derived from this software 76 * without specific prior written permission. 77 * 78 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 79 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 80 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 81 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 82 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 83 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 84 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 85 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 86 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 87 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 88 * SUCH DAMAGE. 89 * 90 * @(#)tcp_subr.c 8.2 (Berkeley) 5/24/95 91 */ 92 93 #include <sys/cdefs.h> 94 __KERNEL_RCSID(0, "$NetBSD: tcp_subr.c,v 1.262 2015/05/19 17:33:43 kefren Exp $"); 95 96 #include "opt_inet.h" 97 #include "opt_ipsec.h" 98 #include "opt_tcp_compat_42.h" 99 #include "opt_inet_csum.h" 100 #include "opt_mbuftrace.h" 101 102 #include <sys/param.h> 103 #include <sys/atomic.h> 104 #include <sys/proc.h> 105 #include <sys/systm.h> 106 #include <sys/mbuf.h> 107 #include <sys/once.h> 108 #include <sys/socket.h> 109 #include <sys/socketvar.h> 110 #include <sys/protosw.h> 111 #include <sys/errno.h> 112 #include <sys/kernel.h> 113 #include <sys/pool.h> 114 #include <sys/md5.h> 115 #include <sys/cprng.h> 116 117 #include <net/route.h> 118 #include <net/if.h> 119 120 #include <netinet/in.h> 121 #include <netinet/in_systm.h> 122 #include <netinet/ip.h> 123 #include <netinet/in_pcb.h> 124 #include <netinet/ip_var.h> 125 #include <netinet/ip_icmp.h> 126 127 #ifdef INET6 128 #ifndef INET 129 #include <netinet/in.h> 130 #endif 131 #include <netinet/ip6.h> 132 #include <netinet6/in6_pcb.h> 133 #include <netinet6/ip6_var.h> 134 #include <netinet6/in6_var.h> 135 #include <netinet6/ip6protosw.h> 136 #include <netinet/icmp6.h> 137 #include <netinet6/nd6.h> 138 #endif 139 140 #include <netinet/tcp.h> 141 #include <netinet/tcp_fsm.h> 142 #include <netinet/tcp_seq.h> 143 #include <netinet/tcp_timer.h> 144 #include <netinet/tcp_var.h> 145 #include <netinet/tcp_vtw.h> 146 #include <netinet/tcp_private.h> 147 #include <netinet/tcp_congctl.h> 148 #include <netinet/tcpip.h> 149 150 #ifdef IPSEC 151 #include <netipsec/ipsec.h> 152 #include <netipsec/xform.h> 153 #ifdef INET6 154 #include <netipsec/ipsec6.h> 155 #endif 156 #include <netipsec/key.h> 157 #endif /* IPSEC*/ 158 159 160 struct inpcbtable tcbtable; /* head of queue of active tcpcb's */ 161 u_int32_t tcp_now; /* slow ticks, for RFC 1323 timestamps */ 162 163 percpu_t *tcpstat_percpu; 164 165 /* patchable/settable parameters for tcp */ 166 int tcp_mssdflt = TCP_MSS; 167 
int tcp_minmss = TCP_MINMSS; 168 int tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ; 169 int tcp_do_rfc1323 = 1; /* window scaling / timestamps (obsolete) */ 170 int tcp_do_rfc1948 = 0; /* ISS by cryptographic hash */ 171 int tcp_do_sack = 1; /* selective acknowledgement */ 172 int tcp_do_win_scale = 1; /* RFC1323 window scaling */ 173 int tcp_do_timestamps = 1; /* RFC1323 timestamps */ 174 int tcp_ack_on_push = 0; /* set to enable immediate ACK-on-PUSH */ 175 int tcp_do_ecn = 0; /* Explicit Congestion Notification */ 176 #ifndef TCP_INIT_WIN 177 #define TCP_INIT_WIN 4 /* initial slow start window */ 178 #endif 179 #ifndef TCP_INIT_WIN_LOCAL 180 #define TCP_INIT_WIN_LOCAL 4 /* initial slow start window for local nets */ 181 #endif 182 /* 183 * Up to 5 we scale linearly, to reach 3 * 1460; then (iw) * 1460. 184 * This is to simulate current behavior for iw == 4 185 */ 186 int tcp_init_win_max[] = { 187 1 * 1460, 188 1 * 1460, 189 2 * 1460, 190 2 * 1460, 191 3 * 1460, 192 5 * 1460, 193 6 * 1460, 194 7 * 1460, 195 8 * 1460, 196 9 * 1460, 197 10 * 1460 198 }; 199 int tcp_init_win = TCP_INIT_WIN; 200 int tcp_init_win_local = TCP_INIT_WIN_LOCAL; 201 int tcp_mss_ifmtu = 0; 202 #ifdef TCP_COMPAT_42 203 int tcp_compat_42 = 1; 204 #else 205 int tcp_compat_42 = 0; 206 #endif 207 int tcp_rst_ppslim = 100; /* 100pps */ 208 int tcp_ackdrop_ppslim = 100; /* 100pps */ 209 int tcp_do_loopback_cksum = 0; 210 int tcp_do_abc = 1; /* RFC3465 Appropriate byte counting. */ 211 int tcp_abc_aggressive = 1; /* 1: L=2*SMSS 0: L=1*SMSS */ 212 int tcp_sack_tp_maxholes = 32; 213 int tcp_sack_globalmaxholes = 1024; 214 int tcp_sack_globalholes = 0; 215 int tcp_ecn_maxretries = 1; 216 int tcp_msl_enable = 1; /* enable TIME_WAIT truncation */ 217 int tcp_msl_loop = PR_SLOWHZ; /* MSL for loopback */ 218 int tcp_msl_local = 5 * PR_SLOWHZ; /* MSL for 'local' */ 219 int tcp_msl_remote = TCPTV_MSL; /* MSL otherwise */ 220 int tcp_msl_remote_threshold = TCPTV_SRTTDFLT; /* RTT threshold */ 221 int tcp_rttlocal = 0; /* Use RTT to decide who's 'local' */ 222 223 int tcp4_vtw_enable = 0; /* 1 to enable */ 224 int tcp6_vtw_enable = 0; /* 1 to enable */ 225 int tcp_vtw_was_enabled = 0; 226 int tcp_vtw_entries = 1 << 4; /* 16 vestigial TIME_WAIT entries */ 227 228 /* tcb hash */ 229 #ifndef TCBHASHSIZE 230 #define TCBHASHSIZE 128 231 #endif 232 int tcbhashsize = TCBHASHSIZE; 233 234 /* syn hash parameters */ 235 #define TCP_SYN_HASH_SIZE 293 236 #define TCP_SYN_BUCKET_SIZE 35 237 int tcp_syn_cache_size = TCP_SYN_HASH_SIZE; 238 int tcp_syn_cache_limit = TCP_SYN_HASH_SIZE*TCP_SYN_BUCKET_SIZE; 239 int tcp_syn_bucket_limit = 3*TCP_SYN_BUCKET_SIZE; 240 struct syn_cache_head tcp_syn_cache[TCP_SYN_HASH_SIZE]; 241 242 int tcp_freeq(struct tcpcb *); 243 static int tcp_iss_secret_init(void); 244 245 #ifdef INET 246 static void tcp_mtudisc_callback(struct in_addr); 247 #endif 248 249 #ifdef INET6 250 void tcp6_mtudisc(struct in6pcb *, int); 251 #endif 252 253 static struct pool tcpcb_pool; 254 255 static int tcp_drainwanted; 256 257 #ifdef TCP_CSUM_COUNTERS 258 #include <sys/device.h> 259 260 #if defined(INET) 261 struct evcnt tcp_hwcsum_bad = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, 262 NULL, "tcp", "hwcsum bad"); 263 struct evcnt tcp_hwcsum_ok = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, 264 NULL, "tcp", "hwcsum ok"); 265 struct evcnt tcp_hwcsum_data = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, 266 NULL, "tcp", "hwcsum data"); 267 struct evcnt tcp_swcsum = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, 268 NULL, "tcp", "swcsum"); 269 270 EVCNT_ATTACH_STATIC(tcp_hwcsum_bad); 271 
EVCNT_ATTACH_STATIC(tcp_hwcsum_ok); 272 EVCNT_ATTACH_STATIC(tcp_hwcsum_data); 273 EVCNT_ATTACH_STATIC(tcp_swcsum); 274 #endif /* defined(INET) */ 275 276 #if defined(INET6) 277 struct evcnt tcp6_hwcsum_bad = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, 278 NULL, "tcp6", "hwcsum bad"); 279 struct evcnt tcp6_hwcsum_ok = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, 280 NULL, "tcp6", "hwcsum ok"); 281 struct evcnt tcp6_hwcsum_data = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, 282 NULL, "tcp6", "hwcsum data"); 283 struct evcnt tcp6_swcsum = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, 284 NULL, "tcp6", "swcsum"); 285 286 EVCNT_ATTACH_STATIC(tcp6_hwcsum_bad); 287 EVCNT_ATTACH_STATIC(tcp6_hwcsum_ok); 288 EVCNT_ATTACH_STATIC(tcp6_hwcsum_data); 289 EVCNT_ATTACH_STATIC(tcp6_swcsum); 290 #endif /* defined(INET6) */ 291 #endif /* TCP_CSUM_COUNTERS */ 292 293 294 #ifdef TCP_OUTPUT_COUNTERS 295 #include <sys/device.h> 296 297 struct evcnt tcp_output_bigheader = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, 298 NULL, "tcp", "output big header"); 299 struct evcnt tcp_output_predict_hit = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, 300 NULL, "tcp", "output predict hit"); 301 struct evcnt tcp_output_predict_miss = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, 302 NULL, "tcp", "output predict miss"); 303 struct evcnt tcp_output_copysmall = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, 304 NULL, "tcp", "output copy small"); 305 struct evcnt tcp_output_copybig = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, 306 NULL, "tcp", "output copy big"); 307 struct evcnt tcp_output_refbig = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, 308 NULL, "tcp", "output reference big"); 309 310 EVCNT_ATTACH_STATIC(tcp_output_bigheader); 311 EVCNT_ATTACH_STATIC(tcp_output_predict_hit); 312 EVCNT_ATTACH_STATIC(tcp_output_predict_miss); 313 EVCNT_ATTACH_STATIC(tcp_output_copysmall); 314 EVCNT_ATTACH_STATIC(tcp_output_copybig); 315 EVCNT_ATTACH_STATIC(tcp_output_refbig); 316 317 #endif /* TCP_OUTPUT_COUNTERS */ 318 319 #ifdef TCP_REASS_COUNTERS 320 #include <sys/device.h> 321 322 struct evcnt tcp_reass_ = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, 323 NULL, "tcp_reass", "calls"); 324 struct evcnt tcp_reass_empty = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, 325 &tcp_reass_, "tcp_reass", "insert into empty queue"); 326 struct evcnt tcp_reass_iteration[8] = { 327 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", ">7 iterations"), 328 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "1 iteration"), 329 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "2 iterations"), 330 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "3 iterations"), 331 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "4 iterations"), 332 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "5 iterations"), 333 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "6 iterations"), 334 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "7 iterations"), 335 }; 336 struct evcnt tcp_reass_prependfirst = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, 337 &tcp_reass_, "tcp_reass", "prepend to first"); 338 struct evcnt tcp_reass_prepend = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, 339 &tcp_reass_, "tcp_reass", "prepend"); 340 struct evcnt tcp_reass_insert = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, 341 &tcp_reass_, "tcp_reass", "insert"); 342 struct evcnt tcp_reass_inserttail = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, 343 &tcp_reass_, "tcp_reass", "insert at tail"); 344 struct evcnt tcp_reass_append = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, 345 &tcp_reass_, "tcp_reass", "append"); 346 struct evcnt tcp_reass_appendtail = 
EVCNT_INITIALIZER(EVCNT_TYPE_MISC, 347 &tcp_reass_, "tcp_reass", "append to tail fragment"); 348 struct evcnt tcp_reass_overlaptail = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, 349 &tcp_reass_, "tcp_reass", "overlap at end"); 350 struct evcnt tcp_reass_overlapfront = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, 351 &tcp_reass_, "tcp_reass", "overlap at start"); 352 struct evcnt tcp_reass_segdup = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, 353 &tcp_reass_, "tcp_reass", "duplicate segment"); 354 struct evcnt tcp_reass_fragdup = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, 355 &tcp_reass_, "tcp_reass", "duplicate fragment"); 356 357 EVCNT_ATTACH_STATIC(tcp_reass_); 358 EVCNT_ATTACH_STATIC(tcp_reass_empty); 359 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 0); 360 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 1); 361 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 2); 362 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 3); 363 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 4); 364 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 5); 365 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 6); 366 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 7); 367 EVCNT_ATTACH_STATIC(tcp_reass_prependfirst); 368 EVCNT_ATTACH_STATIC(tcp_reass_prepend); 369 EVCNT_ATTACH_STATIC(tcp_reass_insert); 370 EVCNT_ATTACH_STATIC(tcp_reass_inserttail); 371 EVCNT_ATTACH_STATIC(tcp_reass_append); 372 EVCNT_ATTACH_STATIC(tcp_reass_appendtail); 373 EVCNT_ATTACH_STATIC(tcp_reass_overlaptail); 374 EVCNT_ATTACH_STATIC(tcp_reass_overlapfront); 375 EVCNT_ATTACH_STATIC(tcp_reass_segdup); 376 EVCNT_ATTACH_STATIC(tcp_reass_fragdup); 377 378 #endif /* TCP_REASS_COUNTERS */ 379 380 #ifdef MBUFTRACE 381 struct mowner tcp_mowner = MOWNER_INIT("tcp", ""); 382 struct mowner tcp_rx_mowner = MOWNER_INIT("tcp", "rx"); 383 struct mowner tcp_tx_mowner = MOWNER_INIT("tcp", "tx"); 384 struct mowner tcp_sock_mowner = MOWNER_INIT("tcp", "sock"); 385 struct mowner tcp_sock_rx_mowner = MOWNER_INIT("tcp", "sock rx"); 386 struct mowner tcp_sock_tx_mowner = MOWNER_INIT("tcp", "sock tx"); 387 #endif 388 389 callout_t tcp_slowtimo_ch; 390 391 static int 392 do_tcpinit(void) 393 { 394 395 in_pcbinit(&tcbtable, tcbhashsize, tcbhashsize); 396 pool_init(&tcpcb_pool, sizeof(struct tcpcb), 0, 0, 0, "tcpcbpl", 397 NULL, IPL_SOFTNET); 398 399 tcp_usrreq_init(); 400 401 /* Initialize timer state. */ 402 tcp_timer_init(); 403 404 /* Initialize the compressed state engine. */ 405 syn_cache_init(); 406 407 /* Initialize the congestion control algorithms. */ 408 tcp_congctl_init(); 409 410 /* Initialize the TCPCB template. 
*/ 411 tcp_tcpcb_template(); 412 413 /* Initialize reassembly queue */ 414 tcpipqent_init(); 415 416 /* SACK */ 417 tcp_sack_init(); 418 419 MOWNER_ATTACH(&tcp_tx_mowner); 420 MOWNER_ATTACH(&tcp_rx_mowner); 421 MOWNER_ATTACH(&tcp_reass_mowner); 422 MOWNER_ATTACH(&tcp_sock_mowner); 423 MOWNER_ATTACH(&tcp_sock_tx_mowner); 424 MOWNER_ATTACH(&tcp_sock_rx_mowner); 425 MOWNER_ATTACH(&tcp_mowner); 426 427 tcpstat_percpu = percpu_alloc(sizeof(uint64_t) * TCP_NSTATS); 428 429 vtw_earlyinit(); 430 431 callout_init(&tcp_slowtimo_ch, CALLOUT_MPSAFE); 432 callout_reset(&tcp_slowtimo_ch, 1, tcp_slowtimo, NULL); 433 434 return 0; 435 } 436 437 void 438 tcp_init_common(unsigned basehlen) 439 { 440 static ONCE_DECL(dotcpinit); 441 unsigned hlen = basehlen + sizeof(struct tcphdr); 442 unsigned oldhlen; 443 444 if (max_linkhdr + hlen > MHLEN) 445 panic("tcp_init"); 446 while ((oldhlen = max_protohdr) < hlen) 447 atomic_cas_uint(&max_protohdr, oldhlen, hlen); 448 449 RUN_ONCE(&dotcpinit, do_tcpinit); 450 } 451 452 /* 453 * Tcp initialization 454 */ 455 void 456 tcp_init(void) 457 { 458 459 icmp_mtudisc_callback_register(tcp_mtudisc_callback); 460 461 tcp_init_common(sizeof(struct ip)); 462 } 463 464 /* 465 * Create template to be used to send tcp packets on a connection. 466 * Call after host entry created, allocates an mbuf and fills 467 * in a skeletal tcp/ip header, minimizing the amount of work 468 * necessary when the connection is used. 469 */ 470 struct mbuf * 471 tcp_template(struct tcpcb *tp) 472 { 473 struct inpcb *inp = tp->t_inpcb; 474 #ifdef INET6 475 struct in6pcb *in6p = tp->t_in6pcb; 476 #endif 477 struct tcphdr *n; 478 struct mbuf *m; 479 int hlen; 480 481 switch (tp->t_family) { 482 case AF_INET: 483 hlen = sizeof(struct ip); 484 if (inp) 485 break; 486 #ifdef INET6 487 if (in6p) { 488 /* mapped addr case */ 489 if (IN6_IS_ADDR_V4MAPPED(&in6p->in6p_laddr) 490 && IN6_IS_ADDR_V4MAPPED(&in6p->in6p_faddr)) 491 break; 492 } 493 #endif 494 return NULL; /*EINVAL*/ 495 #ifdef INET6 496 case AF_INET6: 497 hlen = sizeof(struct ip6_hdr); 498 if (in6p) { 499 /* more sainty check? 
*/ 500 break; 501 } 502 return NULL; /*EINVAL*/ 503 #endif 504 default: 505 hlen = 0; /*pacify gcc*/ 506 return NULL; /*EAFNOSUPPORT*/ 507 } 508 #ifdef DIAGNOSTIC 509 if (hlen + sizeof(struct tcphdr) > MCLBYTES) 510 panic("mclbytes too small for t_template"); 511 #endif 512 m = tp->t_template; 513 if (m && m->m_len == hlen + sizeof(struct tcphdr)) 514 ; 515 else { 516 if (m) 517 m_freem(m); 518 m = tp->t_template = NULL; 519 MGETHDR(m, M_DONTWAIT, MT_HEADER); 520 if (m && hlen + sizeof(struct tcphdr) > MHLEN) { 521 MCLGET(m, M_DONTWAIT); 522 if ((m->m_flags & M_EXT) == 0) { 523 m_free(m); 524 m = NULL; 525 } 526 } 527 if (m == NULL) 528 return NULL; 529 MCLAIM(m, &tcp_mowner); 530 m->m_pkthdr.len = m->m_len = hlen + sizeof(struct tcphdr); 531 } 532 533 memset(mtod(m, void *), 0, m->m_len); 534 535 n = (struct tcphdr *)(mtod(m, char *) + hlen); 536 537 switch (tp->t_family) { 538 case AF_INET: 539 { 540 struct ipovly *ipov; 541 mtod(m, struct ip *)->ip_v = 4; 542 mtod(m, struct ip *)->ip_hl = hlen >> 2; 543 ipov = mtod(m, struct ipovly *); 544 ipov->ih_pr = IPPROTO_TCP; 545 ipov->ih_len = htons(sizeof(struct tcphdr)); 546 if (inp) { 547 ipov->ih_src = inp->inp_laddr; 548 ipov->ih_dst = inp->inp_faddr; 549 } 550 #ifdef INET6 551 else if (in6p) { 552 /* mapped addr case */ 553 bcopy(&in6p->in6p_laddr.s6_addr32[3], &ipov->ih_src, 554 sizeof(ipov->ih_src)); 555 bcopy(&in6p->in6p_faddr.s6_addr32[3], &ipov->ih_dst, 556 sizeof(ipov->ih_dst)); 557 } 558 #endif 559 /* 560 * Compute the pseudo-header portion of the checksum 561 * now. We incrementally add in the TCP option and 562 * payload lengths later, and then compute the TCP 563 * checksum right before the packet is sent off onto 564 * the wire. 565 */ 566 n->th_sum = in_cksum_phdr(ipov->ih_src.s_addr, 567 ipov->ih_dst.s_addr, 568 htons(sizeof(struct tcphdr) + IPPROTO_TCP)); 569 break; 570 } 571 #ifdef INET6 572 case AF_INET6: 573 { 574 struct ip6_hdr *ip6; 575 mtod(m, struct ip *)->ip_v = 6; 576 ip6 = mtod(m, struct ip6_hdr *); 577 ip6->ip6_nxt = IPPROTO_TCP; 578 ip6->ip6_plen = htons(sizeof(struct tcphdr)); 579 ip6->ip6_src = in6p->in6p_laddr; 580 ip6->ip6_dst = in6p->in6p_faddr; 581 ip6->ip6_flow = in6p->in6p_flowinfo & IPV6_FLOWINFO_MASK; 582 if (ip6_auto_flowlabel) { 583 ip6->ip6_flow &= ~IPV6_FLOWLABEL_MASK; 584 ip6->ip6_flow |= 585 (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK); 586 } 587 ip6->ip6_vfc &= ~IPV6_VERSION_MASK; 588 ip6->ip6_vfc |= IPV6_VERSION; 589 590 /* 591 * Compute the pseudo-header portion of the checksum 592 * now. We incrementally add in the TCP option and 593 * payload lengths later, and then compute the TCP 594 * checksum right before the packet is sent off onto 595 * the wire. 596 */ 597 n->th_sum = in6_cksum_phdr(&in6p->in6p_laddr, 598 &in6p->in6p_faddr, htonl(sizeof(struct tcphdr)), 599 htonl(IPPROTO_TCP)); 600 break; 601 } 602 #endif 603 } 604 if (inp) { 605 n->th_sport = inp->inp_lport; 606 n->th_dport = inp->inp_fport; 607 } 608 #ifdef INET6 609 else if (in6p) { 610 n->th_sport = in6p->in6p_lport; 611 n->th_dport = in6p->in6p_fport; 612 } 613 #endif 614 n->th_seq = 0; 615 n->th_ack = 0; 616 n->th_x2 = 0; 617 n->th_off = 5; 618 n->th_flags = 0; 619 n->th_win = 0; 620 n->th_urp = 0; 621 return (m); 622 } 623 624 /* 625 * Send a single message to the TCP at address specified by 626 * the given TCP/IP header. If m == 0, then we make a copy 627 * of the tcpiphdr at ti and send directly to the addressed host. 
628 * This is used to force keep alive messages out using the TCP 629 * template for a connection tp->t_template. If flags are given 630 * then we send a message back to the TCP which originated the 631 * segment ti, and discard the mbuf containing it and any other 632 * attached mbufs. 633 * 634 * In any case the ack and sequence number of the transmitted 635 * segment are as specified by the parameters. 636 */ 637 int 638 tcp_respond(struct tcpcb *tp, struct mbuf *mtemplate, struct mbuf *m, 639 struct tcphdr *th0, tcp_seq ack, tcp_seq seq, int flags) 640 { 641 struct route *ro; 642 int error, tlen, win = 0; 643 int hlen; 644 struct ip *ip; 645 #ifdef INET6 646 struct ip6_hdr *ip6; 647 #endif 648 int family; /* family on packet, not inpcb/in6pcb! */ 649 struct tcphdr *th; 650 struct socket *so; 651 652 if (tp != NULL && (flags & TH_RST) == 0) { 653 #ifdef DIAGNOSTIC 654 if (tp->t_inpcb && tp->t_in6pcb) 655 panic("tcp_respond: both t_inpcb and t_in6pcb are set"); 656 #endif 657 #ifdef INET 658 if (tp->t_inpcb) 659 win = sbspace(&tp->t_inpcb->inp_socket->so_rcv); 660 #endif 661 #ifdef INET6 662 if (tp->t_in6pcb) 663 win = sbspace(&tp->t_in6pcb->in6p_socket->so_rcv); 664 #endif 665 } 666 667 th = NULL; /* Quell uninitialized warning */ 668 ip = NULL; 669 #ifdef INET6 670 ip6 = NULL; 671 #endif 672 if (m == 0) { 673 if (!mtemplate) 674 return EINVAL; 675 676 /* get family information from template */ 677 switch (mtod(mtemplate, struct ip *)->ip_v) { 678 case 4: 679 family = AF_INET; 680 hlen = sizeof(struct ip); 681 break; 682 #ifdef INET6 683 case 6: 684 family = AF_INET6; 685 hlen = sizeof(struct ip6_hdr); 686 break; 687 #endif 688 default: 689 return EAFNOSUPPORT; 690 } 691 692 MGETHDR(m, M_DONTWAIT, MT_HEADER); 693 if (m) { 694 MCLAIM(m, &tcp_tx_mowner); 695 MCLGET(m, M_DONTWAIT); 696 if ((m->m_flags & M_EXT) == 0) { 697 m_free(m); 698 m = NULL; 699 } 700 } 701 if (m == NULL) 702 return (ENOBUFS); 703 704 if (tcp_compat_42) 705 tlen = 1; 706 else 707 tlen = 0; 708 709 m->m_data += max_linkhdr; 710 bcopy(mtod(mtemplate, void *), mtod(m, void *), 711 mtemplate->m_len); 712 switch (family) { 713 case AF_INET: 714 ip = mtod(m, struct ip *); 715 th = (struct tcphdr *)(ip + 1); 716 break; 717 #ifdef INET6 718 case AF_INET6: 719 ip6 = mtod(m, struct ip6_hdr *); 720 th = (struct tcphdr *)(ip6 + 1); 721 break; 722 #endif 723 #if 0 724 default: 725 /* noone will visit here */ 726 m_freem(m); 727 return EAFNOSUPPORT; 728 #endif 729 } 730 flags = TH_ACK; 731 } else { 732 733 if ((m->m_flags & M_PKTHDR) == 0) { 734 #if 0 735 printf("non PKTHDR to tcp_respond\n"); 736 #endif 737 m_freem(m); 738 return EINVAL; 739 } 740 #ifdef DIAGNOSTIC 741 if (!th0) 742 panic("th0 == NULL in tcp_respond"); 743 #endif 744 745 /* get family information from m */ 746 switch (mtod(m, struct ip *)->ip_v) { 747 case 4: 748 family = AF_INET; 749 hlen = sizeof(struct ip); 750 ip = mtod(m, struct ip *); 751 break; 752 #ifdef INET6 753 case 6: 754 family = AF_INET6; 755 hlen = sizeof(struct ip6_hdr); 756 ip6 = mtod(m, struct ip6_hdr *); 757 break; 758 #endif 759 default: 760 m_freem(m); 761 return EAFNOSUPPORT; 762 } 763 /* clear h/w csum flags inherited from rx packet */ 764 m->m_pkthdr.csum_flags = 0; 765 766 if ((flags & TH_SYN) == 0 || sizeof(*th0) > (th0->th_off << 2)) 767 tlen = sizeof(*th0); 768 else 769 tlen = th0->th_off << 2; 770 771 if (m->m_len > hlen + tlen && (m->m_flags & M_EXT) == 0 && 772 mtod(m, char *) + hlen == (char *)th0) { 773 m->m_len = hlen + tlen; 774 m_freem(m->m_next); 775 m->m_next = NULL; 776 } 
else { 777 struct mbuf *n; 778 779 #ifdef DIAGNOSTIC 780 if (max_linkhdr + hlen + tlen > MCLBYTES) { 781 m_freem(m); 782 return EMSGSIZE; 783 } 784 #endif 785 MGETHDR(n, M_DONTWAIT, MT_HEADER); 786 if (n && max_linkhdr + hlen + tlen > MHLEN) { 787 MCLGET(n, M_DONTWAIT); 788 if ((n->m_flags & M_EXT) == 0) { 789 m_freem(n); 790 n = NULL; 791 } 792 } 793 if (!n) { 794 m_freem(m); 795 return ENOBUFS; 796 } 797 798 MCLAIM(n, &tcp_tx_mowner); 799 n->m_data += max_linkhdr; 800 n->m_len = hlen + tlen; 801 m_copyback(n, 0, hlen, mtod(m, void *)); 802 m_copyback(n, hlen, tlen, (void *)th0); 803 804 m_freem(m); 805 m = n; 806 n = NULL; 807 } 808 809 #define xchg(a,b,type) { type t; t=a; a=b; b=t; } 810 switch (family) { 811 case AF_INET: 812 ip = mtod(m, struct ip *); 813 th = (struct tcphdr *)(ip + 1); 814 ip->ip_p = IPPROTO_TCP; 815 xchg(ip->ip_dst, ip->ip_src, struct in_addr); 816 ip->ip_p = IPPROTO_TCP; 817 break; 818 #ifdef INET6 819 case AF_INET6: 820 ip6 = mtod(m, struct ip6_hdr *); 821 th = (struct tcphdr *)(ip6 + 1); 822 ip6->ip6_nxt = IPPROTO_TCP; 823 xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr); 824 ip6->ip6_nxt = IPPROTO_TCP; 825 break; 826 #endif 827 #if 0 828 default: 829 /* noone will visit here */ 830 m_freem(m); 831 return EAFNOSUPPORT; 832 #endif 833 } 834 xchg(th->th_dport, th->th_sport, u_int16_t); 835 #undef xchg 836 tlen = 0; /*be friendly with the following code*/ 837 } 838 th->th_seq = htonl(seq); 839 th->th_ack = htonl(ack); 840 th->th_x2 = 0; 841 if ((flags & TH_SYN) == 0) { 842 if (tp) 843 win >>= tp->rcv_scale; 844 if (win > TCP_MAXWIN) 845 win = TCP_MAXWIN; 846 th->th_win = htons((u_int16_t)win); 847 th->th_off = sizeof (struct tcphdr) >> 2; 848 tlen += sizeof(*th); 849 } else 850 tlen += th->th_off << 2; 851 m->m_len = hlen + tlen; 852 m->m_pkthdr.len = hlen + tlen; 853 m->m_pkthdr.rcvif = NULL; 854 th->th_flags = flags; 855 th->th_urp = 0; 856 857 switch (family) { 858 #ifdef INET 859 case AF_INET: 860 { 861 struct ipovly *ipov = (struct ipovly *)ip; 862 memset(ipov->ih_x1, 0, sizeof ipov->ih_x1); 863 ipov->ih_len = htons((u_int16_t)tlen); 864 865 th->th_sum = 0; 866 th->th_sum = in_cksum(m, hlen + tlen); 867 ip->ip_len = htons(hlen + tlen); 868 ip->ip_ttl = ip_defttl; 869 break; 870 } 871 #endif 872 #ifdef INET6 873 case AF_INET6: 874 { 875 th->th_sum = 0; 876 th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(struct ip6_hdr), 877 tlen); 878 ip6->ip6_plen = htons(tlen); 879 if (tp && tp->t_in6pcb) 880 ip6->ip6_hlim = in6_selecthlim_rt(tp->t_in6pcb); 881 else 882 ip6->ip6_hlim = ip6_defhlim; 883 ip6->ip6_flow &= ~IPV6_FLOWINFO_MASK; 884 if (ip6_auto_flowlabel) { 885 ip6->ip6_flow |= 886 (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK); 887 } 888 break; 889 } 890 #endif 891 } 892 893 if (tp && tp->t_inpcb) 894 so = tp->t_inpcb->inp_socket; 895 #ifdef INET6 896 else if (tp && tp->t_in6pcb) 897 so = tp->t_in6pcb->in6p_socket; 898 #endif 899 else 900 so = NULL; 901 902 if (tp != NULL && tp->t_inpcb != NULL) { 903 ro = &tp->t_inpcb->inp_route; 904 #ifdef DIAGNOSTIC 905 if (family != AF_INET) 906 panic("tcp_respond: address family mismatch"); 907 if (!in_hosteq(ip->ip_dst, tp->t_inpcb->inp_faddr)) { 908 panic("tcp_respond: ip_dst %x != inp_faddr %x", 909 ntohl(ip->ip_dst.s_addr), 910 ntohl(tp->t_inpcb->inp_faddr.s_addr)); 911 } 912 #endif 913 } 914 #ifdef INET6 915 else if (tp != NULL && tp->t_in6pcb != NULL) { 916 ro = (struct route *)&tp->t_in6pcb->in6p_route; 917 #ifdef DIAGNOSTIC 918 if (family == AF_INET) { 919 if (!IN6_IS_ADDR_V4MAPPED(&tp->t_in6pcb->in6p_faddr)) 
920 panic("tcp_respond: not mapped addr"); 921 if (memcmp(&ip->ip_dst, 922 &tp->t_in6pcb->in6p_faddr.s6_addr32[3], 923 sizeof(ip->ip_dst)) != 0) { 924 panic("tcp_respond: ip_dst != in6p_faddr"); 925 } 926 } else if (family == AF_INET6) { 927 if (!IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, 928 &tp->t_in6pcb->in6p_faddr)) 929 panic("tcp_respond: ip6_dst != in6p_faddr"); 930 } else 931 panic("tcp_respond: address family mismatch"); 932 #endif 933 } 934 #endif 935 else 936 ro = NULL; 937 938 switch (family) { 939 #ifdef INET 940 case AF_INET: 941 error = ip_output(m, NULL, ro, 942 (tp && tp->t_mtudisc ? IP_MTUDISC : 0), NULL, so); 943 break; 944 #endif 945 #ifdef INET6 946 case AF_INET6: 947 error = ip6_output(m, NULL, ro, 0, NULL, so, NULL); 948 break; 949 #endif 950 default: 951 error = EAFNOSUPPORT; 952 break; 953 } 954 955 return (error); 956 } 957 958 /* 959 * Template TCPCB. Rather than zeroing a new TCPCB and initializing 960 * a bunch of members individually, we maintain this template for the 961 * static and mostly-static components of the TCPCB, and copy it into 962 * the new TCPCB instead. 963 */ 964 static struct tcpcb tcpcb_template = { 965 .t_srtt = TCPTV_SRTTBASE, 966 .t_rttmin = TCPTV_MIN, 967 968 .snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT, 969 .snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT, 970 .snd_numholes = 0, 971 .snd_cubic_wmax = 0, 972 .snd_cubic_wmax_last = 0, 973 .snd_cubic_ctime = 0, 974 975 .t_partialacks = -1, 976 .t_bytes_acked = 0, 977 .t_sndrexmitpack = 0, 978 .t_rcvoopack = 0, 979 .t_sndzerowin = 0, 980 }; 981 982 /* 983 * Updates the TCPCB template whenever a parameter that would affect 984 * the template is changed. 985 */ 986 void 987 tcp_tcpcb_template(void) 988 { 989 struct tcpcb *tp = &tcpcb_template; 990 int flags; 991 992 tp->t_peermss = tcp_mssdflt; 993 tp->t_ourmss = tcp_mssdflt; 994 tp->t_segsz = tcp_mssdflt; 995 996 flags = 0; 997 if (tcp_do_rfc1323 && tcp_do_win_scale) 998 flags |= TF_REQ_SCALE; 999 if (tcp_do_rfc1323 && tcp_do_timestamps) 1000 flags |= TF_REQ_TSTMP; 1001 tp->t_flags = flags; 1002 1003 /* 1004 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no 1005 * rtt estimate. Set rttvar so that srtt + 2 * rttvar gives 1006 * reasonable initial retransmit time. 1007 */ 1008 tp->t_rttvar = tcp_rttdflt * PR_SLOWHZ << (TCP_RTTVAR_SHIFT + 2 - 1); 1009 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp), 1010 TCPTV_MIN, TCPTV_REXMTMAX); 1011 1012 /* Keep Alive */ 1013 tp->t_keepinit = tcp_keepinit; 1014 tp->t_keepidle = tcp_keepidle; 1015 tp->t_keepintvl = tcp_keepintvl; 1016 tp->t_keepcnt = tcp_keepcnt; 1017 tp->t_maxidle = tp->t_keepcnt * tp->t_keepintvl; 1018 1019 /* MSL */ 1020 tp->t_msl = TCPTV_MSL; 1021 } 1022 1023 /* 1024 * Create a new TCP control block, making an 1025 * empty reassembly queue and hooking it to the argument 1026 * protocol control block. 1027 */ 1028 /* family selects inpcb, or in6pcb */ 1029 struct tcpcb * 1030 tcp_newtcpcb(int family, void *aux) 1031 { 1032 struct tcpcb *tp; 1033 int i; 1034 1035 /* XXX Consider using a pool_cache for speed. */ 1036 tp = pool_get(&tcpcb_pool, PR_NOWAIT); /* splsoftnet via tcp_usrreq */ 1037 if (tp == NULL) 1038 return (NULL); 1039 memcpy(tp, &tcpcb_template, sizeof(*tp)); 1040 TAILQ_INIT(&tp->segq); 1041 TAILQ_INIT(&tp->timeq); 1042 tp->t_family = family; /* may be overridden later on */ 1043 TAILQ_INIT(&tp->snd_holes); 1044 LIST_INIT(&tp->t_sc); /* XXX can template this */ 1045 1046 /* Don't sweat this loop; hopefully the compiler will unroll it. 
*/ 1047 for (i = 0; i < TCPT_NTIMERS; i++) { 1048 callout_init(&tp->t_timer[i], CALLOUT_MPSAFE); 1049 TCP_TIMER_INIT(tp, i); 1050 } 1051 callout_init(&tp->t_delack_ch, CALLOUT_MPSAFE); 1052 1053 switch (family) { 1054 case AF_INET: 1055 { 1056 struct inpcb *inp = (struct inpcb *)aux; 1057 1058 inp->inp_ip.ip_ttl = ip_defttl; 1059 inp->inp_ppcb = (void *)tp; 1060 1061 tp->t_inpcb = inp; 1062 tp->t_mtudisc = ip_mtudisc; 1063 break; 1064 } 1065 #ifdef INET6 1066 case AF_INET6: 1067 { 1068 struct in6pcb *in6p = (struct in6pcb *)aux; 1069 1070 in6p->in6p_ip6.ip6_hlim = in6_selecthlim_rt(in6p); 1071 in6p->in6p_ppcb = (void *)tp; 1072 1073 tp->t_in6pcb = in6p; 1074 /* for IPv6, always try to run path MTU discovery */ 1075 tp->t_mtudisc = 1; 1076 break; 1077 } 1078 #endif /* INET6 */ 1079 default: 1080 for (i = 0; i < TCPT_NTIMERS; i++) 1081 callout_destroy(&tp->t_timer[i]); 1082 callout_destroy(&tp->t_delack_ch); 1083 pool_put(&tcpcb_pool, tp); /* splsoftnet via tcp_usrreq */ 1084 return (NULL); 1085 } 1086 1087 /* 1088 * Initialize our timebase. When we send timestamps, we take 1089 * the delta from tcp_now -- this means each connection always 1090 * gets a timebase of 1, which makes it, among other things, 1091 * more difficult to determine how long a system has been up, 1092 * and thus how many TCP sequence increments have occurred. 1093 * 1094 * We start with 1, because 0 doesn't work with linux, which 1095 * considers timestamp 0 in a SYN packet as a bug and disables 1096 * timestamps. 1097 */ 1098 tp->ts_timebase = tcp_now - 1; 1099 1100 tcp_congctl_select(tp, tcp_congctl_global_name); 1101 1102 return (tp); 1103 } 1104 1105 /* 1106 * Drop a TCP connection, reporting 1107 * the specified error. If connection is synchronized, 1108 * then send a RST to peer. 1109 */ 1110 struct tcpcb * 1111 tcp_drop(struct tcpcb *tp, int errno) 1112 { 1113 struct socket *so = NULL; 1114 1115 #ifdef DIAGNOSTIC 1116 if (tp->t_inpcb && tp->t_in6pcb) 1117 panic("tcp_drop: both t_inpcb and t_in6pcb are set"); 1118 #endif 1119 #ifdef INET 1120 if (tp->t_inpcb) 1121 so = tp->t_inpcb->inp_socket; 1122 #endif 1123 #ifdef INET6 1124 if (tp->t_in6pcb) 1125 so = tp->t_in6pcb->in6p_socket; 1126 #endif 1127 if (!so) 1128 return NULL; 1129 1130 if (TCPS_HAVERCVDSYN(tp->t_state)) { 1131 tp->t_state = TCPS_CLOSED; 1132 (void) tcp_output(tp); 1133 TCP_STATINC(TCP_STAT_DROPS); 1134 } else 1135 TCP_STATINC(TCP_STAT_CONNDROPS); 1136 if (errno == ETIMEDOUT && tp->t_softerror) 1137 errno = tp->t_softerror; 1138 so->so_error = errno; 1139 return (tcp_close(tp)); 1140 } 1141 1142 /* 1143 * Close a TCP control block: 1144 * discard all space held by the tcp 1145 * discard internet protocol block 1146 * wake up any sleepers 1147 */ 1148 struct tcpcb * 1149 tcp_close(struct tcpcb *tp) 1150 { 1151 struct inpcb *inp; 1152 #ifdef INET6 1153 struct in6pcb *in6p; 1154 #endif 1155 struct socket *so; 1156 #ifdef RTV_RTT 1157 struct rtentry *rt; 1158 #endif 1159 struct route *ro; 1160 int j; 1161 1162 inp = tp->t_inpcb; 1163 #ifdef INET6 1164 in6p = tp->t_in6pcb; 1165 #endif 1166 so = NULL; 1167 ro = NULL; 1168 if (inp) { 1169 so = inp->inp_socket; 1170 ro = &inp->inp_route; 1171 } 1172 #ifdef INET6 1173 else if (in6p) { 1174 so = in6p->in6p_socket; 1175 ro = (struct route *)&in6p->in6p_route; 1176 } 1177 #endif 1178 1179 #ifdef RTV_RTT 1180 /* 1181 * If we sent enough data to get some meaningful characteristics, 1182 * save them in the routing entry. 'Enough' is arbitrarily 1183 * defined as the sendpipesize (default 4K) * 16. 
This would 1184 * give us 16 rtt samples assuming we only get one sample per 1185 * window (the usual case on a long haul net). 16 samples is 1186 * enough for the srtt filter to converge to within 5% of the correct 1187 * value; fewer samples and we could save a very bogus rtt. 1188 * 1189 * Don't update the default route's characteristics and don't 1190 * update anything that the user "locked". 1191 */ 1192 if (SEQ_LT(tp->iss + so->so_snd.sb_hiwat * 16, tp->snd_max) && 1193 ro && (rt = rtcache_validate(ro)) != NULL && 1194 !in_nullhost(satocsin(rt_getkey(rt))->sin_addr)) { 1195 u_long i = 0; 1196 1197 if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) { 1198 i = tp->t_srtt * 1199 ((RTM_RTTUNIT / PR_SLOWHZ) >> (TCP_RTT_SHIFT + 2)); 1200 if (rt->rt_rmx.rmx_rtt && i) 1201 /* 1202 * filter this update to half the old & half 1203 * the new values, converting scale. 1204 * See route.h and tcp_var.h for a 1205 * description of the scaling constants. 1206 */ 1207 rt->rt_rmx.rmx_rtt = 1208 (rt->rt_rmx.rmx_rtt + i) / 2; 1209 else 1210 rt->rt_rmx.rmx_rtt = i; 1211 } 1212 if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) { 1213 i = tp->t_rttvar * 1214 ((RTM_RTTUNIT / PR_SLOWHZ) >> (TCP_RTTVAR_SHIFT + 2)); 1215 if (rt->rt_rmx.rmx_rttvar && i) 1216 rt->rt_rmx.rmx_rttvar = 1217 (rt->rt_rmx.rmx_rttvar + i) / 2; 1218 else 1219 rt->rt_rmx.rmx_rttvar = i; 1220 } 1221 /* 1222 * update the pipelimit (ssthresh) if it has been updated 1223 * already or if a pipesize was specified & the threshhold 1224 * got below half the pipesize. I.e., wait for bad news 1225 * before we start updating, then update on both good 1226 * and bad news. 1227 */ 1228 if (((rt->rt_rmx.rmx_locks & RTV_SSTHRESH) == 0 && 1229 (i = tp->snd_ssthresh) && rt->rt_rmx.rmx_ssthresh) || 1230 i < (rt->rt_rmx.rmx_sendpipe / 2)) { 1231 /* 1232 * convert the limit from user data bytes to 1233 * packets then to packet data bytes. 1234 */ 1235 i = (i + tp->t_segsz / 2) / tp->t_segsz; 1236 if (i < 2) 1237 i = 2; 1238 i *= (u_long)(tp->t_segsz + sizeof (struct tcpiphdr)); 1239 if (rt->rt_rmx.rmx_ssthresh) 1240 rt->rt_rmx.rmx_ssthresh = 1241 (rt->rt_rmx.rmx_ssthresh + i) / 2; 1242 else 1243 rt->rt_rmx.rmx_ssthresh = i; 1244 } 1245 } 1246 #endif /* RTV_RTT */ 1247 /* free the reassembly queue, if any */ 1248 TCP_REASS_LOCK(tp); 1249 (void) tcp_freeq(tp); 1250 TCP_REASS_UNLOCK(tp); 1251 1252 /* free the SACK holes list. */ 1253 tcp_free_sackholes(tp); 1254 tcp_congctl_release(tp); 1255 syn_cache_cleanup(tp); 1256 1257 if (tp->t_template) { 1258 m_free(tp->t_template); 1259 tp->t_template = NULL; 1260 } 1261 1262 /* 1263 * Detaching the pcb will unlock the socket/tcpcb, and stopping 1264 * the timers can also drop the lock. We need to prevent access 1265 * to the tcpcb as it's half torn down. Flag the pcb as dead 1266 * (prevents access by timers) and only then detach it. 1267 */ 1268 tp->t_flags |= TF_DEAD; 1269 if (inp) { 1270 inp->inp_ppcb = 0; 1271 soisdisconnected(so); 1272 in_pcbdetach(inp); 1273 } 1274 #ifdef INET6 1275 else if (in6p) { 1276 in6p->in6p_ppcb = 0; 1277 soisdisconnected(so); 1278 in6_pcbdetach(in6p); 1279 } 1280 #endif 1281 /* 1282 * pcb is no longer visble elsewhere, so we can safely release 1283 * the lock in callout_halt() if needed. 
1284 */ 1285 TCP_STATINC(TCP_STAT_CLOSED); 1286 for (j = 0; j < TCPT_NTIMERS; j++) { 1287 callout_halt(&tp->t_timer[j], softnet_lock); 1288 callout_destroy(&tp->t_timer[j]); 1289 } 1290 callout_halt(&tp->t_delack_ch, softnet_lock); 1291 callout_destroy(&tp->t_delack_ch); 1292 pool_put(&tcpcb_pool, tp); 1293 1294 return NULL; 1295 } 1296 1297 int 1298 tcp_freeq(struct tcpcb *tp) 1299 { 1300 struct ipqent *qe; 1301 int rv = 0; 1302 #ifdef TCPREASS_DEBUG 1303 int i = 0; 1304 #endif 1305 1306 TCP_REASS_LOCK_CHECK(tp); 1307 1308 while ((qe = TAILQ_FIRST(&tp->segq)) != NULL) { 1309 #ifdef TCPREASS_DEBUG 1310 printf("tcp_freeq[%p,%d]: %u:%u(%u) 0x%02x\n", 1311 tp, i++, qe->ipqe_seq, qe->ipqe_seq + qe->ipqe_len, 1312 qe->ipqe_len, qe->ipqe_flags & (TH_SYN|TH_FIN|TH_RST)); 1313 #endif 1314 TAILQ_REMOVE(&tp->segq, qe, ipqe_q); 1315 TAILQ_REMOVE(&tp->timeq, qe, ipqe_timeq); 1316 m_freem(qe->ipqe_m); 1317 tcpipqent_free(qe); 1318 rv = 1; 1319 } 1320 tp->t_segqlen = 0; 1321 KASSERT(TAILQ_EMPTY(&tp->timeq)); 1322 return (rv); 1323 } 1324 1325 void 1326 tcp_fasttimo(void) 1327 { 1328 if (tcp_drainwanted) { 1329 tcp_drain(); 1330 tcp_drainwanted = 0; 1331 } 1332 } 1333 1334 void 1335 tcp_drainstub(void) 1336 { 1337 tcp_drainwanted = 1; 1338 } 1339 1340 /* 1341 * Protocol drain routine. Called when memory is in short supply. 1342 * Called from pr_fasttimo thus a callout context. 1343 */ 1344 void 1345 tcp_drain(void) 1346 { 1347 struct inpcb_hdr *inph; 1348 struct tcpcb *tp; 1349 1350 mutex_enter(softnet_lock); 1351 KERNEL_LOCK(1, NULL); 1352 1353 /* 1354 * Free the sequence queue of all TCP connections. 1355 */ 1356 TAILQ_FOREACH(inph, &tcbtable.inpt_queue, inph_queue) { 1357 switch (inph->inph_af) { 1358 case AF_INET: 1359 tp = intotcpcb((struct inpcb *)inph); 1360 break; 1361 #ifdef INET6 1362 case AF_INET6: 1363 tp = in6totcpcb((struct in6pcb *)inph); 1364 break; 1365 #endif 1366 default: 1367 tp = NULL; 1368 break; 1369 } 1370 if (tp != NULL) { 1371 /* 1372 * We may be called from a device's interrupt 1373 * context. If the tcpcb is already busy, 1374 * just bail out now. 1375 */ 1376 if (tcp_reass_lock_try(tp) == 0) 1377 continue; 1378 if (tcp_freeq(tp)) 1379 TCP_STATINC(TCP_STAT_CONNSDRAINED); 1380 TCP_REASS_UNLOCK(tp); 1381 } 1382 } 1383 1384 KERNEL_UNLOCK_ONE(NULL); 1385 mutex_exit(softnet_lock); 1386 } 1387 1388 /* 1389 * Notify a tcp user of an asynchronous error; 1390 * store error as soft error, but wake up user 1391 * (for now, won't do anything until can select for soft error). 1392 */ 1393 void 1394 tcp_notify(struct inpcb *inp, int error) 1395 { 1396 struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb; 1397 struct socket *so = inp->inp_socket; 1398 1399 /* 1400 * Ignore some errors if we are hooked up. 1401 * If connection hasn't completed, has retransmitted several times, 1402 * and receives a second error, give up now. This is better 1403 * than waiting a long time to establish a connection that 1404 * can never complete. 
1405 */ 1406 if (tp->t_state == TCPS_ESTABLISHED && 1407 (error == EHOSTUNREACH || error == ENETUNREACH || 1408 error == EHOSTDOWN)) { 1409 return; 1410 } else if (TCPS_HAVEESTABLISHED(tp->t_state) == 0 && 1411 tp->t_rxtshift > 3 && tp->t_softerror) 1412 so->so_error = error; 1413 else 1414 tp->t_softerror = error; 1415 cv_broadcast(&so->so_cv); 1416 sorwakeup(so); 1417 sowwakeup(so); 1418 } 1419 1420 #ifdef INET6 1421 void 1422 tcp6_notify(struct in6pcb *in6p, int error) 1423 { 1424 struct tcpcb *tp = (struct tcpcb *)in6p->in6p_ppcb; 1425 struct socket *so = in6p->in6p_socket; 1426 1427 /* 1428 * Ignore some errors if we are hooked up. 1429 * If connection hasn't completed, has retransmitted several times, 1430 * and receives a second error, give up now. This is better 1431 * than waiting a long time to establish a connection that 1432 * can never complete. 1433 */ 1434 if (tp->t_state == TCPS_ESTABLISHED && 1435 (error == EHOSTUNREACH || error == ENETUNREACH || 1436 error == EHOSTDOWN)) { 1437 return; 1438 } else if (TCPS_HAVEESTABLISHED(tp->t_state) == 0 && 1439 tp->t_rxtshift > 3 && tp->t_softerror) 1440 so->so_error = error; 1441 else 1442 tp->t_softerror = error; 1443 cv_broadcast(&so->so_cv); 1444 sorwakeup(so); 1445 sowwakeup(so); 1446 } 1447 #endif 1448 1449 #ifdef INET6 1450 void * 1451 tcp6_ctlinput(int cmd, const struct sockaddr *sa, void *d) 1452 { 1453 struct tcphdr th; 1454 void (*notify)(struct in6pcb *, int) = tcp6_notify; 1455 int nmatch; 1456 struct ip6_hdr *ip6; 1457 const struct sockaddr_in6 *sa6_src = NULL; 1458 const struct sockaddr_in6 *sa6 = (const struct sockaddr_in6 *)sa; 1459 struct mbuf *m; 1460 int off; 1461 1462 if (sa->sa_family != AF_INET6 || 1463 sa->sa_len != sizeof(struct sockaddr_in6)) 1464 return NULL; 1465 if ((unsigned)cmd >= PRC_NCMDS) 1466 return NULL; 1467 else if (cmd == PRC_QUENCH) { 1468 /* 1469 * Don't honor ICMP Source Quench messages meant for 1470 * TCP connections. 1471 */ 1472 return NULL; 1473 } else if (PRC_IS_REDIRECT(cmd)) 1474 notify = in6_rtchange, d = NULL; 1475 else if (cmd == PRC_MSGSIZE) 1476 ; /* special code is present, see below */ 1477 else if (cmd == PRC_HOSTDEAD) 1478 d = NULL; 1479 else if (inet6ctlerrmap[cmd] == 0) 1480 return NULL; 1481 1482 /* if the parameter is from icmp6, decode it. */ 1483 if (d != NULL) { 1484 struct ip6ctlparam *ip6cp = (struct ip6ctlparam *)d; 1485 m = ip6cp->ip6c_m; 1486 ip6 = ip6cp->ip6c_ip6; 1487 off = ip6cp->ip6c_off; 1488 sa6_src = ip6cp->ip6c_src; 1489 } else { 1490 m = NULL; 1491 ip6 = NULL; 1492 sa6_src = &sa6_any; 1493 off = 0; 1494 } 1495 1496 if (ip6) { 1497 /* 1498 * XXX: We assume that when ip6 is non NULL, 1499 * M and OFF are valid. 1500 */ 1501 1502 /* check if we can safely examine src and dst ports */ 1503 if (m->m_pkthdr.len < off + sizeof(th)) { 1504 if (cmd == PRC_MSGSIZE) 1505 icmp6_mtudisc_update((struct ip6ctlparam *)d, 0); 1506 return NULL; 1507 } 1508 1509 memset(&th, 0, sizeof(th)); 1510 m_copydata(m, off, sizeof(th), (void *)&th); 1511 1512 if (cmd == PRC_MSGSIZE) { 1513 int valid = 0; 1514 1515 /* 1516 * Check to see if we have a valid TCP connection 1517 * corresponding to the address in the ICMPv6 message 1518 * payload. 
1519 */ 1520 if (in6_pcblookup_connect(&tcbtable, &sa6->sin6_addr, 1521 th.th_dport, 1522 (const struct in6_addr *)&sa6_src->sin6_addr, 1523 th.th_sport, 0, 0)) 1524 valid++; 1525 1526 /* 1527 * Depending on the value of "valid" and routing table 1528 * size (mtudisc_{hi,lo}wat), we will: 1529 * - recalcurate the new MTU and create the 1530 * corresponding routing entry, or 1531 * - ignore the MTU change notification. 1532 */ 1533 icmp6_mtudisc_update((struct ip6ctlparam *)d, valid); 1534 1535 /* 1536 * no need to call in6_pcbnotify, it should have been 1537 * called via callback if necessary 1538 */ 1539 return NULL; 1540 } 1541 1542 nmatch = in6_pcbnotify(&tcbtable, sa, th.th_dport, 1543 (const struct sockaddr *)sa6_src, th.th_sport, cmd, NULL, notify); 1544 if (nmatch == 0 && syn_cache_count && 1545 (inet6ctlerrmap[cmd] == EHOSTUNREACH || 1546 inet6ctlerrmap[cmd] == ENETUNREACH || 1547 inet6ctlerrmap[cmd] == EHOSTDOWN)) 1548 syn_cache_unreach((const struct sockaddr *)sa6_src, 1549 sa, &th); 1550 } else { 1551 (void) in6_pcbnotify(&tcbtable, sa, 0, 1552 (const struct sockaddr *)sa6_src, 0, cmd, NULL, notify); 1553 } 1554 1555 return NULL; 1556 } 1557 #endif 1558 1559 #ifdef INET 1560 /* assumes that ip header and tcp header are contiguous on mbuf */ 1561 void * 1562 tcp_ctlinput(int cmd, const struct sockaddr *sa, void *v) 1563 { 1564 struct ip *ip = v; 1565 struct tcphdr *th; 1566 struct icmp *icp; 1567 extern const int inetctlerrmap[]; 1568 void (*notify)(struct inpcb *, int) = tcp_notify; 1569 int errno; 1570 int nmatch; 1571 struct tcpcb *tp; 1572 u_int mtu; 1573 tcp_seq seq; 1574 struct inpcb *inp; 1575 #ifdef INET6 1576 struct in6pcb *in6p; 1577 struct in6_addr src6, dst6; 1578 #endif 1579 1580 if (sa->sa_family != AF_INET || 1581 sa->sa_len != sizeof(struct sockaddr_in)) 1582 return NULL; 1583 if ((unsigned)cmd >= PRC_NCMDS) 1584 return NULL; 1585 errno = inetctlerrmap[cmd]; 1586 if (cmd == PRC_QUENCH) 1587 /* 1588 * Don't honor ICMP Source Quench messages meant for 1589 * TCP connections. 1590 */ 1591 return NULL; 1592 else if (PRC_IS_REDIRECT(cmd)) 1593 notify = in_rtchange, ip = 0; 1594 else if (cmd == PRC_MSGSIZE && ip && ip->ip_v == 4) { 1595 /* 1596 * Check to see if we have a valid TCP connection 1597 * corresponding to the address in the ICMP message 1598 * payload. 1599 * 1600 * Boundary check is made in icmp_input(), with ICMP_ADVLENMIN. 1601 */ 1602 th = (struct tcphdr *)((char *)ip + (ip->ip_hl << 2)); 1603 #ifdef INET6 1604 memset(&src6, 0, sizeof(src6)); 1605 memset(&dst6, 0, sizeof(dst6)); 1606 src6.s6_addr16[5] = dst6.s6_addr16[5] = 0xffff; 1607 memcpy(&src6.s6_addr32[3], &ip->ip_src, sizeof(struct in_addr)); 1608 memcpy(&dst6.s6_addr32[3], &ip->ip_dst, sizeof(struct in_addr)); 1609 #endif 1610 if ((inp = in_pcblookup_connect(&tcbtable, ip->ip_dst, 1611 th->th_dport, ip->ip_src, th->th_sport, 0)) != NULL) 1612 #ifdef INET6 1613 in6p = NULL; 1614 #else 1615 ; 1616 #endif 1617 #ifdef INET6 1618 else if ((in6p = in6_pcblookup_connect(&tcbtable, &dst6, 1619 th->th_dport, &src6, th->th_sport, 0, 0)) != NULL) 1620 ; 1621 #endif 1622 else 1623 return NULL; 1624 1625 /* 1626 * Now that we've validated that we are actually communicating 1627 * with the host indicated in the ICMP message, locate the 1628 * ICMP header, recalculate the new MTU, and create the 1629 * corresponding routing entry. 
1630 */ 1631 icp = (struct icmp *)((char *)ip - 1632 offsetof(struct icmp, icmp_ip)); 1633 if (inp) { 1634 if ((tp = intotcpcb(inp)) == NULL) 1635 return NULL; 1636 } 1637 #ifdef INET6 1638 else if (in6p) { 1639 if ((tp = in6totcpcb(in6p)) == NULL) 1640 return NULL; 1641 } 1642 #endif 1643 else 1644 return NULL; 1645 seq = ntohl(th->th_seq); 1646 if (SEQ_LT(seq, tp->snd_una) || SEQ_GT(seq, tp->snd_max)) 1647 return NULL; 1648 /* 1649 * If the ICMP message advertises a Next-Hop MTU 1650 * equal or larger than the maximum packet size we have 1651 * ever sent, drop the message. 1652 */ 1653 mtu = (u_int)ntohs(icp->icmp_nextmtu); 1654 if (mtu >= tp->t_pmtud_mtu_sent) 1655 return NULL; 1656 if (mtu >= tcp_hdrsz(tp) + tp->t_pmtud_mss_acked) { 1657 /* 1658 * Calculate new MTU, and create corresponding 1659 * route (traditional PMTUD). 1660 */ 1661 tp->t_flags &= ~TF_PMTUD_PEND; 1662 icmp_mtudisc(icp, ip->ip_dst); 1663 } else { 1664 /* 1665 * Record the information got in the ICMP 1666 * message; act on it later. 1667 * If we had already recorded an ICMP message, 1668 * replace the old one only if the new message 1669 * refers to an older TCP segment 1670 */ 1671 if (tp->t_flags & TF_PMTUD_PEND) { 1672 if (SEQ_LT(tp->t_pmtud_th_seq, seq)) 1673 return NULL; 1674 } else 1675 tp->t_flags |= TF_PMTUD_PEND; 1676 tp->t_pmtud_th_seq = seq; 1677 tp->t_pmtud_nextmtu = icp->icmp_nextmtu; 1678 tp->t_pmtud_ip_len = icp->icmp_ip.ip_len; 1679 tp->t_pmtud_ip_hl = icp->icmp_ip.ip_hl; 1680 } 1681 return NULL; 1682 } else if (cmd == PRC_HOSTDEAD) 1683 ip = 0; 1684 else if (errno == 0) 1685 return NULL; 1686 if (ip && ip->ip_v == 4 && sa->sa_family == AF_INET) { 1687 th = (struct tcphdr *)((char *)ip + (ip->ip_hl << 2)); 1688 nmatch = in_pcbnotify(&tcbtable, satocsin(sa)->sin_addr, 1689 th->th_dport, ip->ip_src, th->th_sport, errno, notify); 1690 if (nmatch == 0 && syn_cache_count && 1691 (inetctlerrmap[cmd] == EHOSTUNREACH || 1692 inetctlerrmap[cmd] == ENETUNREACH || 1693 inetctlerrmap[cmd] == EHOSTDOWN)) { 1694 struct sockaddr_in sin; 1695 memset(&sin, 0, sizeof(sin)); 1696 sin.sin_len = sizeof(sin); 1697 sin.sin_family = AF_INET; 1698 sin.sin_port = th->th_sport; 1699 sin.sin_addr = ip->ip_src; 1700 syn_cache_unreach((struct sockaddr *)&sin, sa, th); 1701 } 1702 1703 /* XXX mapped address case */ 1704 } else 1705 in_pcbnotifyall(&tcbtable, satocsin(sa)->sin_addr, errno, 1706 notify); 1707 return NULL; 1708 } 1709 1710 /* 1711 * When a source quench is received, we are being notified of congestion. 1712 * Close the congestion window down to the Loss Window (one segment). 1713 * We will gradually open it again as we proceed. 1714 */ 1715 void 1716 tcp_quench(struct inpcb *inp, int errno) 1717 { 1718 struct tcpcb *tp = intotcpcb(inp); 1719 1720 if (tp) { 1721 tp->snd_cwnd = tp->t_segsz; 1722 tp->t_bytes_acked = 0; 1723 } 1724 } 1725 #endif 1726 1727 #ifdef INET6 1728 void 1729 tcp6_quench(struct in6pcb *in6p, int errno) 1730 { 1731 struct tcpcb *tp = in6totcpcb(in6p); 1732 1733 if (tp) { 1734 tp->snd_cwnd = tp->t_segsz; 1735 tp->t_bytes_acked = 0; 1736 } 1737 } 1738 #endif 1739 1740 #ifdef INET 1741 /* 1742 * Path MTU Discovery handlers. 
1743 */ 1744 void 1745 tcp_mtudisc_callback(struct in_addr faddr) 1746 { 1747 #ifdef INET6 1748 struct in6_addr in6; 1749 #endif 1750 1751 in_pcbnotifyall(&tcbtable, faddr, EMSGSIZE, tcp_mtudisc); 1752 #ifdef INET6 1753 memset(&in6, 0, sizeof(in6)); 1754 in6.s6_addr16[5] = 0xffff; 1755 memcpy(&in6.s6_addr32[3], &faddr, sizeof(struct in_addr)); 1756 tcp6_mtudisc_callback(&in6); 1757 #endif 1758 } 1759 1760 /* 1761 * On receipt of path MTU corrections, flush old route and replace it 1762 * with the new one. Retransmit all unacknowledged packets, to ensure 1763 * that all packets will be received. 1764 */ 1765 void 1766 tcp_mtudisc(struct inpcb *inp, int errno) 1767 { 1768 struct tcpcb *tp = intotcpcb(inp); 1769 struct rtentry *rt = in_pcbrtentry(inp); 1770 1771 if (tp != 0) { 1772 if (rt != 0) { 1773 /* 1774 * If this was not a host route, remove and realloc. 1775 */ 1776 if ((rt->rt_flags & RTF_HOST) == 0) { 1777 in_rtchange(inp, errno); 1778 if ((rt = in_pcbrtentry(inp)) == 0) 1779 return; 1780 } 1781 1782 /* 1783 * Slow start out of the error condition. We 1784 * use the MTU because we know it's smaller 1785 * than the previously transmitted segment. 1786 * 1787 * Note: This is more conservative than the 1788 * suggestion in draft-floyd-incr-init-win-03. 1789 */ 1790 if (rt->rt_rmx.rmx_mtu != 0) 1791 tp->snd_cwnd = 1792 TCP_INITIAL_WINDOW(tcp_init_win, 1793 rt->rt_rmx.rmx_mtu); 1794 } 1795 1796 /* 1797 * Resend unacknowledged packets. 1798 */ 1799 tp->snd_nxt = tp->sack_newdata = tp->snd_una; 1800 tcp_output(tp); 1801 } 1802 } 1803 #endif 1804 1805 #ifdef INET6 1806 /* 1807 * Path MTU Discovery handlers. 1808 */ 1809 void 1810 tcp6_mtudisc_callback(struct in6_addr *faddr) 1811 { 1812 struct sockaddr_in6 sin6; 1813 1814 memset(&sin6, 0, sizeof(sin6)); 1815 sin6.sin6_family = AF_INET6; 1816 sin6.sin6_len = sizeof(struct sockaddr_in6); 1817 sin6.sin6_addr = *faddr; 1818 (void) in6_pcbnotify(&tcbtable, (struct sockaddr *)&sin6, 0, 1819 (const struct sockaddr *)&sa6_any, 0, PRC_MSGSIZE, NULL, tcp6_mtudisc); 1820 } 1821 1822 void 1823 tcp6_mtudisc(struct in6pcb *in6p, int errno) 1824 { 1825 struct tcpcb *tp = in6totcpcb(in6p); 1826 struct rtentry *rt = in6_pcbrtentry(in6p); 1827 1828 if (tp != 0) { 1829 if (rt != 0) { 1830 /* 1831 * If this was not a host route, remove and realloc. 1832 */ 1833 if ((rt->rt_flags & RTF_HOST) == 0) { 1834 in6_rtchange(in6p, errno); 1835 if ((rt = in6_pcbrtentry(in6p)) == 0) 1836 return; 1837 } 1838 1839 /* 1840 * Slow start out of the error condition. We 1841 * use the MTU because we know it's smaller 1842 * than the previously transmitted segment. 1843 * 1844 * Note: This is more conservative than the 1845 * suggestion in draft-floyd-incr-init-win-03. 1846 */ 1847 if (rt->rt_rmx.rmx_mtu != 0) 1848 tp->snd_cwnd = 1849 TCP_INITIAL_WINDOW(tcp_init_win, 1850 rt->rt_rmx.rmx_mtu); 1851 } 1852 1853 /* 1854 * Resend unacknowledged packets. 1855 */ 1856 tp->snd_nxt = tp->sack_newdata = tp->snd_una; 1857 tcp_output(tp); 1858 } 1859 } 1860 #endif /* INET6 */ 1861 1862 /* 1863 * Compute the MSS to advertise to the peer. Called only during 1864 * the 3-way handshake. If we are the server (peer initiated 1865 * connection), we are called with a pointer to the interface 1866 * on which the SYN packet arrived. If we are the client (we 1867 * initiated connection), we are called with a pointer to the 1868 * interface out which this connection should go. 1869 * 1870 * NOTE: Do not subtract IP option/extension header size nor IPsec 1871 * header size from MSS advertisement. 
MSS option must hold the maximum 1872 * segment size we can accept, so it must always be: 1873 * max(if mtu) - ip header - tcp header 1874 */ 1875 u_long 1876 tcp_mss_to_advertise(const struct ifnet *ifp, int af) 1877 { 1878 extern u_long in_maxmtu; 1879 u_long mss = 0; 1880 u_long hdrsiz; 1881 1882 /* 1883 * In order to avoid defeating path MTU discovery on the peer, 1884 * we advertise the max MTU of all attached networks as our MSS, 1885 * per RFC 1191, section 3.1. 1886 * 1887 * We provide the option to advertise just the MTU of 1888 * the interface on which we hope this connection will 1889 * be receiving. If we are responding to a SYN, we 1890 * will have a pretty good idea about this, but when 1891 * initiating a connection there is a bit more doubt. 1892 * 1893 * We also need to ensure that loopback has a large enough 1894 * MSS, as the loopback MTU is never included in in_maxmtu. 1895 */ 1896 1897 if (ifp != NULL) 1898 switch (af) { 1899 case AF_INET: 1900 mss = ifp->if_mtu; 1901 break; 1902 #ifdef INET6 1903 case AF_INET6: 1904 mss = IN6_LINKMTU(ifp); 1905 break; 1906 #endif 1907 } 1908 1909 if (tcp_mss_ifmtu == 0) 1910 switch (af) { 1911 case AF_INET: 1912 mss = max(in_maxmtu, mss); 1913 break; 1914 #ifdef INET6 1915 case AF_INET6: 1916 mss = max(in6_maxmtu, mss); 1917 break; 1918 #endif 1919 } 1920 1921 switch (af) { 1922 case AF_INET: 1923 hdrsiz = sizeof(struct ip); 1924 break; 1925 #ifdef INET6 1926 case AF_INET6: 1927 hdrsiz = sizeof(struct ip6_hdr); 1928 break; 1929 #endif 1930 default: 1931 hdrsiz = 0; 1932 break; 1933 } 1934 hdrsiz += sizeof(struct tcphdr); 1935 if (mss > hdrsiz) 1936 mss -= hdrsiz; 1937 1938 mss = max(tcp_mssdflt, mss); 1939 return (mss); 1940 } 1941 1942 /* 1943 * Set connection variables based on the peer's advertised MSS. 1944 * We are passed the TCPCB for the actual connection. If we 1945 * are the server, we are called by the compressed state engine 1946 * when the 3-way handshake is complete. If we are the client, 1947 * we are called when we receive the SYN,ACK from the server. 1948 * 1949 * NOTE: Our advertised MSS value must be initialized in the TCPCB 1950 * before this routine is called! 1951 */ 1952 void 1953 tcp_mss_from_peer(struct tcpcb *tp, int offer) 1954 { 1955 struct socket *so; 1956 #if defined(RTV_SPIPE) || defined(RTV_SSTHRESH) 1957 struct rtentry *rt; 1958 #endif 1959 u_long bufsize; 1960 int mss; 1961 1962 #ifdef DIAGNOSTIC 1963 if (tp->t_inpcb && tp->t_in6pcb) 1964 panic("tcp_mss_from_peer: both t_inpcb and t_in6pcb are set"); 1965 #endif 1966 so = NULL; 1967 rt = NULL; 1968 #ifdef INET 1969 if (tp->t_inpcb) { 1970 so = tp->t_inpcb->inp_socket; 1971 #if defined(RTV_SPIPE) || defined(RTV_SSTHRESH) 1972 rt = in_pcbrtentry(tp->t_inpcb); 1973 #endif 1974 } 1975 #endif 1976 #ifdef INET6 1977 if (tp->t_in6pcb) { 1978 so = tp->t_in6pcb->in6p_socket; 1979 #if defined(RTV_SPIPE) || defined(RTV_SSTHRESH) 1980 rt = in6_pcbrtentry(tp->t_in6pcb); 1981 #endif 1982 } 1983 #endif 1984 1985 /* 1986 * As per RFC1122, use the default MSS value, unless they 1987 * sent us an offer. Do not accept offers less than 256 bytes. 1988 */ 1989 mss = tcp_mssdflt; 1990 if (offer) 1991 mss = offer; 1992 mss = max(mss, 256); /* sanity */ 1993 tp->t_peermss = mss; 1994 mss -= tcp_optlen(tp); 1995 #ifdef INET 1996 if (tp->t_inpcb) 1997 mss -= ip_optlen(tp->t_inpcb); 1998 #endif 1999 #ifdef INET6 2000 if (tp->t_in6pcb) 2001 mss -= ip6_optlen(tp->t_in6pcb); 2002 #endif 2003 2004 /* 2005 * If there's a pipesize, change the socket buffer to that size. 
	/*
	 * If there's a pipesize, change the socket buffer to that size.
	 * Make the socket buffer an integral number of MSS units.  If
	 * the MSS is larger than the socket buffer, artificially decrease
	 * the MSS.
	 */
#ifdef RTV_SPIPE
	if (rt != NULL && rt->rt_rmx.rmx_sendpipe != 0)
		bufsize = rt->rt_rmx.rmx_sendpipe;
	else
#endif
	{
		KASSERT(so != NULL);
		bufsize = so->so_snd.sb_hiwat;
	}
	if (bufsize < mss)
		mss = bufsize;
	else {
		bufsize = roundup(bufsize, mss);
		if (bufsize > sb_max)
			bufsize = sb_max;
		(void) sbreserve(&so->so_snd, bufsize, so);
	}
	tp->t_segsz = mss;

#ifdef RTV_SSTHRESH
	if (rt != NULL && rt->rt_rmx.rmx_ssthresh) {
		/*
		 * There's some sort of gateway or interface buffer
		 * limit on the path.  Use this to set the slow
		 * start threshold, but set the threshold to no less
		 * than 2 * MSS.
		 */
		tp->snd_ssthresh = max(2 * mss, rt->rt_rmx.rmx_ssthresh);
	}
#endif
}

/*
 * Processing necessary when a TCP connection is established.
 */
void
tcp_established(struct tcpcb *tp)
{
	struct socket *so;
#ifdef RTV_RPIPE
	struct rtentry *rt;
#endif
	u_long bufsize;

#ifdef DIAGNOSTIC
	if (tp->t_inpcb && tp->t_in6pcb)
		panic("tcp_established: both t_inpcb and t_in6pcb are set");
#endif
	so = NULL;
	rt = NULL;
#ifdef INET
	/* This is a while() to reduce the dreadful stairstepping below */
	while (tp->t_inpcb) {
		so = tp->t_inpcb->inp_socket;
#if defined(RTV_RPIPE)
		rt = in_pcbrtentry(tp->t_inpcb);
#endif
		if (__predict_true(tcp_msl_enable)) {
			if (tp->t_inpcb->inp_laddr.s_addr == INADDR_LOOPBACK) {
				tp->t_msl = tcp_msl_loop ? tcp_msl_loop : (TCPTV_MSL >> 2);
				break;
			}

			if (__predict_false(tcp_rttlocal)) {
				/* This may be adjusted by tcp_input */
				tp->t_msl = tcp_msl_local ? tcp_msl_local : (TCPTV_MSL >> 1);
				break;
			}
			if (in_localaddr(tp->t_inpcb->inp_faddr)) {
				tp->t_msl = tcp_msl_local ? tcp_msl_local : (TCPTV_MSL >> 1);
				break;
			}
		}
		tp->t_msl = tcp_msl_remote ? tcp_msl_remote : TCPTV_MSL;
		break;
	}
#endif
#ifdef INET6
	/* The !tp->t_inpcb lets the compiler know it can't be v4 *and* v6 */
	while (!tp->t_inpcb && tp->t_in6pcb) {
		so = tp->t_in6pcb->in6p_socket;
#if defined(RTV_RPIPE)
		rt = in6_pcbrtentry(tp->t_in6pcb);
#endif
		if (__predict_true(tcp_msl_enable)) {
			extern const struct in6_addr in6addr_loopback;

			if (IN6_ARE_ADDR_EQUAL(&tp->t_in6pcb->in6p_laddr,
			    &in6addr_loopback)) {
				tp->t_msl = tcp_msl_loop ? tcp_msl_loop : (TCPTV_MSL >> 2);
				break;
			}

			if (__predict_false(tcp_rttlocal)) {
				/* This may be adjusted by tcp_input */
				tp->t_msl = tcp_msl_local ? tcp_msl_local : (TCPTV_MSL >> 1);
				break;
			}
			if (in6_localaddr(&tp->t_in6pcb->in6p_faddr)) {
				tp->t_msl = tcp_msl_local ? tcp_msl_local : (TCPTV_MSL >> 1);
				break;
			}
		}
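		/*
		 * None of the special cases above applied (or
		 * tcp_msl_enable is off), so use the conservative remote
		 * default.  Roughly (assuming the stock TCPTV_MSL of 30
		 * seconds and no tcp_msl_* overrides), this ladder yields
		 * ~7.5s for loopback, ~15s for directly attached networks
		 * and the full 30s for everything else.
		 */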
		tp->t_msl = tcp_msl_remote ? tcp_msl_remote : TCPTV_MSL;
		break;
	}
#endif

	tp->t_state = TCPS_ESTABLISHED;
	TCP_TIMER_ARM(tp, TCPT_KEEP, tp->t_keepidle);

#ifdef RTV_RPIPE
	if (rt != NULL && rt->rt_rmx.rmx_recvpipe != 0)
		bufsize = rt->rt_rmx.rmx_recvpipe;
	else
#endif
	{
		KASSERT(so != NULL);
		bufsize = so->so_rcv.sb_hiwat;
	}
	if (bufsize > tp->t_ourmss) {
		bufsize = roundup(bufsize, tp->t_ourmss);
		if (bufsize > sb_max)
			bufsize = sb_max;
		(void) sbreserve(&so->so_rcv, bufsize, so);
	}
}

/*
 * Check if there's an initial rtt or rttvar.  Convert from the
 * route-table units to scaled multiples of the slow timeout timer.
 * Called only during the 3-way handshake.
 */
void
tcp_rmx_rtt(struct tcpcb *tp)
{
#ifdef RTV_RTT
	struct rtentry *rt = NULL;
	int rtt;

#ifdef DIAGNOSTIC
	if (tp->t_inpcb && tp->t_in6pcb)
		panic("tcp_rmx_rtt: both t_inpcb and t_in6pcb are set");
#endif
#ifdef INET
	if (tp->t_inpcb)
		rt = in_pcbrtentry(tp->t_inpcb);
#endif
#ifdef INET6
	if (tp->t_in6pcb)
		rt = in6_pcbrtentry(tp->t_in6pcb);
#endif
	if (rt == NULL)
		return;

	if (tp->t_srtt == 0 && (rtt = rt->rt_rmx.rmx_rtt)) {
		/*
		 * XXX The lock bit for MTU indicates that the value
		 * is also a minimum value; this is subject to time.
		 */
		if (rt->rt_rmx.rmx_locks & RTV_RTT)
			TCPT_RANGESET(tp->t_rttmin,
			    rtt / (RTM_RTTUNIT / PR_SLOWHZ),
			    TCPTV_MIN, TCPTV_REXMTMAX);
		tp->t_srtt = rtt /
		    ((RTM_RTTUNIT / PR_SLOWHZ) >> (TCP_RTT_SHIFT + 2));
		if (rt->rt_rmx.rmx_rttvar) {
			tp->t_rttvar = rt->rt_rmx.rmx_rttvar /
			    ((RTM_RTTUNIT / PR_SLOWHZ) >>
			    (TCP_RTTVAR_SHIFT + 2));
		} else {
			/* Default variation is +- 1 rtt */
			tp->t_rttvar =
			    tp->t_srtt >> (TCP_RTT_SHIFT - TCP_RTTVAR_SHIFT);
		}
		TCPT_RANGESET(tp->t_rxtcur,
		    ((tp->t_srtt >> 2) + tp->t_rttvar) >> (1 + 2),
		    tp->t_rttmin, TCPTV_REXMTMAX);
	}
#endif
}

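/*
 * For illustration (relying on the usual definitions elsewhere): rmx_rtt
 * and rmx_rttvar are carried in RTM_RTTUNIT units (microseconds), so the
 * divisions above rescale them into PR_SLOWHZ ticks while keeping a few
 * extra bits of binary fraction, matching the scaled fixed-point form of
 * t_srtt and t_rttvar that the regular RTT estimator maintains.
 */
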
tcp_seq	tcp_iss_seq = 0;	/* tcp initial seq # */

/*
 * Get a new sequence value given a tcp control block
 */
tcp_seq
tcp_new_iss(struct tcpcb *tp, tcp_seq addin)
{

#ifdef INET
	if (tp->t_inpcb != NULL) {
		return (tcp_new_iss1(&tp->t_inpcb->inp_laddr,
		    &tp->t_inpcb->inp_faddr, tp->t_inpcb->inp_lport,
		    tp->t_inpcb->inp_fport, sizeof(tp->t_inpcb->inp_laddr),
		    addin));
	}
#endif
#ifdef INET6
	if (tp->t_in6pcb != NULL) {
		return (tcp_new_iss1(&tp->t_in6pcb->in6p_laddr,
		    &tp->t_in6pcb->in6p_faddr, tp->t_in6pcb->in6p_lport,
		    tp->t_in6pcb->in6p_fport, sizeof(tp->t_in6pcb->in6p_laddr),
		    addin));
	}
#endif
	/* Not possible. */
	panic("tcp_new_iss");
}

static u_int8_t tcp_iss_secret[16];	/* 128 bits; should be plenty */

/*
 * Initialize RFC 1948 ISS Secret
 */
static int
tcp_iss_secret_init(void)
{
	cprng_strong(kern_cprng,
	    tcp_iss_secret, sizeof(tcp_iss_secret), 0);

	return 0;
}

/*
 * This routine actually generates a new TCP initial sequence number.
 */
tcp_seq
tcp_new_iss1(void *laddr, void *faddr, u_int16_t lport, u_int16_t fport,
    size_t addrsz, tcp_seq addin)
{
	tcp_seq tcp_iss;

	if (tcp_do_rfc1948) {
		MD5_CTX ctx;
		u_int8_t hash[16];	/* XXX MD5 knowledge */
		static ONCE_DECL(tcp_iss_secret_control);

		/*
		 * If we haven't been here before, initialize our
		 * cryptographic hash secret.
		 */
		RUN_ONCE(&tcp_iss_secret_control, tcp_iss_secret_init);

		/*
		 * Compute the base value of the ISS.  It is a hash
		 * of (saddr, sport, daddr, dport, secret).
		 */
		MD5Init(&ctx);

		MD5Update(&ctx, (u_char *) laddr, addrsz);
		MD5Update(&ctx, (u_char *) &lport, sizeof(lport));

		MD5Update(&ctx, (u_char *) faddr, addrsz);
		MD5Update(&ctx, (u_char *) &fport, sizeof(fport));

		MD5Update(&ctx, tcp_iss_secret, sizeof(tcp_iss_secret));

		MD5Final(hash, &ctx);

		memcpy(&tcp_iss, hash, sizeof(tcp_iss));

		/*
		 * Now increment our "timer", and add it in to
		 * the computed value.
		 *
		 * XXX Use `addin'?
		 * XXX TCP_ISSINCR too large to use?
		 */
		tcp_iss_seq += TCP_ISSINCR;
#ifdef TCPISS_DEBUG
		printf("ISS hash 0x%08x, ", tcp_iss);
#endif
		tcp_iss += tcp_iss_seq + addin;
#ifdef TCPISS_DEBUG
		printf("new ISS 0x%08x\n", tcp_iss);
#endif
	} else {
		/*
		 * Randomize.
		 */
		tcp_iss = cprng_fast32();

		/*
		 * If we were asked to add some amount to a known value,
		 * we will take a random value obtained above, mask off
		 * the upper bits, and add in the known value.  We also
		 * add in a constant to ensure that we are at least a
		 * certain distance from the original value.
		 *
		 * This is used when an old connection is in timed wait
		 * and we have a new one coming in, for instance.
		 */
		if (addin != 0) {
#ifdef TCPISS_DEBUG
			printf("Random %08x, ", tcp_iss);
#endif
			tcp_iss &= TCP_ISS_RANDOM_MASK;
			tcp_iss += addin + TCP_ISSINCR;
#ifdef TCPISS_DEBUG
			printf("Old ISS %08x, ISS %08x\n", addin, tcp_iss);
#endif
		} else {
			tcp_iss &= TCP_ISS_RANDOM_MASK;
			tcp_iss += tcp_iss_seq;
			tcp_iss_seq += TCP_ISSINCR;
#ifdef TCPISS_DEBUG
			printf("ISS %08x\n", tcp_iss);
#endif
		}
	}

	if (tcp_compat_42) {
		/*
		 * Limit it to the positive range for really old TCP
		 * implementations.
		 * Just AND off the top bit instead of checking if it
		 * is set first - saves a branch 50% of the time.
		 */
		tcp_iss &= 0x7fffffff;		/* XXX */
	}

	return (tcp_iss);
}
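
/*
 * Summarized for reference (derived from the code above): with
 * tcp_do_rfc1948 the result is effectively
 *
 *	ISS = MD5(laddr, lport, faddr, fport, secret) + tcp_iss_seq + addin
 *
 * where tcp_iss_seq advances by TCP_ISSINCR on every call.  Otherwise a
 * cprng_fast32() value is masked with TCP_ISS_RANDOM_MASK and offset by
 * either addin + TCP_ISSINCR or the running tcp_iss_seq, which keeps a
 * reused 4-tuple's new ISS well clear of the old one.
 */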

#if defined(IPSEC)
/* compute ESP/AH header size for TCP, including outer IP header. */
size_t
ipsec4_hdrsiz_tcp(struct tcpcb *tp)
{
	struct inpcb *inp;
	size_t hdrsiz;

	/* XXX mapped addr case (tp->t_in6pcb) */
	if (!tp || !tp->t_template || !(inp = tp->t_inpcb))
		return 0;
	switch (tp->t_family) {
	case AF_INET:
		/* XXX: should use correct direction. */
		hdrsiz = ipsec4_hdrsiz(tp->t_template, IPSEC_DIR_OUTBOUND, inp);
		break;
	default:
		hdrsiz = 0;
		break;
	}

	return hdrsiz;
}

#ifdef INET6
size_t
ipsec6_hdrsiz_tcp(struct tcpcb *tp)
{
	struct in6pcb *in6p;
	size_t hdrsiz;

	if (!tp || !tp->t_template || !(in6p = tp->t_in6pcb))
		return 0;
	switch (tp->t_family) {
	case AF_INET6:
		/* XXX: should use correct direction. */
		hdrsiz = ipsec6_hdrsiz(tp->t_template, IPSEC_DIR_OUTBOUND, in6p);
		break;
	case AF_INET:
		/* mapped address case - tricky */
	default:
		hdrsiz = 0;
		break;
	}

	return hdrsiz;
}
#endif
#endif /* IPSEC */

/*
 * Determine the length of the TCP options for this connection.
 *
 * XXX: What do we do for SACK, when we add that?  Just reserve
 * all of the space?  Otherwise we can't exactly be incrementing
 * cwnd by an amount that varies depending on the amount we last
 * had to SACK!
 */

u_int
tcp_optlen(struct tcpcb *tp)
{
	u_int optlen;

	optlen = 0;
	if ((tp->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP|TF_NOOPT)) ==
	    (TF_REQ_TSTMP | TF_RCVD_TSTMP))
		optlen += TCPOLEN_TSTAMP_APPA;

#ifdef TCP_SIGNATURE
	if (tp->t_flags & TF_SIGNATURE)
		optlen += TCPOLEN_SIGNATURE + 2;
#endif /* TCP_SIGNATURE */

	return optlen;
}

u_int
tcp_hdrsz(struct tcpcb *tp)
{
	u_int hlen;

	switch (tp->t_family) {
#ifdef INET6
	case AF_INET6:
		hlen = sizeof(struct ip6_hdr);
		break;
#endif
	case AF_INET:
		hlen = sizeof(struct ip);
		break;
	default:
		hlen = 0;
		break;
	}
	hlen += sizeof(struct tcphdr);

	if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
	    (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
		hlen += TCPOLEN_TSTAMP_APPA;
#ifdef TCP_SIGNATURE
	if (tp->t_flags & TF_SIGNATURE)
		hlen += TCPOLEN_SIGLEN;
#endif
	return hlen;
}

void
tcp_statinc(u_int stat)
{

	KASSERT(stat < TCP_NSTATS);
	TCP_STATINC(stat);
}

void
tcp_statadd(u_int stat, uint64_t val)
{

	KASSERT(stat < TCP_NSTATS);
	TCP_STATADD(stat, val);
}