/*	$NetBSD: tcp_subr.c,v 1.256 2014/09/05 06:04:43 matt Exp $	*/

/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1997, 1998, 2000, 2001, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Kevin M. Lahey of the Numerical Aerospace Simulation
 * Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: tcp_subr.c,v 1.256 2014/09/05 06:04:43 matt Exp $");

#include "opt_inet.h"
#include "opt_ipsec.h"
#include "opt_tcp_compat_42.h"
#include "opt_inet_csum.h"
#include "opt_mbuftrace.h"

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/once.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/md5.h>
#include <sys/cprng.h>

#include <net/route.h>
#include <net/if.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>

#ifdef INET6
#ifndef INET
#include <netinet/in.h>
#endif
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_var.h>
#include <netinet6/ip6protosw.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#endif

#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_vtw.h>
#include <netinet/tcp_private.h>
#include <netinet/tcp_congctl.h>
#include <netinet/tcpip.h>

#ifdef IPSEC
#include <netipsec/ipsec.h>
#include <netipsec/xform.h>
#ifdef INET6
#include <netipsec/ipsec6.h>
#endif
#include <netipsec/key.h>
#endif	/* IPSEC */


struct inpcbtable tcbtable;	/* head of queue of active tcpcb's */
u_int32_t tcp_now;		/* slow ticks, for RFC 1323 timestamps */

percpu_t *tcpstat_percpu;

/* patchable/settable parameters for tcp */
int	tcp_mssdflt = TCP_MSS;
int	tcp_minmss = TCP_MINMSS;
int	tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
int	tcp_do_rfc1323 = 1;	/* window scaling / timestamps (obsolete) */
int	tcp_do_rfc1948 = 0;	/* ISS by cryptographic hash */
int	tcp_do_sack = 1;	/* selective acknowledgement */
int	tcp_do_win_scale = 1;	/* RFC1323 window scaling */
int	tcp_do_timestamps = 1;	/* RFC1323 timestamps */
int	tcp_ack_on_push = 0;	/* set to enable immediate ACK-on-PUSH */
int	tcp_do_ecn = 0;		/* Explicit Congestion Notification */
#ifndef TCP_INIT_WIN
#define	TCP_INIT_WIN	4	/* initial slow start window */
#endif
#ifndef TCP_INIT_WIN_LOCAL
#define	TCP_INIT_WIN_LOCAL 4	/* initial slow start window for local nets */
#endif
/*
 * Up to 5 we scale linearly, to reach 3 * 1460; then (iw) * 1460.
 * This is to simulate current behavior for iw == 4
 */
int	tcp_init_win_max[] = {
	 1 * 1460,
	 1 * 1460,
	 2 * 1460,
	 2 * 1460,
	 3 * 1460,
	 5 * 1460,
	 6 * 1460,
	 7 * 1460,
	 8 * 1460,
	 9 * 1460,
	10 * 1460
};
int	tcp_init_win = TCP_INIT_WIN;
int	tcp_init_win_local = TCP_INIT_WIN_LOCAL;
int	tcp_mss_ifmtu = 0;
#ifdef TCP_COMPAT_42
int	tcp_compat_42 = 1;
#else
int	tcp_compat_42 = 0;
#endif
int	tcp_rst_ppslim = 100;	/* 100pps */
int	tcp_ackdrop_ppslim = 100;	/* 100pps */
int	tcp_do_loopback_cksum = 0;
int	tcp_do_abc = 1;		/* RFC3465 Appropriate byte counting. */
int	tcp_abc_aggressive = 1;	/* 1: L=2*SMSS  0: L=1*SMSS */
int	tcp_sack_tp_maxholes = 32;
int	tcp_sack_globalmaxholes = 1024;
int	tcp_sack_globalholes = 0;
int	tcp_ecn_maxretries = 1;
int	tcp_msl_enable = 1;		/* enable TIME_WAIT truncation */
int	tcp_msl_loop = PR_SLOWHZ;	/* MSL for loopback */
int	tcp_msl_local = 5 * PR_SLOWHZ;	/* MSL for 'local' */
int	tcp_msl_remote = TCPTV_MSL;	/* MSL otherwise */
int	tcp_msl_remote_threshold = TCPTV_SRTTDFLT;	/* RTT threshold */
int	tcp_rttlocal = 0;		/* Use RTT to decide who's 'local' */

int	tcp4_vtw_enable = 0;		/* 1 to enable */
int	tcp6_vtw_enable = 0;		/* 1 to enable */
int	tcp_vtw_was_enabled = 0;
int	tcp_vtw_entries = 1 << 4;	/* 16 vestigial TIME_WAIT entries */

/* tcb hash */
#ifndef TCBHASHSIZE
#define	TCBHASHSIZE	128
#endif
int	tcbhashsize = TCBHASHSIZE;

/* syn hash parameters */
#define	TCP_SYN_HASH_SIZE	293
#define	TCP_SYN_BUCKET_SIZE	35
int	tcp_syn_cache_size = TCP_SYN_HASH_SIZE;
int	tcp_syn_cache_limit = TCP_SYN_HASH_SIZE*TCP_SYN_BUCKET_SIZE;
int	tcp_syn_bucket_limit = 3*TCP_SYN_BUCKET_SIZE;
struct	syn_cache_head tcp_syn_cache[TCP_SYN_HASH_SIZE];

int	tcp_freeq(struct tcpcb *);

#ifdef INET
static void	tcp_mtudisc_callback(struct in_addr);
#endif

#ifdef INET6
void	tcp6_mtudisc(struct in6pcb *, int);
#endif

static struct pool tcpcb_pool;

static int tcp_drainwanted;

#ifdef TCP_CSUM_COUNTERS
#include <sys/device.h>

#if defined(INET)
struct evcnt tcp_hwcsum_bad = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "tcp", "hwcsum bad");
struct evcnt tcp_hwcsum_ok = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "tcp", "hwcsum ok");
struct evcnt tcp_hwcsum_data = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "tcp", "hwcsum data");
struct evcnt tcp_swcsum = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "tcp", "swcsum");

EVCNT_ATTACH_STATIC(tcp_hwcsum_bad);
EVCNT_ATTACH_STATIC(tcp_hwcsum_ok);
EVCNT_ATTACH_STATIC(tcp_hwcsum_data);
EVCNT_ATTACH_STATIC(tcp_swcsum);
#endif /* defined(INET) */

#if defined(INET6)
struct evcnt tcp6_hwcsum_bad = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "tcp6", "hwcsum bad");
struct evcnt tcp6_hwcsum_ok = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "tcp6", "hwcsum ok");
struct evcnt tcp6_hwcsum_data = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "tcp6", "hwcsum data");
struct evcnt tcp6_swcsum = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "tcp6", "swcsum");

EVCNT_ATTACH_STATIC(tcp6_hwcsum_bad);
EVCNT_ATTACH_STATIC(tcp6_hwcsum_ok);
EVCNT_ATTACH_STATIC(tcp6_hwcsum_data);
EVCNT_ATTACH_STATIC(tcp6_swcsum);
#endif /* defined(INET6) */
#endif /* TCP_CSUM_COUNTERS */


#ifdef TCP_OUTPUT_COUNTERS
#include <sys/device.h>

struct evcnt tcp_output_bigheader = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "tcp", "output big header");
struct evcnt tcp_output_predict_hit = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "tcp", "output predict hit");
struct evcnt tcp_output_predict_miss = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "tcp", "output predict miss");
struct evcnt tcp_output_copysmall = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "tcp", "output copy small");
struct evcnt tcp_output_copybig = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "tcp", "output copy big");
struct evcnt tcp_output_refbig = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "tcp", "output reference big");

EVCNT_ATTACH_STATIC(tcp_output_bigheader);
EVCNT_ATTACH_STATIC(tcp_output_predict_hit);
EVCNT_ATTACH_STATIC(tcp_output_predict_miss);
EVCNT_ATTACH_STATIC(tcp_output_copysmall);
EVCNT_ATTACH_STATIC(tcp_output_copybig);
EVCNT_ATTACH_STATIC(tcp_output_refbig);

#endif /* TCP_OUTPUT_COUNTERS */

#ifdef TCP_REASS_COUNTERS
#include <sys/device.h>

struct evcnt tcp_reass_ = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "tcp_reass", "calls");
struct evcnt tcp_reass_empty = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    &tcp_reass_, "tcp_reass", "insert into empty queue");
struct evcnt tcp_reass_iteration[8] = {
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", ">7 iterations"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "1 iteration"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "2 iterations"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "3 iterations"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "4 iterations"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "5 iterations"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "6 iterations"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "7 iterations"),
};
struct evcnt tcp_reass_prependfirst = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    &tcp_reass_, "tcp_reass", "prepend to first");
struct evcnt tcp_reass_prepend = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    &tcp_reass_, "tcp_reass", "prepend");
struct evcnt tcp_reass_insert = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    &tcp_reass_, "tcp_reass", "insert");
struct evcnt tcp_reass_inserttail = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    &tcp_reass_, "tcp_reass", "insert at tail");
struct evcnt tcp_reass_append = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    &tcp_reass_, "tcp_reass", "append");
struct evcnt tcp_reass_appendtail = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    &tcp_reass_, "tcp_reass", "append to tail fragment");
struct evcnt tcp_reass_overlaptail = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    &tcp_reass_, "tcp_reass", "overlap at end");
struct evcnt tcp_reass_overlapfront = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    &tcp_reass_, "tcp_reass", "overlap at start");
struct evcnt tcp_reass_segdup = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    &tcp_reass_, "tcp_reass", "duplicate segment");
struct evcnt tcp_reass_fragdup = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    &tcp_reass_, "tcp_reass", "duplicate fragment");

EVCNT_ATTACH_STATIC(tcp_reass_);
EVCNT_ATTACH_STATIC(tcp_reass_empty);
EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 0);
EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 1);
EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 2);
EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 3);
EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 4);
EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 5);
EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 6);
EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 7);
EVCNT_ATTACH_STATIC(tcp_reass_prependfirst);
EVCNT_ATTACH_STATIC(tcp_reass_prepend);
EVCNT_ATTACH_STATIC(tcp_reass_insert);
EVCNT_ATTACH_STATIC(tcp_reass_inserttail);
EVCNT_ATTACH_STATIC(tcp_reass_append);
EVCNT_ATTACH_STATIC(tcp_reass_appendtail);
EVCNT_ATTACH_STATIC(tcp_reass_overlaptail);
EVCNT_ATTACH_STATIC(tcp_reass_overlapfront);
EVCNT_ATTACH_STATIC(tcp_reass_segdup);
EVCNT_ATTACH_STATIC(tcp_reass_fragdup);

#endif /* TCP_REASS_COUNTERS */

#ifdef MBUFTRACE
struct mowner tcp_mowner = MOWNER_INIT("tcp", "");
struct mowner tcp_rx_mowner = MOWNER_INIT("tcp", "rx");
struct mowner tcp_tx_mowner = MOWNER_INIT("tcp", "tx");
struct mowner tcp_sock_mowner = MOWNER_INIT("tcp", "sock");
struct mowner tcp_sock_rx_mowner = MOWNER_INIT("tcp", "sock rx");
struct mowner tcp_sock_tx_mowner = MOWNER_INIT("tcp", "sock tx");
#endif

callout_t tcp_slowtimo_ch;

static int
do_tcpinit(void)
{

	in_pcbinit(&tcbtable, tcbhashsize, tcbhashsize);
	pool_init(&tcpcb_pool, sizeof(struct tcpcb), 0, 0, 0, "tcpcbpl",
	    NULL, IPL_SOFTNET);

	tcp_usrreq_init();

	/* Initialize timer state. */
	tcp_timer_init();

	/* Initialize the compressed state engine. */
	syn_cache_init();

	/* Initialize the congestion control algorithms. */
	tcp_congctl_init();

	/* Initialize the TCPCB template. */
	tcp_tcpcb_template();

	/* Initialize reassembly queue */
	tcpipqent_init();

	/* SACK */
	tcp_sack_init();

	MOWNER_ATTACH(&tcp_tx_mowner);
	MOWNER_ATTACH(&tcp_rx_mowner);
	MOWNER_ATTACH(&tcp_reass_mowner);
	MOWNER_ATTACH(&tcp_sock_mowner);
	MOWNER_ATTACH(&tcp_sock_tx_mowner);
	MOWNER_ATTACH(&tcp_sock_rx_mowner);
	MOWNER_ATTACH(&tcp_mowner);

	tcpstat_percpu = percpu_alloc(sizeof(uint64_t) * TCP_NSTATS);

	vtw_earlyinit();

	callout_init(&tcp_slowtimo_ch, CALLOUT_MPSAFE);
	callout_reset(&tcp_slowtimo_ch, 1, tcp_slowtimo, NULL);

	return 0;
}

void
tcp_init_common(unsigned basehlen)
{
	static ONCE_DECL(dotcpinit);
	unsigned hlen = basehlen + sizeof(struct tcphdr);
	unsigned oldhlen;

	if (max_linkhdr + hlen > MHLEN)
		panic("tcp_init");
	while ((oldhlen = max_protohdr) < hlen)
		atomic_cas_uint(&max_protohdr, oldhlen, hlen);

	RUN_ONCE(&dotcpinit, do_tcpinit);
}

/*
 * TCP initialization
 */
void
tcp_init(void)
{

	icmp_mtudisc_callback_register(tcp_mtudisc_callback);

	tcp_init_common(sizeof(struct ip));
}

/*
 * Create template to be used to send tcp packets on a connection.
 * Call after host entry created, allocates an mbuf and fills
 * in a skeletal tcp/ip header, minimizing the amount of work
 * necessary when the connection is used.
 */
struct mbuf *
tcp_template(struct tcpcb *tp)
{
	struct inpcb *inp = tp->t_inpcb;
#ifdef INET6
	struct in6pcb *in6p = tp->t_in6pcb;
#endif
	struct tcphdr *n;
	struct mbuf *m;
	int hlen;

	switch (tp->t_family) {
	case AF_INET:
		hlen = sizeof(struct ip);
		if (inp)
			break;
#ifdef INET6
		if (in6p) {
			/* mapped addr case */
			if (IN6_IS_ADDR_V4MAPPED(&in6p->in6p_laddr)
			    && IN6_IS_ADDR_V4MAPPED(&in6p->in6p_faddr))
				break;
		}
#endif
		return NULL;	/*EINVAL*/
#ifdef INET6
	case AF_INET6:
		hlen = sizeof(struct ip6_hdr);
		if (in6p) {
			/* more sanity check? */
			break;
		}
		return NULL;	/*EINVAL*/
#endif
	default:
		hlen = 0;	/*pacify gcc*/
		return NULL;	/*EAFNOSUPPORT*/
	}
#ifdef DIAGNOSTIC
	if (hlen + sizeof(struct tcphdr) > MCLBYTES)
		panic("mclbytes too small for t_template");
#endif
	m = tp->t_template;
	if (m && m->m_len == hlen + sizeof(struct tcphdr))
		;
	else {
		if (m)
			m_freem(m);
		m = tp->t_template = NULL;
		MGETHDR(m, M_DONTWAIT, MT_HEADER);
		if (m && hlen + sizeof(struct tcphdr) > MHLEN) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_free(m);
				m = NULL;
			}
		}
		if (m == NULL)
			return NULL;
		MCLAIM(m, &tcp_mowner);
		m->m_pkthdr.len = m->m_len = hlen + sizeof(struct tcphdr);
	}

	memset(mtod(m, void *), 0, m->m_len);

	n = (struct tcphdr *)(mtod(m, char *) + hlen);

	switch (tp->t_family) {
	case AF_INET:
	    {
		struct ipovly *ipov;
		mtod(m, struct ip *)->ip_v = 4;
		mtod(m, struct ip *)->ip_hl = hlen >> 2;
		ipov = mtod(m, struct ipovly *);
		ipov->ih_pr = IPPROTO_TCP;
		ipov->ih_len = htons(sizeof(struct tcphdr));
		if (inp) {
			ipov->ih_src = inp->inp_laddr;
			ipov->ih_dst = inp->inp_faddr;
		}
#ifdef INET6
		else if (in6p) {
			/* mapped addr case */
			bcopy(&in6p->in6p_laddr.s6_addr32[3], &ipov->ih_src,
			    sizeof(ipov->ih_src));
			bcopy(&in6p->in6p_faddr.s6_addr32[3], &ipov->ih_dst,
			    sizeof(ipov->ih_dst));
		}
#endif
		/*
		 * Compute the pseudo-header portion of the checksum
		 * now.  We incrementally add in the TCP option and
		 * payload lengths later, and then compute the TCP
		 * checksum right before the packet is sent off onto
		 * the wire.
		 */
		n->th_sum = in_cksum_phdr(ipov->ih_src.s_addr,
		    ipov->ih_dst.s_addr,
		    htons(sizeof(struct tcphdr) + IPPROTO_TCP));
		break;
	    }
#ifdef INET6
	case AF_INET6:
	    {
		struct ip6_hdr *ip6;
		mtod(m, struct ip *)->ip_v = 6;
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = htons(sizeof(struct tcphdr));
		ip6->ip6_src = in6p->in6p_laddr;
		ip6->ip6_dst = in6p->in6p_faddr;
		ip6->ip6_flow = in6p->in6p_flowinfo & IPV6_FLOWINFO_MASK;
		if (ip6_auto_flowlabel) {
			ip6->ip6_flow &= ~IPV6_FLOWLABEL_MASK;
			ip6->ip6_flow |=
			    (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
		}
		ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
		ip6->ip6_vfc |= IPV6_VERSION;

		/*
		 * Compute the pseudo-header portion of the checksum
		 * now.  We incrementally add in the TCP option and
		 * payload lengths later, and then compute the TCP
		 * checksum right before the packet is sent off onto
		 * the wire.
		 */
		n->th_sum = in6_cksum_phdr(&in6p->in6p_laddr,
		    &in6p->in6p_faddr, htonl(sizeof(struct tcphdr)),
		    htonl(IPPROTO_TCP));
		break;
	    }
#endif
	}
	if (inp) {
		n->th_sport = inp->inp_lport;
		n->th_dport = inp->inp_fport;
	}
#ifdef INET6
	else if (in6p) {
		n->th_sport = in6p->in6p_lport;
		n->th_dport = in6p->in6p_fport;
	}
#endif
	n->th_seq = 0;
	n->th_ack = 0;
	n->th_x2 = 0;
	n->th_off = 5;
	n->th_flags = 0;
	n->th_win = 0;
	n->th_urp = 0;
	return (m);
}
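
/*
 * A typical caller (illustrative sketch only) caches the result in
 * tp->t_template, e.g.
 *
 *	if (tp->t_template == NULL)
 *		tp->t_template = tcp_template(tp);
 *
 * and the keepalive path later hands that template to tcp_respond(),
 * as described in the comment below.
 */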

/*
 * Send a single message to the TCP at address specified by
 * the given TCP/IP header.  If m == 0, then we make a copy
 * of the tcpiphdr at ti and send directly to the addressed host.
 * This is used to force keep alive messages out using the TCP
 * template for a connection tp->t_template.  If flags are given
 * then we send a message back to the TCP which originated the
 * segment ti, and discard the mbuf containing it and any other
 * attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 */
int
tcp_respond(struct tcpcb *tp, struct mbuf *mtemplate, struct mbuf *m,
    struct tcphdr *th0, tcp_seq ack, tcp_seq seq, int flags)
{
#ifdef INET6
	struct rtentry *rt;
#endif
	struct route *ro;
	int error, tlen, win = 0;
	int hlen;
	struct ip *ip;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	int family;	/* family on packet, not inpcb/in6pcb! */
	struct tcphdr *th;
	struct socket *so;

	if (tp != NULL && (flags & TH_RST) == 0) {
#ifdef DIAGNOSTIC
		if (tp->t_inpcb && tp->t_in6pcb)
			panic("tcp_respond: both t_inpcb and t_in6pcb are set");
#endif
#ifdef INET
		if (tp->t_inpcb)
			win = sbspace(&tp->t_inpcb->inp_socket->so_rcv);
#endif
#ifdef INET6
		if (tp->t_in6pcb)
			win = sbspace(&tp->t_in6pcb->in6p_socket->so_rcv);
#endif
	}

	th = NULL;	/* Quell uninitialized warning */
	ip = NULL;
#ifdef INET6
	ip6 = NULL;
#endif
	if (m == 0) {
		if (!mtemplate)
			return EINVAL;

		/* get family information from template */
		switch (mtod(mtemplate, struct ip *)->ip_v) {
		case 4:
			family = AF_INET;
			hlen = sizeof(struct ip);
			break;
#ifdef INET6
		case 6:
			family = AF_INET6;
			hlen = sizeof(struct ip6_hdr);
			break;
#endif
		default:
			return EAFNOSUPPORT;
		}

		MGETHDR(m, M_DONTWAIT, MT_HEADER);
		if (m) {
			MCLAIM(m, &tcp_tx_mowner);
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_free(m);
				m = NULL;
			}
		}
		if (m == NULL)
			return (ENOBUFS);

		if (tcp_compat_42)
			tlen = 1;
		else
			tlen = 0;

		m->m_data += max_linkhdr;
		bcopy(mtod(mtemplate, void *), mtod(m, void *),
		    mtemplate->m_len);
		switch (family) {
		case AF_INET:
			ip = mtod(m, struct ip *);
			th = (struct tcphdr *)(ip + 1);
			break;
#ifdef INET6
		case AF_INET6:
			ip6 = mtod(m, struct ip6_hdr *);
			th = (struct tcphdr *)(ip6 + 1);
			break;
#endif
#if 0
		default:
			/* no one will visit here */
			m_freem(m);
			return EAFNOSUPPORT;
#endif
		}
		flags = TH_ACK;
	} else {

		if ((m->m_flags & M_PKTHDR) == 0) {
#if 0
			printf("non PKTHDR to tcp_respond\n");
#endif
			m_freem(m);
			return EINVAL;
		}
#ifdef DIAGNOSTIC
		if (!th0)
			panic("th0 == NULL in tcp_respond");
#endif

		/* get family information from m */
		switch (mtod(m, struct ip *)->ip_v) {
		case 4:
			family = AF_INET;
			hlen = sizeof(struct ip);
			ip = mtod(m, struct ip *);
			break;
#ifdef INET6
		case 6:
			family = AF_INET6;
			hlen = sizeof(struct ip6_hdr);
			ip6 = mtod(m, struct ip6_hdr *);
			break;
#endif
		default:
			m_freem(m);
			return EAFNOSUPPORT;
		}
		/* clear h/w csum flags inherited from rx packet */
		m->m_pkthdr.csum_flags = 0;

		if ((flags & TH_SYN) == 0 || sizeof(*th0) > (th0->th_off << 2))
			tlen = sizeof(*th0);
		else
			tlen = th0->th_off << 2;

		if (m->m_len > hlen + tlen && (m->m_flags & M_EXT) == 0 &&
		    mtod(m, char *) + hlen == (char *)th0) {
			m->m_len = hlen + tlen;
			m_freem(m->m_next);
			m->m_next = NULL;
		} else {
			struct mbuf *n;

#ifdef DIAGNOSTIC
			if (max_linkhdr + hlen + tlen > MCLBYTES) {
				m_freem(m);
				return EMSGSIZE;
			}
#endif
			MGETHDR(n, M_DONTWAIT, MT_HEADER);
			if (n && max_linkhdr + hlen + tlen > MHLEN) {
				MCLGET(n, M_DONTWAIT);
				if ((n->m_flags & M_EXT) == 0) {
					m_freem(n);
					n = NULL;
				}
			}
			if (!n) {
				m_freem(m);
				return ENOBUFS;
			}

			MCLAIM(n, &tcp_tx_mowner);
			n->m_data += max_linkhdr;
			n->m_len = hlen + tlen;
			m_copyback(n, 0, hlen, mtod(m, void *));
			m_copyback(n, hlen, tlen, (void *)th0);

			m_freem(m);
			m = n;
			n = NULL;
		}

#define xchg(a,b,type) { type t; t=a; a=b; b=t; }
		switch (family) {
		case AF_INET:
			ip = mtod(m, struct ip *);
			th = (struct tcphdr *)(ip + 1);
			ip->ip_p = IPPROTO_TCP;
			xchg(ip->ip_dst, ip->ip_src, struct in_addr);
			ip->ip_p = IPPROTO_TCP;
			break;
#ifdef INET6
		case AF_INET6:
			ip6 = mtod(m, struct ip6_hdr *);
			th = (struct tcphdr *)(ip6 + 1);
			ip6->ip6_nxt = IPPROTO_TCP;
			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
			ip6->ip6_nxt = IPPROTO_TCP;
			break;
#endif
#if 0
		default:
			/* no one will visit here */
			m_freem(m);
			return EAFNOSUPPORT;
#endif
		}
		xchg(th->th_dport, th->th_sport, u_int16_t);
#undef xchg
		tlen = 0;	/*be friendly with the following code*/
	}
	th->th_seq = htonl(seq);
	th->th_ack = htonl(ack);
	th->th_x2 = 0;
	if ((flags & TH_SYN) == 0) {
		if (tp)
			win >>= tp->rcv_scale;
		if (win > TCP_MAXWIN)
			win = TCP_MAXWIN;
		th->th_win = htons((u_int16_t)win);
		th->th_off = sizeof (struct tcphdr) >> 2;
		tlen += sizeof(*th);
	} else
		tlen += th->th_off << 2;
	m->m_len = hlen + tlen;
	m->m_pkthdr.len = hlen + tlen;
	m->m_pkthdr.rcvif = NULL;
	th->th_flags = flags;
	th->th_urp = 0;

	switch (family) {
#ifdef INET
	case AF_INET:
	    {
		struct ipovly *ipov = (struct ipovly *)ip;
		memset(ipov->ih_x1, 0, sizeof ipov->ih_x1);
		ipov->ih_len = htons((u_int16_t)tlen);

		th->th_sum = 0;
		th->th_sum = in_cksum(m, hlen + tlen);
		ip->ip_len = htons(hlen + tlen);
		ip->ip_ttl = ip_defttl;
		break;
	    }
#endif
#ifdef INET6
	case AF_INET6:
	    {
		th->th_sum = 0;
		th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(struct ip6_hdr),
		    tlen);
		ip6->ip6_plen = htons(tlen);
		if (tp && tp->t_in6pcb) {
			struct ifnet *oifp;
			ro = &tp->t_in6pcb->in6p_route;
			oifp = (rt = rtcache_validate(ro)) != NULL ?
			    rt->rt_ifp : NULL;
			ip6->ip6_hlim = in6_selecthlim(tp->t_in6pcb, oifp);
		} else
			ip6->ip6_hlim = ip6_defhlim;
		ip6->ip6_flow &= ~IPV6_FLOWINFO_MASK;
		if (ip6_auto_flowlabel) {
			ip6->ip6_flow |=
			    (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
		}
		break;
	    }
#endif
	}

	if (tp && tp->t_inpcb)
		so = tp->t_inpcb->inp_socket;
#ifdef INET6
	else if (tp && tp->t_in6pcb)
		so = tp->t_in6pcb->in6p_socket;
#endif
	else
		so = NULL;

	if (tp != NULL && tp->t_inpcb != NULL) {
		ro = &tp->t_inpcb->inp_route;
#ifdef DIAGNOSTIC
		if (family != AF_INET)
			panic("tcp_respond: address family mismatch");
		if (!in_hosteq(ip->ip_dst, tp->t_inpcb->inp_faddr)) {
			panic("tcp_respond: ip_dst %x != inp_faddr %x",
			    ntohl(ip->ip_dst.s_addr),
			    ntohl(tp->t_inpcb->inp_faddr.s_addr));
		}
#endif
	}
#ifdef INET6
	else if (tp != NULL && tp->t_in6pcb != NULL) {
		ro = (struct route *)&tp->t_in6pcb->in6p_route;
#ifdef DIAGNOSTIC
		if (family == AF_INET) {
			if (!IN6_IS_ADDR_V4MAPPED(&tp->t_in6pcb->in6p_faddr))
				panic("tcp_respond: not mapped addr");
			if (memcmp(&ip->ip_dst,
			    &tp->t_in6pcb->in6p_faddr.s6_addr32[3],
			    sizeof(ip->ip_dst)) != 0) {
				panic("tcp_respond: ip_dst != in6p_faddr");
			}
		} else if (family == AF_INET6) {
			if (!IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst,
			    &tp->t_in6pcb->in6p_faddr))
				panic("tcp_respond: ip6_dst != in6p_faddr");
		} else
			panic("tcp_respond: address family mismatch");
#endif
	}
#endif
	else
		ro = NULL;

	switch (family) {
#ifdef INET
	case AF_INET:
		error = ip_output(m, NULL, ro,
		    (tp && tp->t_mtudisc ? IP_MTUDISC : 0), NULL, so);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		error = ip6_output(m, NULL, ro, 0, NULL, so, NULL);
		break;
#endif
	default:
		error = EAFNOSUPPORT;
		break;
	}

	return (error);
}

/*
 * Template TCPCB.  Rather than zeroing a new TCPCB and initializing
 * a bunch of members individually, we maintain this template for the
 * static and mostly-static components of the TCPCB, and copy it into
 * the new TCPCB instead.
 */
static struct tcpcb tcpcb_template = {
	.t_srtt = TCPTV_SRTTBASE,
	.t_rttmin = TCPTV_MIN,

	.snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT,
	.snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT,
	.snd_numholes = 0,
	.snd_cubic_wmax = 0,
	.snd_cubic_wmax_last = 0,
	.snd_cubic_ctime = 0,

	.t_partialacks = -1,
	.t_bytes_acked = 0,
};

/*
 * Updates the TCPCB template whenever a parameter that would affect
 * the template is changed.
 */
void
tcp_tcpcb_template(void)
{
	struct tcpcb *tp = &tcpcb_template;
	int flags;

	tp->t_peermss = tcp_mssdflt;
	tp->t_ourmss = tcp_mssdflt;
	tp->t_segsz = tcp_mssdflt;

	flags = 0;
	if (tcp_do_rfc1323 && tcp_do_win_scale)
		flags |= TF_REQ_SCALE;
	if (tcp_do_rfc1323 && tcp_do_timestamps)
		flags |= TF_REQ_TSTMP;
	tp->t_flags = flags;

	/*
	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
	 * rtt estimate.  Set rttvar so that srtt + 2 * rttvar gives
	 * reasonable initial retransmit time.
	 */
	tp->t_rttvar = tcp_rttdflt * PR_SLOWHZ << (TCP_RTTVAR_SHIFT + 2 - 1);
	TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
	    TCPTV_MIN, TCPTV_REXMTMAX);

	/* Keep Alive */
	tp->t_keepinit = tcp_keepinit;
	tp->t_keepidle = tcp_keepidle;
	tp->t_keepintvl = tcp_keepintvl;
	tp->t_keepcnt = tcp_keepcnt;
	tp->t_maxidle = tp->t_keepcnt * tp->t_keepintvl;

	/* MSL */
	tp->t_msl = TCPTV_MSL;
}

/*
 * Create a new TCP control block, making an
 * empty reassembly queue and hooking it to the argument
 * protocol control block.
 */
/* family selects inpcb, or in6pcb */
struct tcpcb *
tcp_newtcpcb(int family, void *aux)
{
#ifdef INET6
	struct rtentry *rt;
#endif
	struct tcpcb *tp;
	int i;

	/* XXX Consider using a pool_cache for speed. */
	tp = pool_get(&tcpcb_pool, PR_NOWAIT);	/* splsoftnet via tcp_usrreq */
	if (tp == NULL)
		return (NULL);
	memcpy(tp, &tcpcb_template, sizeof(*tp));
	TAILQ_INIT(&tp->segq);
	TAILQ_INIT(&tp->timeq);
	tp->t_family = family;		/* may be overridden later on */
	TAILQ_INIT(&tp->snd_holes);
	LIST_INIT(&tp->t_sc);		/* XXX can template this */

	/* Don't sweat this loop; hopefully the compiler will unroll it. */
	for (i = 0; i < TCPT_NTIMERS; i++) {
		callout_init(&tp->t_timer[i], CALLOUT_MPSAFE);
		TCP_TIMER_INIT(tp, i);
	}
	callout_init(&tp->t_delack_ch, CALLOUT_MPSAFE);

	switch (family) {
	case AF_INET:
	    {
		struct inpcb *inp = (struct inpcb *)aux;

		inp->inp_ip.ip_ttl = ip_defttl;
		inp->inp_ppcb = (void *)tp;

		tp->t_inpcb = inp;
		tp->t_mtudisc = ip_mtudisc;
		break;
	    }
#ifdef INET6
	case AF_INET6:
	    {
		struct in6pcb *in6p = (struct in6pcb *)aux;

		in6p->in6p_ip6.ip6_hlim = in6_selecthlim(in6p,
		    (rt = rtcache_validate(&in6p->in6p_route)) != NULL
			? rt->rt_ifp
			: NULL);
		in6p->in6p_ppcb = (void *)tp;

		tp->t_in6pcb = in6p;
		/* for IPv6, always try to run path MTU discovery */
		tp->t_mtudisc = 1;
		break;
	    }
#endif /* INET6 */
	default:
		for (i = 0; i < TCPT_NTIMERS; i++)
			callout_destroy(&tp->t_timer[i]);
		callout_destroy(&tp->t_delack_ch);
		pool_put(&tcpcb_pool, tp);	/* splsoftnet via tcp_usrreq */
		return (NULL);
	}

	/*
	 * Initialize our timebase.  When we send timestamps, we take
	 * the delta from tcp_now -- this means each connection always
	 * gets a timebase of 1, which makes it, among other things,
	 * more difficult to determine how long a system has been up,
	 * and thus how many TCP sequence increments have occurred.
	 *
	 * We start with 1, because 0 doesn't work with linux, which
	 * considers timestamp 0 in a SYN packet as a bug and disables
	 * timestamps.
	 */
	tp->ts_timebase = tcp_now - 1;

	tcp_congctl_select(tp, tcp_congctl_global_name);

	return (tp);
}
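
/*
 * Note: the matching teardown path is tcp_close() below, which halts
 * and destroys the per-connection callouts initialized here and
 * returns the tcpcb to tcpcb_pool.
 */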

/*
 * Drop a TCP connection, reporting
 * the specified error.  If connection is synchronized,
 * then send a RST to peer.
 */
struct tcpcb *
tcp_drop(struct tcpcb *tp, int errno)
{
	struct socket *so = NULL;

#ifdef DIAGNOSTIC
	if (tp->t_inpcb && tp->t_in6pcb)
		panic("tcp_drop: both t_inpcb and t_in6pcb are set");
#endif
#ifdef INET
	if (tp->t_inpcb)
		so = tp->t_inpcb->inp_socket;
#endif
#ifdef INET6
	if (tp->t_in6pcb)
		so = tp->t_in6pcb->in6p_socket;
#endif
	if (!so)
		return NULL;

	if (TCPS_HAVERCVDSYN(tp->t_state)) {
		tp->t_state = TCPS_CLOSED;
		(void) tcp_output(tp);
		TCP_STATINC(TCP_STAT_DROPS);
	} else
		TCP_STATINC(TCP_STAT_CONNDROPS);
	if (errno == ETIMEDOUT && tp->t_softerror)
		errno = tp->t_softerror;
	so->so_error = errno;
	return (tcp_close(tp));
}

/*
 * Close a TCP control block:
 *	discard all space held by the tcp
 *	discard internet protocol block
 *	wake up any sleepers
 */
struct tcpcb *
tcp_close(struct tcpcb *tp)
{
	struct inpcb *inp;
#ifdef INET6
	struct in6pcb *in6p;
#endif
	struct socket *so;
#ifdef RTV_RTT
	struct rtentry *rt;
#endif
	struct route *ro;
	int j;

	inp = tp->t_inpcb;
#ifdef INET6
	in6p = tp->t_in6pcb;
#endif
	so = NULL;
	ro = NULL;
	if (inp) {
		so = inp->inp_socket;
		ro = &inp->inp_route;
	}
#ifdef INET6
	else if (in6p) {
		so = in6p->in6p_socket;
		ro = (struct route *)&in6p->in6p_route;
	}
#endif

#ifdef RTV_RTT
	/*
	 * If we sent enough data to get some meaningful characteristics,
	 * save them in the routing entry.  'Enough' is arbitrarily
	 * defined as the sendpipesize (default 4K) * 16.  This would
	 * give us 16 rtt samples assuming we only get one sample per
	 * window (the usual case on a long haul net).  16 samples is
	 * enough for the srtt filter to converge to within 5% of the correct
	 * value; fewer samples and we could save a very bogus rtt.
	 *
	 * Don't update the default route's characteristics and don't
	 * update anything that the user "locked".
	 */
	if (SEQ_LT(tp->iss + so->so_snd.sb_hiwat * 16, tp->snd_max) &&
	    ro && (rt = rtcache_validate(ro)) != NULL &&
	    !in_nullhost(satocsin(rt_getkey(rt))->sin_addr)) {
		u_long i = 0;

		if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) {
			i = tp->t_srtt *
			    ((RTM_RTTUNIT / PR_SLOWHZ) >> (TCP_RTT_SHIFT + 2));
			if (rt->rt_rmx.rmx_rtt && i)
				/*
				 * filter this update to half the old & half
				 * the new values, converting scale.
				 * See route.h and tcp_var.h for a
				 * description of the scaling constants.
				 */
				rt->rt_rmx.rmx_rtt =
				    (rt->rt_rmx.rmx_rtt + i) / 2;
			else
				rt->rt_rmx.rmx_rtt = i;
		}
		if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) {
			i = tp->t_rttvar *
			    ((RTM_RTTUNIT / PR_SLOWHZ) >> (TCP_RTTVAR_SHIFT + 2));
			if (rt->rt_rmx.rmx_rttvar && i)
				rt->rt_rmx.rmx_rttvar =
				    (rt->rt_rmx.rmx_rttvar + i) / 2;
			else
				rt->rt_rmx.rmx_rttvar = i;
		}
		/*
		 * update the pipelimit (ssthresh) if it has been updated
		 * already or if a pipesize was specified & the threshold
		 * got below half the pipesize.  I.e., wait for bad news
		 * before we start updating, then update on both good
		 * and bad news.
		 */
		if (((rt->rt_rmx.rmx_locks & RTV_SSTHRESH) == 0 &&
		    (i = tp->snd_ssthresh) && rt->rt_rmx.rmx_ssthresh) ||
		    i < (rt->rt_rmx.rmx_sendpipe / 2)) {
			/*
			 * convert the limit from user data bytes to
			 * packets then to packet data bytes.
			 */
			i = (i + tp->t_segsz / 2) / tp->t_segsz;
			if (i < 2)
				i = 2;
			i *= (u_long)(tp->t_segsz + sizeof (struct tcpiphdr));
			if (rt->rt_rmx.rmx_ssthresh)
				rt->rt_rmx.rmx_ssthresh =
				    (rt->rt_rmx.rmx_ssthresh + i) / 2;
			else
				rt->rt_rmx.rmx_ssthresh = i;
		}
	}
#endif /* RTV_RTT */
	/* free the reassembly queue, if any */
	TCP_REASS_LOCK(tp);
	(void) tcp_freeq(tp);
	TCP_REASS_UNLOCK(tp);

	/* free the SACK holes list. */
	tcp_free_sackholes(tp);
	tcp_congctl_release(tp);
	syn_cache_cleanup(tp);

	if (tp->t_template) {
		m_free(tp->t_template);
		tp->t_template = NULL;
	}

	/*
	 * Detaching the pcb will unlock the socket/tcpcb, and stopping
	 * the timers can also drop the lock.  We need to prevent access
	 * to the tcpcb as it's half torn down.  Flag the pcb as dead
	 * (prevents access by timers) and only then detach it.
	 */
	tp->t_flags |= TF_DEAD;
	if (inp) {
		inp->inp_ppcb = 0;
		soisdisconnected(so);
		in_pcbdetach(inp);
	}
#ifdef INET6
	else if (in6p) {
		in6p->in6p_ppcb = 0;
		soisdisconnected(so);
		in6_pcbdetach(in6p);
	}
#endif
	/*
	 * pcb is no longer visible elsewhere, so we can safely release
	 * the lock in callout_halt() if needed.
	 */
	TCP_STATINC(TCP_STAT_CLOSED);
	for (j = 0; j < TCPT_NTIMERS; j++) {
		callout_halt(&tp->t_timer[j], softnet_lock);
		callout_destroy(&tp->t_timer[j]);
	}
	callout_halt(&tp->t_delack_ch, softnet_lock);
	callout_destroy(&tp->t_delack_ch);
	pool_put(&tcpcb_pool, tp);

	return NULL;
}

int
tcp_freeq(struct tcpcb *tp)
{
	struct ipqent *qe;
	int rv = 0;
#ifdef TCPREASS_DEBUG
	int i = 0;
#endif

	TCP_REASS_LOCK_CHECK(tp);

	while ((qe = TAILQ_FIRST(&tp->segq)) != NULL) {
#ifdef TCPREASS_DEBUG
		printf("tcp_freeq[%p,%d]: %u:%u(%u) 0x%02x\n",
		    tp, i++, qe->ipqe_seq, qe->ipqe_seq + qe->ipqe_len,
		    qe->ipqe_len, qe->ipqe_flags & (TH_SYN|TH_FIN|TH_RST));
#endif
		TAILQ_REMOVE(&tp->segq, qe, ipqe_q);
		TAILQ_REMOVE(&tp->timeq, qe, ipqe_timeq);
		m_freem(qe->ipqe_m);
		tcpipqent_free(qe);
		rv = 1;
	}
	tp->t_segqlen = 0;
	KASSERT(TAILQ_EMPTY(&tp->timeq));
	return (rv);
}

void
tcp_fasttimo(void)
{
	if (tcp_drainwanted) {
		tcp_drain();
		tcp_drainwanted = 0;
	}
}

void
tcp_drainstub(void)
{
	tcp_drainwanted = 1;
}
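
/*
 * Note: tcp_drainstub() only latches a request in tcp_drainwanted;
 * the actual cleanup is performed by tcp_fasttimo() above, i.e. in
 * callout context rather than in the context of the memory-shortage
 * caller.
 */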

/*
 * Protocol drain routine.  Called when memory is in short supply.
 * Called from pr_fasttimo thus a callout context.
 */
void
tcp_drain(void)
{
	struct inpcb_hdr *inph;
	struct tcpcb *tp;

	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);

	/*
	 * Free the sequence queue of all TCP connections.
	 */
	TAILQ_FOREACH(inph, &tcbtable.inpt_queue, inph_queue) {
		switch (inph->inph_af) {
		case AF_INET:
			tp = intotcpcb((struct inpcb *)inph);
			break;
#ifdef INET6
		case AF_INET6:
			tp = in6totcpcb((struct in6pcb *)inph);
			break;
#endif
		default:
			tp = NULL;
			break;
		}
		if (tp != NULL) {
			/*
			 * We may be called from a device's interrupt
			 * context.  If the tcpcb is already busy,
			 * just bail out now.
			 */
			if (tcp_reass_lock_try(tp) == 0)
				continue;
			if (tcp_freeq(tp))
				TCP_STATINC(TCP_STAT_CONNSDRAINED);
			TCP_REASS_UNLOCK(tp);
		}
	}

	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);
}

/*
 * Notify a tcp user of an asynchronous error;
 * store error as soft error, but wake up user
 * (for now, won't do anything until can select for soft error).
 */
void
tcp_notify(struct inpcb *inp, int error)
{
	struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb;
	struct socket *so = inp->inp_socket;

	/*
	 * Ignore some errors if we are hooked up.
	 * If connection hasn't completed, has retransmitted several times,
	 * and receives a second error, give up now.  This is better
	 * than waiting a long time to establish a connection that
	 * can never complete.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (error == EHOSTUNREACH || error == ENETUNREACH ||
	     error == EHOSTDOWN)) {
		return;
	} else if (TCPS_HAVEESTABLISHED(tp->t_state) == 0 &&
	    tp->t_rxtshift > 3 && tp->t_softerror)
		so->so_error = error;
	else
		tp->t_softerror = error;
	cv_broadcast(&so->so_cv);
	sorwakeup(so);
	sowwakeup(so);
}

#ifdef INET6
void
tcp6_notify(struct in6pcb *in6p, int error)
{
	struct tcpcb *tp = (struct tcpcb *)in6p->in6p_ppcb;
	struct socket *so = in6p->in6p_socket;

	/*
	 * Ignore some errors if we are hooked up.
	 * If connection hasn't completed, has retransmitted several times,
	 * and receives a second error, give up now.  This is better
	 * than waiting a long time to establish a connection that
	 * can never complete.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (error == EHOSTUNREACH || error == ENETUNREACH ||
	     error == EHOSTDOWN)) {
		return;
	} else if (TCPS_HAVEESTABLISHED(tp->t_state) == 0 &&
	    tp->t_rxtshift > 3 && tp->t_softerror)
		so->so_error = error;
	else
		tp->t_softerror = error;
	cv_broadcast(&so->so_cv);
	sorwakeup(so);
	sowwakeup(so);
}
#endif

#ifdef INET6
void *
tcp6_ctlinput(int cmd, const struct sockaddr *sa, void *d)
{
	struct tcphdr th;
	void (*notify)(struct in6pcb *, int) = tcp6_notify;
	int nmatch;
	struct ip6_hdr *ip6;
	const struct sockaddr_in6 *sa6_src = NULL;
	const struct sockaddr_in6 *sa6 = (const struct sockaddr_in6 *)sa;
	struct mbuf *m;
	int off;

	if (sa->sa_family != AF_INET6 ||
	    sa->sa_len != sizeof(struct sockaddr_in6))
		return NULL;
	if ((unsigned)cmd >= PRC_NCMDS)
		return NULL;
	else if (cmd == PRC_QUENCH) {
		/*
		 * Don't honor ICMP Source Quench messages meant for
		 * TCP connections.
		 */
		return NULL;
	} else if (PRC_IS_REDIRECT(cmd))
		notify = in6_rtchange, d = NULL;
	else if (cmd == PRC_MSGSIZE)
		; /* special code is present, see below */
	else if (cmd == PRC_HOSTDEAD)
		d = NULL;
	else if (inet6ctlerrmap[cmd] == 0)
		return NULL;

	/* if the parameter is from icmp6, decode it. */
	if (d != NULL) {
		struct ip6ctlparam *ip6cp = (struct ip6ctlparam *)d;
		m = ip6cp->ip6c_m;
		ip6 = ip6cp->ip6c_ip6;
		off = ip6cp->ip6c_off;
		sa6_src = ip6cp->ip6c_src;
	} else {
		m = NULL;
		ip6 = NULL;
		sa6_src = &sa6_any;
		off = 0;
	}

	if (ip6) {
		/*
		 * XXX: We assume that when ip6 is non NULL,
		 * M and OFF are valid.
		 */

		/* check if we can safely examine src and dst ports */
		if (m->m_pkthdr.len < off + sizeof(th)) {
			if (cmd == PRC_MSGSIZE)
				icmp6_mtudisc_update((struct ip6ctlparam *)d, 0);
			return NULL;
		}

		memset(&th, 0, sizeof(th));
		m_copydata(m, off, sizeof(th), (void *)&th);

		if (cmd == PRC_MSGSIZE) {
			int valid = 0;

			/*
			 * Check to see if we have a valid TCP connection
			 * corresponding to the address in the ICMPv6 message
			 * payload.
			 */
			if (in6_pcblookup_connect(&tcbtable, &sa6->sin6_addr,
			    th.th_dport,
			    (const struct in6_addr *)&sa6_src->sin6_addr,
			    th.th_sport, 0, 0))
				valid++;

			/*
			 * Depending on the value of "valid" and routing table
			 * size (mtudisc_{hi,lo}wat), we will:
			 * - recalculate the new MTU and create the
			 *   corresponding routing entry, or
			 * - ignore the MTU change notification.
			 */
			icmp6_mtudisc_update((struct ip6ctlparam *)d, valid);

			/*
			 * no need to call in6_pcbnotify, it should have been
			 * called via callback if necessary
			 */
			return NULL;
		}

		nmatch = in6_pcbnotify(&tcbtable, sa, th.th_dport,
		    (const struct sockaddr *)sa6_src, th.th_sport, cmd, NULL, notify);
		if (nmatch == 0 && syn_cache_count &&
		    (inet6ctlerrmap[cmd] == EHOSTUNREACH ||
		     inet6ctlerrmap[cmd] == ENETUNREACH ||
		     inet6ctlerrmap[cmd] == EHOSTDOWN))
			syn_cache_unreach((const struct sockaddr *)sa6_src,
			    sa, &th);
	} else {
		(void) in6_pcbnotify(&tcbtable, sa, 0,
		    (const struct sockaddr *)sa6_src, 0, cmd, NULL, notify);
	}

	return NULL;
}
#endif

#ifdef INET
/* assumes that ip header and tcp header are contiguous on mbuf */
void *
tcp_ctlinput(int cmd, const struct sockaddr *sa, void *v)
{
	struct ip *ip = v;
	struct tcphdr *th;
	struct icmp *icp;
	extern const int inetctlerrmap[];
	void (*notify)(struct inpcb *, int) = tcp_notify;
	int errno;
	int nmatch;
	struct tcpcb *tp;
	u_int mtu;
	tcp_seq seq;
	struct inpcb *inp;
#ifdef INET6
	struct in6pcb *in6p;
	struct in6_addr src6, dst6;
#endif

	if (sa->sa_family != AF_INET ||
	    sa->sa_len != sizeof(struct sockaddr_in))
		return NULL;
	if ((unsigned)cmd >= PRC_NCMDS)
		return NULL;
	errno = inetctlerrmap[cmd];
	if (cmd == PRC_QUENCH)
		/*
		 * Don't honor ICMP Source Quench messages meant for
		 * TCP connections.
		 */
		return NULL;
	else if (PRC_IS_REDIRECT(cmd))
		notify = in_rtchange, ip = 0;
	else if (cmd == PRC_MSGSIZE && ip && ip->ip_v == 4) {
		/*
		 * Check to see if we have a valid TCP connection
		 * corresponding to the address in the ICMP message
		 * payload.
		 *
		 * Boundary check is made in icmp_input(), with ICMP_ADVLENMIN.
		 */
		th = (struct tcphdr *)((char *)ip + (ip->ip_hl << 2));
#ifdef INET6
		memset(&src6, 0, sizeof(src6));
		memset(&dst6, 0, sizeof(dst6));
		src6.s6_addr16[5] = dst6.s6_addr16[5] = 0xffff;
		memcpy(&src6.s6_addr32[3], &ip->ip_src, sizeof(struct in_addr));
		memcpy(&dst6.s6_addr32[3], &ip->ip_dst, sizeof(struct in_addr));
#endif
		if ((inp = in_pcblookup_connect(&tcbtable, ip->ip_dst,
		    th->th_dport, ip->ip_src, th->th_sport, 0)) != NULL)
#ifdef INET6
			in6p = NULL;
#else
			;
#endif
#ifdef INET6
		else if ((in6p = in6_pcblookup_connect(&tcbtable, &dst6,
		    th->th_dport, &src6, th->th_sport, 0, 0)) != NULL)
			;
#endif
		else
			return NULL;

		/*
		 * Now that we've validated that we are actually communicating
		 * with the host indicated in the ICMP message, locate the
		 * ICMP header, recalculate the new MTU, and create the
		 * corresponding routing entry.
		 */
		icp = (struct icmp *)((char *)ip -
		    offsetof(struct icmp, icmp_ip));
		if (inp) {
			if ((tp = intotcpcb(inp)) == NULL)
				return NULL;
		}
#ifdef INET6
		else if (in6p) {
			if ((tp = in6totcpcb(in6p)) == NULL)
				return NULL;
		}
#endif
		else
			return NULL;
		seq = ntohl(th->th_seq);
		if (SEQ_LT(seq, tp->snd_una) || SEQ_GT(seq, tp->snd_max))
			return NULL;
		/*
		 * If the ICMP message advertises a Next-Hop MTU
		 * equal or larger than the maximum packet size we have
		 * ever sent, drop the message.
		 */
		mtu = (u_int)ntohs(icp->icmp_nextmtu);
		if (mtu >= tp->t_pmtud_mtu_sent)
			return NULL;
		if (mtu >= tcp_hdrsz(tp) + tp->t_pmtud_mss_acked) {
			/*
			 * Calculate new MTU, and create corresponding
			 * route (traditional PMTUD).
			 */
			tp->t_flags &= ~TF_PMTUD_PEND;
			icmp_mtudisc(icp, ip->ip_dst);
		} else {
			/*
			 * Record the information from the ICMP
			 * message; act on it later.
			 * If we had already recorded an ICMP message,
			 * replace the old one only if the new message
			 * refers to an older TCP segment
			 */
			if (tp->t_flags & TF_PMTUD_PEND) {
				if (SEQ_LT(tp->t_pmtud_th_seq, seq))
					return NULL;
			} else
				tp->t_flags |= TF_PMTUD_PEND;
			tp->t_pmtud_th_seq = seq;
			tp->t_pmtud_nextmtu = icp->icmp_nextmtu;
			tp->t_pmtud_ip_len = icp->icmp_ip.ip_len;
			tp->t_pmtud_ip_hl = icp->icmp_ip.ip_hl;
		}
		return NULL;
	} else if (cmd == PRC_HOSTDEAD)
		ip = 0;
	else if (errno == 0)
		return NULL;
	if (ip && ip->ip_v == 4 && sa->sa_family == AF_INET) {
		th = (struct tcphdr *)((char *)ip + (ip->ip_hl << 2));
		nmatch = in_pcbnotify(&tcbtable, satocsin(sa)->sin_addr,
		    th->th_dport, ip->ip_src, th->th_sport, errno, notify);
		if (nmatch == 0 && syn_cache_count &&
		    (inetctlerrmap[cmd] == EHOSTUNREACH ||
		     inetctlerrmap[cmd] == ENETUNREACH ||
		     inetctlerrmap[cmd] == EHOSTDOWN)) {
			struct sockaddr_in sin;
			memset(&sin, 0, sizeof(sin));
			sin.sin_len = sizeof(sin);
			sin.sin_family = AF_INET;
			sin.sin_port = th->th_sport;
			sin.sin_addr = ip->ip_src;
			syn_cache_unreach((struct sockaddr *)&sin, sa, th);
		}

		/* XXX mapped address case */
	} else
		in_pcbnotifyall(&tcbtable, satocsin(sa)->sin_addr, errno,
		    notify);
	return NULL;
}

/*
 * When a source quench is received, we are being notified of congestion.
 * Close the congestion window down to the Loss Window (one segment).
 * We will gradually open it again as we proceed.
 */
void
tcp_quench(struct inpcb *inp, int errno)
{
	struct tcpcb *tp = intotcpcb(inp);

	if (tp) {
		tp->snd_cwnd = tp->t_segsz;
		tp->t_bytes_acked = 0;
	}
}
#endif

#ifdef INET6
void
tcp6_quench(struct in6pcb *in6p, int errno)
{
	struct tcpcb *tp = in6totcpcb(in6p);

	if (tp) {
		tp->snd_cwnd = tp->t_segsz;
		tp->t_bytes_acked = 0;
	}
}
#endif

#ifdef INET
/*
 * Path MTU Discovery handlers.
 */
void
tcp_mtudisc_callback(struct in_addr faddr)
{
#ifdef INET6
	struct in6_addr in6;
#endif

	in_pcbnotifyall(&tcbtable, faddr, EMSGSIZE, tcp_mtudisc);
#ifdef INET6
	memset(&in6, 0, sizeof(in6));
	in6.s6_addr16[5] = 0xffff;
	memcpy(&in6.s6_addr32[3], &faddr, sizeof(struct in_addr));
	tcp6_mtudisc_callback(&in6);
#endif
}
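
/*
 * Note: the IPv4 address is also folded into a v4-mapped IPv6 address
 * (::ffff:a.b.c.d) above, so that connections using mapped addresses
 * on AF_INET6 sockets receive the same EMSGSIZE notification via
 * tcp6_mtudisc_callback().
 */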

/*
 * On receipt of path MTU corrections, flush old route and replace it
 * with the new one.  Retransmit all unacknowledged packets, to ensure
 * that all packets will be received.
 */
void
tcp_mtudisc(struct inpcb *inp, int errno)
{
	struct tcpcb *tp = intotcpcb(inp);
	struct rtentry *rt = in_pcbrtentry(inp);

	if (tp != 0) {
		if (rt != 0) {
			/*
			 * If this was not a host route, remove and realloc.
			 */
			if ((rt->rt_flags & RTF_HOST) == 0) {
				in_rtchange(inp, errno);
				if ((rt = in_pcbrtentry(inp)) == 0)
					return;
			}

			/*
			 * Slow start out of the error condition.  We
			 * use the MTU because we know it's smaller
			 * than the previously transmitted segment.
			 *
			 * Note: This is more conservative than the
			 * suggestion in draft-floyd-incr-init-win-03.
			 */
			if (rt->rt_rmx.rmx_mtu != 0)
				tp->snd_cwnd =
				    TCP_INITIAL_WINDOW(tcp_init_win,
				    rt->rt_rmx.rmx_mtu);
		}

		/*
		 * Resend unacknowledged packets.
		 */
		tp->snd_nxt = tp->sack_newdata = tp->snd_una;
		tcp_output(tp);
	}
}
#endif

#ifdef INET6
/*
 * Path MTU Discovery handlers.
 */
void
tcp6_mtudisc_callback(struct in6_addr *faddr)
{
	struct sockaddr_in6 sin6;

	memset(&sin6, 0, sizeof(sin6));
	sin6.sin6_family = AF_INET6;
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_addr = *faddr;
	(void) in6_pcbnotify(&tcbtable, (struct sockaddr *)&sin6, 0,
	    (const struct sockaddr *)&sa6_any, 0, PRC_MSGSIZE, NULL, tcp6_mtudisc);
}

void
tcp6_mtudisc(struct in6pcb *in6p, int errno)
{
	struct tcpcb *tp = in6totcpcb(in6p);
	struct rtentry *rt = in6_pcbrtentry(in6p);

	if (tp != 0) {
		if (rt != 0) {
			/*
			 * If this was not a host route, remove and realloc.
			 */
			if ((rt->rt_flags & RTF_HOST) == 0) {
				in6_rtchange(in6p, errno);
				if ((rt = in6_pcbrtentry(in6p)) == 0)
					return;
			}

			/*
			 * Slow start out of the error condition.  We
			 * use the MTU because we know it's smaller
			 * than the previously transmitted segment.
			 *
			 * Note: This is more conservative than the
			 * suggestion in draft-floyd-incr-init-win-03.
			 */
			if (rt->rt_rmx.rmx_mtu != 0)
				tp->snd_cwnd =
				    TCP_INITIAL_WINDOW(tcp_init_win,
				    rt->rt_rmx.rmx_mtu);
		}

		/*
		 * Resend unacknowledged packets.
		 */
		tp->snd_nxt = tp->sack_newdata = tp->snd_una;
		tcp_output(tp);
	}
}
#endif /* INET6 */

/*
 * Compute the MSS to advertise to the peer.  Called only during
 * the 3-way handshake.  If we are the server (peer initiated
 * connection), we are called with a pointer to the interface
 * on which the SYN packet arrived.  If we are the client (we
 * initiated connection), we are called with a pointer to the
 * interface out which this connection should go.
 *
 * NOTE: Do not subtract IP option/extension header size nor IPsec
 * header size from MSS advertisement.  MSS option must hold the maximum
 * segment size we can accept, so it must always be:
 *	max(if mtu) - ip header - tcp header
 */
u_long
tcp_mss_to_advertise(const struct ifnet *ifp, int af)
{
	extern u_long in_maxmtu;
	u_long mss = 0;
	u_long hdrsiz;

	/*
	 * In order to avoid defeating path MTU discovery on the peer,
	 * we advertise the max MTU of all attached networks as our MSS,
	 * per RFC 1191, section 3.1.
	 *
	 * We provide the option to advertise just the MTU of
	 * the interface on which we hope this connection will
	 * be receiving.  If we are responding to a SYN, we
	 * will have a pretty good idea about this, but when
	 * initiating a connection there is a bit more doubt.
	 *
	 * We also need to ensure that loopback has a large enough
	 * MSS, as the loopback MTU is never included in in_maxmtu.

/*
 * Compute the MSS to advertise to the peer.  Called only during
 * the 3-way handshake.  If we are the server (peer initiated
 * connection), we are called with a pointer to the interface
 * on which the SYN packet arrived.  If we are the client (we
 * initiated connection), we are called with a pointer to the
 * interface out which this connection should go.
 *
 * NOTE: Do not subtract IP option/extension header size nor IPsec
 * header size from MSS advertisement.  MSS option must hold the maximum
 * segment size we can accept, so it must always be:
 *	max(if mtu) - ip header - tcp header
 */
u_long
tcp_mss_to_advertise(const struct ifnet *ifp, int af)
{
	extern u_long in_maxmtu;
	u_long mss = 0;
	u_long hdrsiz;

	/*
	 * In order to avoid defeating path MTU discovery on the peer,
	 * we advertise the max MTU of all attached networks as our MSS,
	 * per RFC 1191, section 3.1.
	 *
	 * We provide the option to advertise just the MTU of
	 * the interface on which we hope this connection will
	 * be receiving.  If we are responding to a SYN, we
	 * will have a pretty good idea about this, but when
	 * initiating a connection there is a bit more doubt.
	 *
	 * We also need to ensure that loopback has a large enough
	 * MSS, as the loopback MTU is never included in in_maxmtu.
	 */

	if (ifp != NULL)
		switch (af) {
		case AF_INET:
			mss = ifp->if_mtu;
			break;
#ifdef INET6
		case AF_INET6:
			mss = IN6_LINKMTU(ifp);
			break;
#endif
		}

	if (tcp_mss_ifmtu == 0)
		switch (af) {
		case AF_INET:
			mss = max(in_maxmtu, mss);
			break;
#ifdef INET6
		case AF_INET6:
			mss = max(in6_maxmtu, mss);
			break;
#endif
		}

	switch (af) {
	case AF_INET:
		hdrsiz = sizeof(struct ip);
		break;
#ifdef INET6
	case AF_INET6:
		hdrsiz = sizeof(struct ip6_hdr);
		break;
#endif
	default:
		hdrsiz = 0;
		break;
	}
	hdrsiz += sizeof(struct tcphdr);
	if (mss > hdrsiz)
		mss -= hdrsiz;

	mss = max(tcp_mssdflt, mss);
	return (mss);
}
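
/*
 * Worked example (illustrative): with a standard 1500-byte Ethernet MTU
 * the advertised MSS is 1500 - 20 (struct ip) - 20 (struct tcphdr) = 1460
 * for AF_INET, and 1500 - 40 (struct ip6_hdr) - 20 = 1440 for AF_INET6,
 * but never less than tcp_mssdflt.
 */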

/*
 * Set connection variables based on the peer's advertised MSS.
 * We are passed the TCPCB for the actual connection.  If we
 * are the server, we are called by the compressed state engine
 * when the 3-way handshake is complete.  If we are the client,
 * we are called when we receive the SYN,ACK from the server.
 *
 * NOTE: Our advertised MSS value must be initialized in the TCPCB
 * before this routine is called!
 */
void
tcp_mss_from_peer(struct tcpcb *tp, int offer)
{
	struct socket *so;
#if defined(RTV_SPIPE) || defined(RTV_SSTHRESH)
	struct rtentry *rt;
#endif
	u_long bufsize;
	int mss;

#ifdef DIAGNOSTIC
	if (tp->t_inpcb && tp->t_in6pcb)
		panic("tcp_mss_from_peer: both t_inpcb and t_in6pcb are set");
#endif
	so = NULL;
	rt = NULL;
#ifdef INET
	if (tp->t_inpcb) {
		so = tp->t_inpcb->inp_socket;
#if defined(RTV_SPIPE) || defined(RTV_SSTHRESH)
		rt = in_pcbrtentry(tp->t_inpcb);
#endif
	}
#endif
#ifdef INET6
	if (tp->t_in6pcb) {
		so = tp->t_in6pcb->in6p_socket;
#if defined(RTV_SPIPE) || defined(RTV_SSTHRESH)
		rt = in6_pcbrtentry(tp->t_in6pcb);
#endif
	}
#endif

	/*
	 * As per RFC1122, use the default MSS value, unless they
	 * sent us an offer.  Do not accept offers less than 256 bytes.
	 */
	mss = tcp_mssdflt;
	if (offer)
		mss = offer;
	mss = max(mss, 256);		/* sanity */
	tp->t_peermss = mss;
	mss -= tcp_optlen(tp);
#ifdef INET
	if (tp->t_inpcb)
		mss -= ip_optlen(tp->t_inpcb);
#endif
#ifdef INET6
	if (tp->t_in6pcb)
		mss -= ip6_optlen(tp->t_in6pcb);
#endif

	/*
	 * If there's a pipesize, change the socket buffer to that size.
	 * Make the socket buffer an integral number of MSS units.  If
	 * the MSS is larger than the socket buffer, artificially decrease
	 * the MSS.
	 */
#ifdef RTV_SPIPE
	if (rt != NULL && rt->rt_rmx.rmx_sendpipe != 0)
		bufsize = rt->rt_rmx.rmx_sendpipe;
	else
#endif
	{
		KASSERT(so != NULL);
		bufsize = so->so_snd.sb_hiwat;
	}
	if (bufsize < mss)
		mss = bufsize;
	else {
		bufsize = roundup(bufsize, mss);
		if (bufsize > sb_max)
			bufsize = sb_max;
		(void) sbreserve(&so->so_snd, bufsize, so);
	}
	tp->t_segsz = mss;

#ifdef RTV_SSTHRESH
	if (rt != NULL && rt->rt_rmx.rmx_ssthresh) {
		/*
		 * There's some sort of gateway or interface buffer
		 * limit on the path.  Use this to set the slow
		 * start threshold, but set the threshold to no less
		 * than 2 * MSS.
		 */
		tp->snd_ssthresh = max(2 * mss, rt->rt_rmx.rmx_ssthresh);
	}
#endif
}

/*
 * Processing necessary when a TCP connection is established.
 */
void
tcp_established(struct tcpcb *tp)
{
	struct socket *so;
#ifdef RTV_RPIPE
	struct rtentry *rt;
#endif
	u_long bufsize;

#ifdef DIAGNOSTIC
	if (tp->t_inpcb && tp->t_in6pcb)
		panic("tcp_established: both t_inpcb and t_in6pcb are set");
#endif
	so = NULL;
	rt = NULL;
#ifdef INET
	/* This is a while() to reduce the dreadful stairstepping below */
	while (tp->t_inpcb) {
		so = tp->t_inpcb->inp_socket;
#if defined(RTV_RPIPE)
		rt = in_pcbrtentry(tp->t_inpcb);
#endif
		if (__predict_true(tcp_msl_enable)) {
			if (tp->t_inpcb->inp_laddr.s_addr == INADDR_LOOPBACK) {
				tp->t_msl = tcp_msl_loop ? tcp_msl_loop : (TCPTV_MSL >> 2);
				break;
			}

			if (__predict_false(tcp_rttlocal)) {
				/* This may be adjusted by tcp_input */
				tp->t_msl = tcp_msl_local ? tcp_msl_local : (TCPTV_MSL >> 1);
				break;
			}
			if (in_localaddr(tp->t_inpcb->inp_faddr)) {
				tp->t_msl = tcp_msl_local ? tcp_msl_local : (TCPTV_MSL >> 1);
				break;
			}
		}
		tp->t_msl = tcp_msl_remote ? tcp_msl_remote : TCPTV_MSL;
		break;
	}
#endif
#ifdef INET6
	/* The !tp->t_inpcb lets the compiler know it can't be v4 *and* v6 */
	while (!tp->t_inpcb && tp->t_in6pcb) {
		so = tp->t_in6pcb->in6p_socket;
#if defined(RTV_RPIPE)
		rt = in6_pcbrtentry(tp->t_in6pcb);
#endif
		if (__predict_true(tcp_msl_enable)) {
			extern const struct in6_addr in6addr_loopback;

			if (IN6_ARE_ADDR_EQUAL(&tp->t_in6pcb->in6p_laddr,
			    &in6addr_loopback)) {
				tp->t_msl = tcp_msl_loop ? tcp_msl_loop : (TCPTV_MSL >> 2);
				break;
			}

			if (__predict_false(tcp_rttlocal)) {
				/* This may be adjusted by tcp_input */
				tp->t_msl = tcp_msl_local ? tcp_msl_local : (TCPTV_MSL >> 1);
				break;
			}
			if (in6_localaddr(&tp->t_in6pcb->in6p_faddr)) {
				tp->t_msl = tcp_msl_local ? tcp_msl_local : (TCPTV_MSL >> 1);
				break;
			}
		}
		tp->t_msl = tcp_msl_remote ? tcp_msl_remote : TCPTV_MSL;
		break;
	}
#endif

	tp->t_state = TCPS_ESTABLISHED;
	TCP_TIMER_ARM(tp, TCPT_KEEP, tp->t_keepidle);

#ifdef RTV_RPIPE
	if (rt != NULL && rt->rt_rmx.rmx_recvpipe != 0)
		bufsize = rt->rt_rmx.rmx_recvpipe;
	else
#endif
	{
		KASSERT(so != NULL);
		bufsize = so->so_rcv.sb_hiwat;
	}
	if (bufsize > tp->t_ourmss) {
		bufsize = roundup(bufsize, tp->t_ourmss);
		if (bufsize > sb_max)
			bufsize = sb_max;
		(void) sbreserve(&so->so_rcv, bufsize, so);
	}
}
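
/*
 * Worked example (illustrative) of the rounding above: with a receive
 * buffer of 32768 bytes and t_ourmss of 1460, sbreserve() is asked for
 * roundup(32768, 1460) = 33580 bytes, i.e. room for exactly 23 full-sized
 * segments, subject to the sb_max cap.
 */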

/*
 * Check if there's an initial rtt or rttvar.  Convert from the
 * route-table units to scaled multiples of the slow timeout timer.
 * Called only during the 3-way handshake.
 */
void
tcp_rmx_rtt(struct tcpcb *tp)
{
#ifdef RTV_RTT
	struct rtentry *rt = NULL;
	int rtt;

#ifdef DIAGNOSTIC
	if (tp->t_inpcb && tp->t_in6pcb)
		panic("tcp_rmx_rtt: both t_inpcb and t_in6pcb are set");
#endif
#ifdef INET
	if (tp->t_inpcb)
		rt = in_pcbrtentry(tp->t_inpcb);
#endif
#ifdef INET6
	if (tp->t_in6pcb)
		rt = in6_pcbrtentry(tp->t_in6pcb);
#endif
	if (rt == NULL)
		return;

	if (tp->t_srtt == 0 && (rtt = rt->rt_rmx.rmx_rtt)) {
		/*
		 * XXX The lock bit for MTU indicates that the value
		 * is also a minimum value; this is subject to time.
		 */
		if (rt->rt_rmx.rmx_locks & RTV_RTT)
			TCPT_RANGESET(tp->t_rttmin,
			    rtt / (RTM_RTTUNIT / PR_SLOWHZ),
			    TCPTV_MIN, TCPTV_REXMTMAX);
		tp->t_srtt = rtt /
		    ((RTM_RTTUNIT / PR_SLOWHZ) >> (TCP_RTT_SHIFT + 2));
		if (rt->rt_rmx.rmx_rttvar) {
			tp->t_rttvar = rt->rt_rmx.rmx_rttvar /
			    ((RTM_RTTUNIT / PR_SLOWHZ) >>
			    (TCP_RTTVAR_SHIFT + 2));
		} else {
			/* Default variation is +- 1 rtt */
			tp->t_rttvar =
			    tp->t_srtt >> (TCP_RTT_SHIFT - TCP_RTTVAR_SHIFT);
		}
		TCPT_RANGESET(tp->t_rxtcur,
		    ((tp->t_srtt >> 2) + tp->t_rttvar) >> (1 + 2),
		    tp->t_rttmin, TCPTV_REXMTMAX);
	}
#endif
}

tcp_seq	tcp_iss_seq = 0;	/* tcp initial seq # */
u_int8_t tcp_iss_secret[16];	/* 128 bits; should be plenty */

/*
 * Get a new sequence value given a tcp control block
 */
tcp_seq
tcp_new_iss(struct tcpcb *tp, tcp_seq addin)
{

#ifdef INET
	if (tp->t_inpcb != NULL) {
		return (tcp_new_iss1(&tp->t_inpcb->inp_laddr,
		    &tp->t_inpcb->inp_faddr, tp->t_inpcb->inp_lport,
		    tp->t_inpcb->inp_fport, sizeof(tp->t_inpcb->inp_laddr),
		    addin));
	}
#endif
#ifdef INET6
	if (tp->t_in6pcb != NULL) {
		return (tcp_new_iss1(&tp->t_in6pcb->in6p_laddr,
		    &tp->t_in6pcb->in6p_faddr, tp->t_in6pcb->in6p_lport,
		    tp->t_in6pcb->in6p_fport, sizeof(tp->t_in6pcb->in6p_laddr),
		    addin));
	}
#endif
	/* Not possible. */
	panic("tcp_new_iss");
}
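
/*
 * For reference (descriptive only): the RFC 1948 scheme implemented below
 * is roughly
 *	ISS = M + F(laddr, lport, faddr, fport, secret)
 * where F is a keyed hash (MD5 here) and M is a monotonically increasing
 * counter; tcp_iss_seq plays the role of M.
 */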

/*
 * This routine actually generates a new TCP initial sequence number.
 */
tcp_seq
tcp_new_iss1(void *laddr, void *faddr, u_int16_t lport, u_int16_t fport,
    size_t addrsz, tcp_seq addin)
{
	tcp_seq tcp_iss;

	static bool tcp_iss_gotten_secret;

	/*
	 * If we haven't been here before, initialize our cryptographic
	 * hash secret.
	 */
	if (tcp_iss_gotten_secret == false) {
		cprng_strong(kern_cprng,
		    tcp_iss_secret, sizeof(tcp_iss_secret), FASYNC);
		tcp_iss_gotten_secret = true;
	}

	if (tcp_do_rfc1948) {
		MD5_CTX ctx;
		u_int8_t hash[16];	/* XXX MD5 knowledge */

		/*
		 * Compute the base value of the ISS.  It is a hash
		 * of (saddr, sport, daddr, dport, secret).
		 */
		MD5Init(&ctx);

		MD5Update(&ctx, (u_char *) laddr, addrsz);
		MD5Update(&ctx, (u_char *) &lport, sizeof(lport));

		MD5Update(&ctx, (u_char *) faddr, addrsz);
		MD5Update(&ctx, (u_char *) &fport, sizeof(fport));

		MD5Update(&ctx, tcp_iss_secret, sizeof(tcp_iss_secret));

		MD5Final(hash, &ctx);

		memcpy(&tcp_iss, hash, sizeof(tcp_iss));

		/*
		 * Now increment our "timer", and add it in to
		 * the computed value.
		 *
		 * XXX Use `addin'?
		 * XXX TCP_ISSINCR too large to use?
		 */
		tcp_iss_seq += TCP_ISSINCR;
#ifdef TCPISS_DEBUG
		printf("ISS hash 0x%08x, ", tcp_iss);
#endif
		tcp_iss += tcp_iss_seq + addin;
#ifdef TCPISS_DEBUG
		printf("new ISS 0x%08x\n", tcp_iss);
#endif
	} else {
		/*
		 * Randomize.
		 */
		tcp_iss = cprng_fast32();

		/*
		 * If we were asked to add some amount to a known value,
		 * we will take a random value obtained above, mask off
		 * the upper bits, and add in the known value.  We also
		 * add in a constant to ensure that we are at least a
		 * certain distance from the original value.
		 *
		 * This is used when an old connection is in timed wait
		 * and we have a new one coming in, for instance.
		 */
		if (addin != 0) {
#ifdef TCPISS_DEBUG
			printf("Random %08x, ", tcp_iss);
#endif
			tcp_iss &= TCP_ISS_RANDOM_MASK;
			tcp_iss += addin + TCP_ISSINCR;
#ifdef TCPISS_DEBUG
			printf("Old ISS %08x, ISS %08x\n", addin, tcp_iss);
#endif
		} else {
			tcp_iss &= TCP_ISS_RANDOM_MASK;
			tcp_iss += tcp_iss_seq;
			tcp_iss_seq += TCP_ISSINCR;
#ifdef TCPISS_DEBUG
			printf("ISS %08x\n", tcp_iss);
#endif
		}
	}

	if (tcp_compat_42) {
		/*
		 * Limit it to the positive range for really old TCP
		 * implementations.
		 * Just AND off the top bit instead of checking if it
		 * is set first - saves a branch 50% of the time.
		 */
		tcp_iss &= 0x7fffffff;		/* XXX */
	}

	return (tcp_iss);
}

#if defined(IPSEC)
/* compute ESP/AH header size for TCP, including outer IP header. */
size_t
ipsec4_hdrsiz_tcp(struct tcpcb *tp)
{
	struct inpcb *inp;
	size_t hdrsiz;

	/* XXX mapped addr case (tp->t_in6pcb) */
	if (!tp || !tp->t_template || !(inp = tp->t_inpcb))
		return 0;
	switch (tp->t_family) {
	case AF_INET:
		/* XXX: should use correct direction. */
		hdrsiz = ipsec4_hdrsiz(tp->t_template, IPSEC_DIR_OUTBOUND, inp);
		break;
	default:
		hdrsiz = 0;
		break;
	}

	return hdrsiz;
}

#ifdef INET6
size_t
ipsec6_hdrsiz_tcp(struct tcpcb *tp)
{
	struct in6pcb *in6p;
	size_t hdrsiz;

	if (!tp || !tp->t_template || !(in6p = tp->t_in6pcb))
		return 0;
	switch (tp->t_family) {
	case AF_INET6:
		/* XXX: should use correct direction. */
		hdrsiz = ipsec6_hdrsiz(tp->t_template, IPSEC_DIR_OUTBOUND, in6p);
		break;
	case AF_INET:
		/* mapped address case - tricky */
	default:
		hdrsiz = 0;
		break;
	}

	return hdrsiz;
}
#endif
#endif /*IPSEC*/
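
/*
 * Illustrative note on the helpers below: with RFC 1323 timestamps
 * negotiated in both directions, tcp_optlen() returns TCPOLEN_TSTAMP_APPA
 * (12 bytes: two NOP/pad bytes plus the 10-byte option), so tcp_hdrsz()
 * reports 20 + 20 + 12 = 52 bytes for an AF_INET connection without
 * TCP_SIGNATURE.
 */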

/*
 * Determine the length of the TCP options for this connection.
 *
 * XXX: What do we do for SACK, when we add that?  Just reserve
 * all of the space?  Otherwise we can't exactly be incrementing
 * cwnd by an amount that varies depending on the amount we last
 * had to SACK!
 */

u_int
tcp_optlen(struct tcpcb *tp)
{
	u_int optlen;

	optlen = 0;
	if ((tp->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP|TF_NOOPT)) ==
	    (TF_REQ_TSTMP | TF_RCVD_TSTMP))
		optlen += TCPOLEN_TSTAMP_APPA;

#ifdef TCP_SIGNATURE
	if (tp->t_flags & TF_SIGNATURE)
		optlen += TCPOLEN_SIGNATURE + 2;
#endif /* TCP_SIGNATURE */

	return optlen;
}

u_int
tcp_hdrsz(struct tcpcb *tp)
{
	u_int hlen;

	switch (tp->t_family) {
#ifdef INET6
	case AF_INET6:
		hlen = sizeof(struct ip6_hdr);
		break;
#endif
	case AF_INET:
		hlen = sizeof(struct ip);
		break;
	default:
		hlen = 0;
		break;
	}
	hlen += sizeof(struct tcphdr);

	if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
	    (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
		hlen += TCPOLEN_TSTAMP_APPA;
#ifdef TCP_SIGNATURE
	if (tp->t_flags & TF_SIGNATURE)
		hlen += TCPOLEN_SIGLEN;
#endif
	return hlen;
}

void
tcp_statinc(u_int stat)
{

	KASSERT(stat < TCP_NSTATS);
	TCP_STATINC(stat);
}

void
tcp_statadd(u_int stat, uint64_t val)
{

	KASSERT(stat < TCP_NSTATS);
	TCP_STATADD(stat, val);
}