/*
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_subr.c,v 1.73.2.31 2003/01/24 05:11:34 sam Exp $
 */

#include "opt_compat.h"
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mpipe.h>
#include <sys/mbuf.h>
#ifdef INET6
#include <sys/domain.h>
#endif
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/socket.h>
#include <sys/socketops.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/random.h>
#include <sys/in_cksum.h>
#include <sys/ktr.h>

#include <net/route.h>
#include <net/if.h>
#include <net/netisr2.h>

#define	_IP_VHL
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/in_pcb.h>
#include <netinet6/in6_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/ip_icmp.h>
#ifdef INET6
#include <netinet/icmp6.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_timer2.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#include <netinet6/ip6protosw.h>

#ifdef IPSEC
#include <netinet6/ipsec.h>
#include <netproto/key/key.h>
#ifdef INET6
#include <netinet6/ipsec6.h>
#endif
#endif

#ifdef FAST_IPSEC
#include <netproto/ipsec/ipsec.h>
#ifdef INET6
#include <netproto/ipsec/ipsec6.h>
#endif
#define	IPSEC
#endif

#include <sys/md5.h>
#include <machine/smp.h>

#include <sys/msgport2.h>
#include <sys/mplock2.h>
#include <net/netmsg2.h>

#if !defined(KTR_TCP)
#define KTR_TCP		KTR_ALL
#endif
/*
KTR_INFO_MASTER(tcp);
KTR_INFO(KTR_TCP, tcp, rxmsg, 0, "tcp getmsg", 0);
KTR_INFO(KTR_TCP, tcp, wait, 1, "tcp waitmsg", 0);
KTR_INFO(KTR_TCP, tcp, delayed, 2, "tcp execute delayed ops", 0);
#define logtcp(name)	KTR_LOG(tcp_ ## name)
*/

#define TCP_IW_MAXSEGS_DFLT	4
#define TCP_IW_CAPSEGS_DFLT	4

struct inpcbinfo tcbinfo[MAXCPU];
struct tcpcbackqhead tcpcbackq[MAXCPU];

int tcp_mssdflt = TCP_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW,
    &tcp_mssdflt, 0, "Default TCP Maximum Segment Size");

#ifdef INET6
int tcp_v6mssdflt = TCP6_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt, CTLFLAG_RW,
    &tcp_v6mssdflt, 0, "Default TCP Maximum Segment Size for IPv6");
#endif

/*
 * Minimum MSS we accept and use.  This prevents DoS attacks where
 * we are forced to a ridiculously low MSS like 20 and send hundreds
 * of packets instead of one.  The effect scales with the available
 * bandwidth and quickly saturates the CPU and network interface
 * with packet generation and sending.  Set to zero to disable MINMSS
 * checking.  This setting prevents us from sending too small packets.
 */
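/*
 * Worked example (illustrative, not from the original source): with an
 * MSS of 20, a single 1460-byte write is split into 73 segments, each
 * carrying 20 bytes of payload behind 40 bytes of TCP/IP headers,
 * instead of one full-sized segment.
 */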
int tcp_minmss = TCP_MINMSS;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_RW,
    &tcp_minmss, 0, "Minimum TCP Maximum Segment Size");

#if 0
static int tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
SYSCTL_INT(_net_inet_tcp, TCPCTL_RTTDFLT, rttdflt, CTLFLAG_RW,
    &tcp_rttdflt, 0, "Default maximum TCP Round Trip Time");
#endif

int tcp_do_rfc1323 = 1;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
    &tcp_do_rfc1323, 0, "Enable rfc1323 (high performance TCP) extensions");

static int tcp_tcbhashsize = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RD,
    &tcp_tcbhashsize, 0, "Size of TCP control block hashtable");

static int do_tcpdrain = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
    "Enable tcp_drain routine for extra help when low on mbufs");

static int icmp_may_rst = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW, &icmp_may_rst, 0,
    "Certain ICMP unreachable messages may abort connections in SYN_SENT");

static int tcp_isn_reseed_interval = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW,
    &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret");

/*
 * TCP bandwidth limiting sysctls.  The inflight limiter is now turned on
 * by default, but with generous values which should allow maximal
 * bandwidth.  In particular, the slop defaults to 50 (5%).
 *
 * The reason for doing this is that the limiter is the only mechanism we
 * have which seems to do a really good job preventing receiver RX rings
 * on network interfaces from getting blown out.  Even though GigE/10GigE
 * is supposed to flow control, it looks like either it doesn't actually
 * do it or Open Source drivers do not properly enable it.
 *
 * People using the limiter to reduce bottlenecks on slower WAN connections
 * should set the slop to 20 (2%).
 */
static int tcp_inflight_enable = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_enable, CTLFLAG_RW,
    &tcp_inflight_enable, 0, "Enable automatic TCP inflight data limiting");

static int tcp_inflight_debug = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_debug, CTLFLAG_RW,
    &tcp_inflight_debug, 0, "Debug TCP inflight calculations");

static int tcp_inflight_min = 6144;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_min, CTLFLAG_RW,
    &tcp_inflight_min, 0, "Lower bound for TCP inflight window");

static int tcp_inflight_max = TCP_MAXWIN << TCP_MAX_WINSHIFT;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_max, CTLFLAG_RW,
    &tcp_inflight_max, 0, "Upper bound for TCP inflight window");

static int tcp_inflight_stab = 50;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_stab, CTLFLAG_RW,
    &tcp_inflight_stab, 0, "Fudge bw 1/10% (50=5%)");

static int tcp_inflight_adjrtt = 2;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_adjrtt, CTLFLAG_RW,
    &tcp_inflight_adjrtt, 0, "Slop for rtt 1/(hz*32)");

static int tcp_do_rfc3390 = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_RW,
    &tcp_do_rfc3390, 0,
    "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");

static u_long tcp_iw_maxsegs = TCP_IW_MAXSEGS_DFLT;
SYSCTL_ULONG(_net_inet_tcp, OID_AUTO, iwmaxsegs, CTLFLAG_RW,
    &tcp_iw_maxsegs, 0, "TCP IW segments max");

static u_long tcp_iw_capsegs = TCP_IW_CAPSEGS_DFLT;
SYSCTL_ULONG(_net_inet_tcp, OID_AUTO, iwcapsegs, CTLFLAG_RW,
    &tcp_iw_capsegs, 0, "TCP IW segments");

int tcp_low_rtobase = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, low_rtobase, CTLFLAG_RW,
    &tcp_low_rtobase, 0, "Lowering the Initial RTO (RFC 6298)");

static int tcp_do_ncr = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, ncr, CTLFLAG_RW,
    &tcp_do_ncr, 0, "Non-Congestion Robustness (RFC 4653)");

int tcp_ncr_rxtthresh_max = 16;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, ncr_rxtthresh_max, CTLFLAG_RW,
    &tcp_ncr_rxtthresh_max, 0,
    "Non-Congestion Robustness (RFC 4653), DupThresh upper limit");

static MALLOC_DEFINE(M_TCPTEMP, "tcptemp", "TCP Templates for Keepalives");
static struct malloc_pipe tcptemp_mpipe;

static void tcp_willblock(void);
static void tcp_notify (struct inpcb *, int);

struct tcp_stats tcpstats_percpu[MAXCPU] __cachealign;

static struct netmsg_base tcp_drain_netmsg[MAXCPU];
static void	tcp_drain_dispatch(netmsg_t nmsg);

static int
sysctl_tcpstats(SYSCTL_HANDLER_ARGS)
{
	int cpu, error = 0;

	for (cpu = 0; cpu < ncpus2; ++cpu) {
		if ((error = SYSCTL_OUT(req, &tcpstats_percpu[cpu],
					sizeof(struct tcp_stats))))
			break;
		if ((error = SYSCTL_IN(req, &tcpstats_percpu[cpu],
				       sizeof(struct tcp_stats))))
			break;
	}

	return (error);
}
SYSCTL_PROC(_net_inet_tcp, TCPCTL_STATS, stats, (CTLTYPE_OPAQUE | CTLFLAG_RW),
    0, 0, sysctl_tcpstats, "S,tcp_stats", "TCP statistics");

/*
 * Target size of TCP PCB hash tables.  Must be a power of two.
 *
 * Note that this can be overridden by the kernel environment
 * variable net.inet.tcp.tcbhashsize
 */
#ifndef TCBHASHSIZE
#define TCBHASHSIZE	512
#endif

/*
 * This is the actual shape of what we allocate using the zone
 * allocator.  Doing it this way allows us to protect both structures
 * using the same generation count, and also eliminates the overhead
 * of allocating tcpcbs separately.  By hiding the structure here,
 * we avoid changing most of the rest of the code (although it needs
 * to be changed, eventually, for greater efficiency).
 */
#define	ALIGNMENT	32
#define	ALIGNM1		(ALIGNMENT - 1)
struct	inp_tp {
	union {
		struct	inpcb inp;
		char	align[(sizeof(struct inpcb) + ALIGNM1) & ~ALIGNM1];
	} inp_tp_u;
	struct	tcpcb tcb;
	struct	tcp_callout inp_tp_rexmt;
	struct	tcp_callout inp_tp_persist;
	struct	tcp_callout inp_tp_keep;
	struct	tcp_callout inp_tp_2msl;
	struct	tcp_callout inp_tp_delack;
	struct	netmsg_tcp_timer inp_tp_timermsg;
	struct	netmsg_base inp_tp_sndmore;
};
#undef ALIGNMENT
#undef ALIGNM1
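/*
 * Example of the rounding above (illustrative only): with ALIGNMENT 32
 * and a hypothetical sizeof(struct inpcb) of 424, the align member pads
 * the union to (424 + 31) & ~31 == 448 bytes, so the tcpcb that follows
 * starts on a 32-byte boundary within the allocation.
 */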
/*
 * Tcp initialization
 */
void
tcp_init(void)
{
	struct inpcbportinfo *portinfo;
	struct inpcbinfo *ticb;
	int hashsize = TCBHASHSIZE;
	int cpu;

	/*
	 * note: tcptemp is used for keepalives, and it is ok for an
	 * allocation to fail so do not specify MPF_INT.
	 */
	mpipe_init(&tcptemp_mpipe, M_TCPTEMP, sizeof(struct tcptemp),
	    25, -1, 0, NULL, NULL, NULL);

	tcp_delacktime = TCPTV_DELACK;
	tcp_keepinit = TCPTV_KEEP_INIT;
	tcp_keepidle = TCPTV_KEEP_IDLE;
	tcp_keepintvl = TCPTV_KEEPINTVL;
	tcp_maxpersistidle = TCPTV_KEEP_IDLE;
	tcp_msl = TCPTV_MSL;
	tcp_rexmit_min = TCPTV_MIN;
	tcp_rexmit_slop = TCPTV_CPU_VAR;

	TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", &hashsize);
	if (!powerof2(hashsize)) {
		kprintf("WARNING: TCB hash size not a power of 2\n");
		hashsize = 512; /* safe default */
	}
	tcp_tcbhashsize = hashsize;
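	/*
	 * The tunable fetched above is the kernel environment variable
	 * mentioned near TCBHASHSIZE; e.g., net.inet.tcp.tcbhashsize=2048
	 * in /boot/loader.conf (the value shown is only an example, and
	 * must be a power of 2).
	 */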
	portinfo = kmalloc_cachealign(sizeof(*portinfo) * ncpus2, M_PCB,
	    M_WAITOK);

	for (cpu = 0; cpu < ncpus2; cpu++) {
		ticb = &tcbinfo[cpu];
		in_pcbinfo_init(ticb, cpu, FALSE);
		ticb->hashbase = hashinit(hashsize, M_PCB,
		    &ticb->hashmask);
		in_pcbportinfo_init(&portinfo[cpu], hashsize, TRUE, cpu);
		ticb->portinfo = portinfo;
		ticb->portinfo_mask = ncpus2_mask;
		ticb->wildcardhashbase = hashinit(hashsize, M_PCB,
		    &ticb->wildcardhashmask);
		ticb->localgrphashbase = hashinit(hashsize, M_PCB,
		    &ticb->localgrphashmask);
		ticb->ipi_size = sizeof(struct inp_tp);
		TAILQ_INIT(&tcpcbackq[cpu]);
	}

	tcp_reass_maxseg = nmbclusters / 16;
	TUNABLE_INT_FETCH("net.inet.tcp.reass.maxsegments", &tcp_reass_maxseg);

#ifdef INET6
#define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
#else
#define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
#endif
	if (max_protohdr < TCP_MINPROTOHDR)
		max_protohdr = TCP_MINPROTOHDR;
	if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
		panic("tcp_init");
#undef TCP_MINPROTOHDR

	/*
	 * Initialize TCP statistics counters for each CPU.
	 */
	for (cpu = 0; cpu < ncpus2; ++cpu)
		bzero(&tcpstats_percpu[cpu], sizeof(struct tcp_stats));

	/*
	 * Initialize netmsgs for TCP drain
	 */
	for (cpu = 0; cpu < ncpus2; ++cpu) {
		netmsg_init(&tcp_drain_netmsg[cpu], NULL, &netisr_adone_rport,
		    MSGF_PRIORITY, tcp_drain_dispatch);
	}

	syncache_init();
	netisr_register_rollup(tcp_willblock, NETISR_ROLLUP_PRIO_TCP);
}

static void
tcp_willblock(void)
{
	struct tcpcb *tp;
	int cpu = mycpu->gd_cpuid;

	while ((tp = TAILQ_FIRST(&tcpcbackq[cpu])) != NULL) {
		KKASSERT(tp->t_flags & TF_ONOUTPUTQ);
		tp->t_flags &= ~TF_ONOUTPUTQ;
		TAILQ_REMOVE(&tcpcbackq[cpu], tp, t_outputq);
		tcp_output(tp);
	}
}

/*
 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
 * tcp_template used to store this data in mbufs, but we now recopy it out
 * of the tcpcb each time to conserve mbufs.
 */
void
tcp_fillheaders(struct tcpcb *tp, void *ip_ptr, void *tcp_ptr, boolean_t tso)
{
	struct inpcb *inp = tp->t_inpcb;
	struct tcphdr *tcp_hdr = (struct tcphdr *)tcp_ptr;

#ifdef INET6
	if (INP_ISIPV6(inp)) {
		struct ip6_hdr *ip6;

		ip6 = (struct ip6_hdr *)ip_ptr;
		ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
		    (inp->in6p_flowinfo & IPV6_FLOWINFO_MASK);
		ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
		    (IPV6_VERSION & IPV6_VERSION_MASK);
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = sizeof(struct tcphdr);
		ip6->ip6_src = inp->in6p_laddr;
		ip6->ip6_dst = inp->in6p_faddr;
		tcp_hdr->th_sum = 0;
	} else
#endif
	{
		struct ip *ip = (struct ip *)ip_ptr;
		u_int plen;

		ip->ip_vhl = IP_VHL_BORING;
		ip->ip_tos = 0;
		ip->ip_len = 0;
		ip->ip_id = 0;
		ip->ip_off = 0;
		ip->ip_ttl = 0;
		ip->ip_sum = 0;
		ip->ip_p = IPPROTO_TCP;
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst = inp->inp_faddr;

		if (tso)
			plen = htons(IPPROTO_TCP);
		else
			plen = htons(sizeof(struct tcphdr) + IPPROTO_TCP);
		tcp_hdr->th_sum = in_pseudo(ip->ip_src.s_addr,
		    ip->ip_dst.s_addr, plen);
	}

	tcp_hdr->th_sport = inp->inp_lport;
	tcp_hdr->th_dport = inp->inp_fport;
	tcp_hdr->th_seq = 0;
	tcp_hdr->th_ack = 0;
	tcp_hdr->th_x2 = 0;
	tcp_hdr->th_off = 5;
	tcp_hdr->th_flags = 0;
	tcp_hdr->th_win = 0;
	tcp_hdr->th_urp = 0;
}

/*
 * Create template to be used to send tcp packets on a connection.
 * Allocates an mbuf and fills in a skeletal tcp/ip header.  The only
 * use for this function is in keepalives, which use tcp_respond.
 */
struct tcptemp *
tcp_maketemplate(struct tcpcb *tp)
{
	struct tcptemp *tmp;

	if ((tmp = mpipe_alloc_nowait(&tcptemp_mpipe)) == NULL)
		return (NULL);
	tcp_fillheaders(tp, &tmp->tt_ipgen, &tmp->tt_t, FALSE);
	return (tmp);
}

void
tcp_freetemplate(struct tcptemp *tmp)
{
	mpipe_free(&tcptemp_mpipe, tmp);
}

/*
 * Send a single message to the TCP at address specified by
 * the given TCP/IP header.  If m == NULL, then we make a copy
 * of the tcpiphdr at ti and send directly to the addressed host.
 * This is used to force keep alive messages out using the TCP
 * template for a connection.  If flags are given then we send
 * a message back to the TCP which originated the segment ti,
 * and discard the mbuf containing it and any other attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 *
 * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
 */
void
tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
    tcp_seq ack, tcp_seq seq, int flags)
{
	int tlen;
	long win = 0;
	struct route *ro = NULL;
	struct route sro;
	struct ip *ip = ipgen;
	struct tcphdr *nth;
	int ipflags = 0;
	struct route_in6 *ro6 = NULL;
	struct route_in6 sro6;
	struct ip6_hdr *ip6 = ipgen;
	boolean_t use_tmpro = TRUE;
#ifdef INET6
	boolean_t isipv6 = (IP_VHL_V(ip->ip_vhl) == 6);
#else
	const boolean_t isipv6 = FALSE;
#endif

	if (tp != NULL) {
		if (!(flags & TH_RST)) {
			win = ssb_space(&tp->t_inpcb->inp_socket->so_rcv);
			if (win < 0)
				win = 0;
			if (win > (long)TCP_MAXWIN << tp->rcv_scale)
				win = (long)TCP_MAXWIN << tp->rcv_scale;
		}
		/*
		 * Don't use the route cache of a listen socket,
		 * it is not MPSAFE; use temporary route cache.
		 */
		if (tp->t_state != TCPS_LISTEN) {
			if (isipv6)
				ro6 = &tp->t_inpcb->in6p_route;
			else
				ro = &tp->t_inpcb->inp_route;
			use_tmpro = FALSE;
		}
	}
	if (use_tmpro) {
		if (isipv6) {
			ro6 = &sro6;
			bzero(ro6, sizeof *ro6);
		} else {
			ro = &sro;
			bzero(ro, sizeof *ro);
		}
	}
	if (m == NULL) {
		m = m_gethdr(M_NOWAIT, MT_HEADER);
		if (m == NULL)
			return;
		tlen = 0;
		m->m_data += max_linkhdr;
		if (isipv6) {
			bcopy(ip6, mtod(m, caddr_t), sizeof(struct ip6_hdr));
			ip6 = mtod(m, struct ip6_hdr *);
			nth = (struct tcphdr *)(ip6 + 1);
		} else {
			bcopy(ip, mtod(m, caddr_t), sizeof(struct ip));
			ip = mtod(m, struct ip *);
			nth = (struct tcphdr *)(ip + 1);
		}
		bcopy(th, nth, sizeof(struct tcphdr));
		flags = TH_ACK;
	} else {
		m_freem(m->m_next);
		m->m_next = NULL;
		m->m_data = (caddr_t)ipgen;
		/* m_len is set later */
		tlen = 0;
#define xchg(a, b, type) { type t; t = a; a = b; b = t; }
		if (isipv6) {
			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
			nth = (struct tcphdr *)(ip6 + 1);
		} else {
			xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
			nth = (struct tcphdr *)(ip + 1);
		}
		if (th != nth) {
			/*
			 * this is usually a case when an extension header
			 * exists between the IPv6 header and the
			 * TCP header.
			 */
			nth->th_sport = th->th_sport;
			nth->th_dport = th->th_dport;
		}
		xchg(nth->th_dport, nth->th_sport, n_short);
#undef xchg
	}
	if (isipv6) {
		ip6->ip6_flow = 0;
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = htons((u_short)(sizeof(struct tcphdr) + tlen));
		tlen += sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	} else {
		tlen += sizeof(struct tcpiphdr);
		ip->ip_len = tlen;
		ip->ip_ttl = ip_defttl;
	}
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = NULL;
	nth->th_seq = htonl(seq);
	nth->th_ack = htonl(ack);
	nth->th_x2 = 0;
	nth->th_off = sizeof(struct tcphdr) >> 2;
	nth->th_flags = flags;
	if (tp != NULL)
		nth->th_win = htons((u_short) (win >> tp->rcv_scale));
	else
		nth->th_win = htons((u_short)win);
	nth->th_urp = 0;
	if (isipv6) {
		nth->th_sum = 0;
		nth->th_sum = in6_cksum(m, IPPROTO_TCP,
		    sizeof(struct ip6_hdr),
		    tlen - sizeof(struct ip6_hdr));
		ip6->ip6_hlim = in6_selecthlim(tp ? tp->t_inpcb : NULL,
		    (ro6 && ro6->ro_rt) ? ro6->ro_rt->rt_ifp : NULL);
	} else {
		nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
		m->m_pkthdr.csum_thlen = sizeof(struct tcphdr);
	}
#ifdef TCPDEBUG
	if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
#endif
	if (isipv6) {
		ip6_output(m, NULL, ro6, ipflags, NULL, NULL,
		    tp ? tp->t_inpcb : NULL);
		if ((ro6 == &sro6) && (ro6->ro_rt != NULL)) {
			RTFREE(ro6->ro_rt);
			ro6->ro_rt = NULL;
		}
	} else {
		ipflags |= IP_DEBUGROUTE;
		ip_output(m, NULL, ro, ipflags, NULL, tp ? tp->t_inpcb : NULL);
		if ((ro == &sro) && (ro->ro_rt != NULL)) {
			RTFREE(ro->ro_rt);
			ro->ro_rt = NULL;
		}
	}
}
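/*
 * Typical keepalive usage (a sketch; the actual caller lives in the TCP
 * timer code, not in this file): probe with an ACK carrying an already
 * acknowledged sequence number so the peer is forced to respond.
 *
 *	struct tcptemp *t = tcp_maketemplate(tp);
 *	if (t != NULL) {
 *		tcp_respond(tp, t->tt_ipgen, &t->tt_t, NULL,
 *		    tp->rcv_nxt, tp->snd_una - 1, 0);
 *		tcp_freetemplate(t);
 *	}
 */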
/*
 * Create a new TCP control block, making an
 * empty reassembly queue and hooking it to the argument
 * protocol control block.  The `inp' parameter must have
 * come from the zone allocator set up in tcp_init().
 */
struct tcpcb *
tcp_newtcpcb(struct inpcb *inp)
{
	struct inp_tp *it;
	struct tcpcb *tp;
#ifdef INET6
	boolean_t isipv6 = INP_ISIPV6(inp);
#else
	const boolean_t isipv6 = FALSE;
#endif

	it = (struct inp_tp *)inp;
	tp = &it->tcb;
	bzero(tp, sizeof(struct tcpcb));
	TAILQ_INIT(&tp->t_segq);
	tp->t_maxseg = tp->t_maxopd = isipv6 ? tcp_v6mssdflt : tcp_mssdflt;
	tp->t_rxtthresh = tcprexmtthresh;

	/* Set up our timeouts. */
	tp->tt_rexmt = &it->inp_tp_rexmt;
	tp->tt_persist = &it->inp_tp_persist;
	tp->tt_keep = &it->inp_tp_keep;
	tp->tt_2msl = &it->inp_tp_2msl;
	tp->tt_delack = &it->inp_tp_delack;
	tcp_inittimers(tp);

	/*
	 * Zero out timer message.  We don't create it here,
	 * since the current CPU may not be the owner of this
	 * inpcb.
	 */
	tp->tt_msg = &it->inp_tp_timermsg;
	bzero(tp->tt_msg, sizeof(*tp->tt_msg));

	tp->t_keepinit = tcp_keepinit;
	tp->t_keepidle = tcp_keepidle;
	tp->t_keepintvl = tcp_keepintvl;
	tp->t_keepcnt = tcp_keepcnt;
	tp->t_maxidle = tp->t_keepintvl * tp->t_keepcnt;

	if (tcp_do_ncr)
		tp->t_flags |= TF_NCR;
	if (tcp_do_rfc1323)
		tp->t_flags |= (TF_REQ_SCALE | TF_REQ_TSTMP);

	tp->t_inpcb = inp;	/* XXX */
	tp->t_state = TCPS_CLOSED;
	/*
	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
	 * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives
	 * reasonable initial retransmit time.
	 */
	tp->t_srtt = TCPTV_SRTTBASE;
	tp->t_rttvar =
	    ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
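	/*
	 * Worked out: with TCPTV_SRTTBASE == 0, the unscaled rttvar is
	 * TCPTV_RTOBASE / 4, so srtt + 4 * rttvar == TCPTV_RTOBASE,
	 * matching the t_rxtcur assignment below.
	 */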
	tp->t_rttmin = tcp_rexmit_min;
	tp->t_rxtcur = TCPTV_RTOBASE;
	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_last = ticks;
	tp->t_rcvtime = ticks;
	/*
	 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
	 * because the socket may be bound to an IPv6 wildcard address,
	 * which may match an IPv4-mapped IPv6 address.
	 */
	inp->inp_ip_ttl = ip_defttl;
	inp->inp_ppcb = tp;
	tcp_sack_tcpcb_init(tp);

	tp->tt_sndmore = &it->inp_tp_sndmore;
	tcp_output_init(tp);

	return (tp);		/* XXX */
}

/*
 * Drop a TCP connection, reporting the specified error.
 * If connection is synchronized, then send a RST to peer.
 */
struct tcpcb *
tcp_drop(struct tcpcb *tp, int error)
{
	struct socket *so = tp->t_inpcb->inp_socket;

	if (TCPS_HAVERCVDSYN(tp->t_state)) {
		tp->t_state = TCPS_CLOSED;
		tcp_output(tp);
		tcpstat.tcps_drops++;
	} else
		tcpstat.tcps_conndrops++;
	if (error == ETIMEDOUT && tp->t_softerror)
		error = tp->t_softerror;
	so->so_error = error;
	return (tcp_close(tp));
}

struct netmsg_listen_detach {
	struct netmsg_base	base;
	struct tcpcb		*nm_tp;
	struct tcpcb		*nm_tp_inh;
};

static void
tcp_listen_detach_handler(netmsg_t msg)
{
	struct netmsg_listen_detach *nmsg = (struct netmsg_listen_detach *)msg;
	struct tcpcb *tp = nmsg->nm_tp;
	int cpu = mycpuid, nextcpu;

	if (tp->t_flags & TF_LISTEN)
		syncache_destroy(tp, nmsg->nm_tp_inh);

	in_pcbremwildcardhash_oncpu(tp->t_inpcb, &tcbinfo[cpu]);

	nextcpu = cpu + 1;
	if (nextcpu < ncpus2)
		lwkt_forwardmsg(netisr_cpuport(nextcpu), &nmsg->base.lmsg);
	else
		lwkt_replymsg(&nmsg->base.lmsg, 0);
}

/*
 * Close a TCP control block:
 *	discard all space held by the tcp
 *	discard internet protocol block
 *	wake up any sleepers
 */
struct tcpcb *
tcp_close(struct tcpcb *tp)
{
	struct tseg_qent *q;
	struct inpcb *inp = tp->t_inpcb;
	struct inpcb *inp_inh = NULL;
	struct tcpcb *tp_inh = NULL;
	struct socket *so = inp->inp_socket;
	struct rtentry *rt;
	boolean_t dosavessthresh;
#ifdef INET6
	boolean_t isipv6 = INP_ISIPV6(inp);
#else
	const boolean_t isipv6 = FALSE;
#endif

	if (tp->t_flags & TF_LISTEN) {
		/*
		 * Pending socket/syncache inheritance
		 *
		 * If this is a listen(2) socket, find another listen(2)
		 * socket in the same local group, which could inherit
		 * the syncache and sockets pending on the completion
		 * and incompletion queues.
		 *
		 * NOTE:
		 * Currently the inheritance can only happen on listen(2)
		 * sockets w/ SO_REUSEPORT set.
		 */
		ASSERT_IN_NETISR(0);
		inp_inh = in_pcblocalgroup_last(&tcbinfo[0], inp);
		if (inp_inh != NULL)
			tp_inh = intotcpcb(inp_inh);
	}

	/*
	 * INP_WILDCARD indicates that listen(2) has been called on
	 * this socket.  This implies:
	 * - A wildcard inp's hash is replicated for each protocol thread.
	 * - Syncache for this inp grows independently in each protocol
	 *   thread.
	 * - There is more than one cpu
	 *
	 * We have to chain a message to the rest of the protocol threads
	 * to cleanup the wildcard hash and the syncache.  The cleanup
	 * in the current protocol thread is deferred till the end of this
	 * function (syncache_destroy and in_pcbdetach).
	 *
	 * NOTE:
	 * After cleaning up the inp's hash and syncache entries, this inp
	 * will no longer be available to the rest of the protocol threads,
	 * so we are safe to whack the inp in the following code.
	 */
	if ((inp->inp_flags & INP_WILDCARD) && ncpus2 > 1) {
		struct netmsg_listen_detach nmsg;

		KKASSERT(so->so_port == netisr_cpuport(0));
		ASSERT_IN_NETISR(0);
		KKASSERT(inp->inp_pcbinfo == &tcbinfo[0]);

		netmsg_init(&nmsg.base, NULL, &curthread->td_msgport,
		    MSGF_PRIORITY, tcp_listen_detach_handler);
		nmsg.nm_tp = tp;
		nmsg.nm_tp_inh = tp_inh;
		lwkt_domsg(netisr_cpuport(1), &nmsg.base.lmsg, 0);
	}

	KKASSERT(tp->t_state != TCPS_TERMINATING);
	tp->t_state = TCPS_TERMINATING;

	/*
	 * Make sure that all of our timers are stopped before we
	 * delete the PCB.  For listen TCP socket (tp->tt_msg == NULL),
	 * timers are never used.  If timer message is never created
	 * (tp->tt_msg->tt_tcb == NULL), timers are never used too.
	 */
	if (tp->tt_msg != NULL && tp->tt_msg->tt_tcb != NULL) {
		tcp_callout_stop(tp, tp->tt_rexmt);
		tcp_callout_stop(tp, tp->tt_persist);
		tcp_callout_stop(tp, tp->tt_keep);
		tcp_callout_stop(tp, tp->tt_2msl);
		tcp_callout_stop(tp, tp->tt_delack);
	}

	if (tp->t_flags & TF_ONOUTPUTQ) {
		KKASSERT(tp->tt_cpu == mycpu->gd_cpuid);
		TAILQ_REMOVE(&tcpcbackq[tp->tt_cpu], tp, t_outputq);
		tp->t_flags &= ~TF_ONOUTPUTQ;
	}

	/*
	 * If we got enough samples through the srtt filter,
	 * save the rtt and rttvar in the routing entry.
	 * 'Enough' is arbitrarily defined as 16 samples.
	 * 16 samples is enough for the srtt filter to converge
	 * to within 5% of the correct value; fewer samples and
	 * we could save a very bogus rtt.
	 *
	 * Don't update the default route's characteristics and don't
	 * update anything that the user "locked".
	 */
	if (tp->t_rttupdated >= 16) {
		u_long i = 0;

		if (isipv6) {
			struct sockaddr_in6 *sin6;

			if ((rt = inp->in6p_route.ro_rt) == NULL)
				goto no_valid_rt;
			sin6 = (struct sockaddr_in6 *)rt_key(rt);
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
				goto no_valid_rt;
		} else
			if ((rt = inp->inp_route.ro_rt) == NULL ||
			    ((struct sockaddr_in *)rt_key(rt))->
			    sin_addr.s_addr == INADDR_ANY)
				goto no_valid_rt;

		if (!(rt->rt_rmx.rmx_locks & RTV_RTT)) {
			i = tp->t_srtt * (RTM_RTTUNIT / (hz * TCP_RTT_SCALE));
			if (rt->rt_rmx.rmx_rtt && i)
				/*
				 * filter this update to half the old & half
				 * the new values, converting scale.
				 * See route.h and tcp_var.h for a
				 * description of the scaling constants.
				 */
				rt->rt_rmx.rmx_rtt =
				    (rt->rt_rmx.rmx_rtt + i) / 2;
			else
				rt->rt_rmx.rmx_rtt = i;
			tcpstat.tcps_cachedrtt++;
		}
		if (!(rt->rt_rmx.rmx_locks & RTV_RTTVAR)) {
			i = tp->t_rttvar *
			    (RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE));
			if (rt->rt_rmx.rmx_rttvar && i)
				rt->rt_rmx.rmx_rttvar =
				    (rt->rt_rmx.rmx_rttvar + i) / 2;
			else
				rt->rt_rmx.rmx_rttvar = i;
			tcpstat.tcps_cachedrttvar++;
		}
		/*
		 * The old comment here said:
		 * update the pipelimit (ssthresh) if it has been updated
		 * already or if a pipesize was specified & the threshold
		 * got below half the pipesize.  I.e., wait for bad news
		 * before we start updating, then update on both good
		 * and bad news.
		 *
		 * But we want to save the ssthresh even if no pipesize is
		 * specified explicitly in the route, because such
		 * connections still have an implicit pipesize specified
		 * by the global tcp_sendspace.  In the absence of a reliable
		 * way to calculate the pipesize, it will have to do.
		 */
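		/*
		 * Worked example (illustrative numbers only): an ssthresh
		 * of 32768 bytes with t_maxseg 1460 becomes
		 * (32768 + 730) / 1460 == 22 segments, clamped to >= 2,
		 * then 22 * (1460 + 40) == 33000 wire bytes for IPv4.
		 */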
		i = tp->snd_ssthresh;
		if (rt->rt_rmx.rmx_sendpipe != 0)
			dosavessthresh = (i < rt->rt_rmx.rmx_sendpipe/2);
		else
			dosavessthresh = (i < so->so_snd.ssb_hiwat/2);
		if (dosavessthresh ||
		    (!(rt->rt_rmx.rmx_locks & RTV_SSTHRESH) && (i != 0) &&
		     (rt->rt_rmx.rmx_ssthresh != 0))) {
			/*
			 * convert the limit from user data bytes to
			 * packets then to packet data bytes.
			 */
			i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
			if (i < 2)
				i = 2;
			i *= tp->t_maxseg +
			     (isipv6 ?
			      sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
			      sizeof(struct tcpiphdr));
			if (rt->rt_rmx.rmx_ssthresh)
				rt->rt_rmx.rmx_ssthresh =
				    (rt->rt_rmx.rmx_ssthresh + i) / 2;
			else
				rt->rt_rmx.rmx_ssthresh = i;
			tcpstat.tcps_cachedssthresh++;
		}
	}

no_valid_rt:
	/* free the reassembly queue, if any */
	while ((q = TAILQ_FIRST(&tp->t_segq)) != NULL) {
		TAILQ_REMOVE(&tp->t_segq, q, tqe_q);
		m_freem(q->tqe_m);
		kfree(q, M_TSEGQ);
		atomic_add_int(&tcp_reass_qsize, -1);
	}
	/* throw away SACK blocks in scoreboard */
	if (TCP_DO_SACK(tp))
		tcp_sack_destroy(&tp->scb);

	inp->inp_ppcb = NULL;
	soisdisconnected(so);
	/* note: pcb detached later on */

	tcp_destroy_timermsg(tp);
	tcp_output_cancel(tp);

	if (tp->t_flags & TF_LISTEN) {
		syncache_destroy(tp, tp_inh);
		if (inp_inh != NULL && inp_inh->inp_socket != NULL) {
			/*
			 * Pending sockets inheritance only needs
			 * to be done once in the current thread,
			 * i.e. netisr0.
			 */
			soinherit(so, inp_inh->inp_socket);
		}
	}

	so_async_rcvd_drop(so);
	/* Drop the reference for the asynchronized pru_rcvd */
	sofree(so);

	/*
	 * NOTE:
	 * pcbdetach removes any wildcard hash entry on the current CPU.
	 */
#ifdef INET6
	if (isipv6)
		in6_pcbdetach(inp);
	else
#endif
		in_pcbdetach(inp);

	tcpstat.tcps_closed++;
	return (NULL);
}
static __inline void
tcp_drain_oncpu(struct inpcbinfo *pcbinfo)
{
	struct inpcbhead *head = &pcbinfo->pcblisthead;
	struct inpcb *inpb;

	/*
	 * Since we run in netisr, it is MP safe, even if
	 * we block during the inpcb list iteration, i.e.
	 * we don't need to use an inpcb marker here.
	 */
	ASSERT_IN_NETISR(pcbinfo->cpu);

	LIST_FOREACH(inpb, head, inp_list) {
		struct tcpcb *tcpb;
		struct tseg_qent *te;

		if (inpb->inp_flags & INP_PLACEMARKER)
			continue;

		tcpb = intotcpcb(inpb);
		KASSERT(tcpb != NULL, ("tcp_drain_oncpu: tcpb is NULL"));

		if ((te = TAILQ_FIRST(&tcpb->t_segq)) != NULL) {
			TAILQ_REMOVE(&tcpb->t_segq, te, tqe_q);
			if (te->tqe_th->th_flags & TH_FIN)
				tcpb->t_flags &= ~TF_QUEDFIN;
			m_freem(te->tqe_m);
			kfree(te, M_TSEGQ);
			atomic_add_int(&tcp_reass_qsize, -1);
			/* retry */
		}
	}
}

static void
tcp_drain_dispatch(netmsg_t nmsg)
{
	crit_enter();
	lwkt_replymsg(&nmsg->lmsg, 0);	/* reply ASAP */
	crit_exit();

	tcp_drain_oncpu(&tcbinfo[mycpuid]);
}

static void
tcp_drain_ipi(void *arg __unused)
{
	int cpu = mycpuid;
	struct lwkt_msg *msg = &tcp_drain_netmsg[cpu].lmsg;

	crit_enter();
	if (msg->ms_flags & MSGF_DONE)
		lwkt_sendmsg_oncpu(netisr_cpuport(cpu), msg);
	crit_exit();
}

void
tcp_drain(void)
{
	cpumask_t mask;

	if (!do_tcpdrain)
		return;

	/*
	 * Walk the tcpbs, if existing, and flush the reassembly queue,
	 * if there is one...
	 * XXX: The "Net/3" implementation doesn't imply that the TCP
	 *      reassembly queue should be flushed, but in a situation
	 *      where we're really low on mbufs, this is potentially
	 *      useful.
	 * YYY: We may consider running tcp_drain_oncpu directly here,
	 *      however, that will require M_WAITOK memory allocation
	 *      for the inpcb marker.
	 */
	CPUMASK_ASSBMASK(mask, ncpus2);
	CPUMASK_ANDMASK(mask, smp_active_mask);
	if (CPUMASK_TESTNZERO(mask))
		lwkt_send_ipiq_mask(mask, tcp_drain_ipi, NULL);
}

/*
 * Notify a tcp user of an asynchronous error;
 * store error as soft error, but wake up user
 * (for now, won't do anything until can select for soft error).
 *
 * Do not wake up user since there currently is no mechanism for
 * reporting soft errors (yet - a kqueue filter may be added).
 */
static void
tcp_notify(struct inpcb *inp, int error)
{
	struct tcpcb *tp = intotcpcb(inp);

	/*
	 * Ignore some errors if we are hooked up.
	 * If connection hasn't completed, has retransmitted several times,
	 * and receives a second error, give up now.  This is better
	 * than waiting a long time to establish a connection that
	 * can never complete.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (error == EHOSTUNREACH || error == ENETUNREACH ||
	     error == EHOSTDOWN)) {
		return;
	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
	    tp->t_softerror)
		tcp_drop(tp, error);
	else
		tp->t_softerror = error;
#if 0
	wakeup(&so->so_timeo);
	sorwakeup(so);
	sowwakeup(so);
#endif
}

static int
tcp_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n;
	struct inpcb *marker;
	struct inpcb *inp;
	int origcpu, ccpu;

	error = 0;
	n = 0;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
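	/*
	 * Sizing note (reasoning inferred from the code below): when only
	 * a size estimate is requested, report the current count plus
	 * roughly 12.5% and a small constant of headroom, so the caller's
	 * buffer still fits if connections are created before the
	 * subsequent fetch.
	 */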
	if (req->oldptr == NULL) {
		for (ccpu = 0; ccpu < ncpus2; ++ccpu)
			n += tcbinfo[ccpu].ipi_count;
		req->oldidx = (n + n/8 + 10) * sizeof(struct xtcpcb);
		return (0);
	}

	if (req->newptr != NULL)
		return (EPERM);

	marker = kmalloc(sizeof(struct inpcb), M_TEMP, M_WAITOK|M_ZERO);
	marker->inp_flags |= INP_PLACEMARKER;

	/*
	 * OK, now we're committed to doing something.  Run the inpcb list
	 * for each cpu in the system and construct the output.  Use a
	 * list placemarker to deal with list changes occurring during
	 * copyout blockages (but otherwise depend on being on the correct
	 * cpu to avoid races).
	 */
	origcpu = mycpu->gd_cpuid;
	for (ccpu = 0; ccpu < ncpus2 && error == 0; ++ccpu) {
		caddr_t inp_ppcb;
		struct xtcpcb xt;

		lwkt_migratecpu(ccpu);

		n = tcbinfo[ccpu].ipi_count;

		LIST_INSERT_HEAD(&tcbinfo[ccpu].pcblisthead, marker, inp_list);
		i = 0;
		while ((inp = LIST_NEXT(marker, inp_list)) != NULL && i < n) {
			/*
			 * process a snapshot of pcbs, ignoring placemarkers
			 * and using our own to allow SYSCTL_OUT to block.
			 */
			LIST_REMOVE(marker, inp_list);
			LIST_INSERT_AFTER(inp, marker, inp_list);

			if (inp->inp_flags & INP_PLACEMARKER)
				continue;
			if (prison_xinpcb(req->td, inp))
				continue;

			xt.xt_len = sizeof xt;
			bcopy(inp, &xt.xt_inp, sizeof *inp);
			inp_ppcb = inp->inp_ppcb;
			if (inp_ppcb != NULL)
				bcopy(inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp);
			else
				bzero(&xt.xt_tp, sizeof xt.xt_tp);
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xt.xt_socket);
			if ((error = SYSCTL_OUT(req, &xt, sizeof xt)) != 0)
				break;
			++i;
		}
		LIST_REMOVE(marker, inp_list);
		if (error == 0 && i < n) {
			bzero(&xt, sizeof xt);
			xt.xt_len = sizeof xt;
			while (i < n) {
				error = SYSCTL_OUT(req, &xt, sizeof xt);
				if (error)
					break;
				++i;
			}
		}
	}

	/*
	 * Make sure we are on the same cpu we were on originally, since
	 * higher level callers expect this.  Also don't pollute caches with
	 * migrated userland data by (eventually) returning to userland
	 * on a different cpu.
	 */
	lwkt_migratecpu(origcpu);
	kfree(marker, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
    tcp_pcblist, "S,xtcpcb", "List of active TCP connections");
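/*
 * The exported format is a dense array of struct xtcpcb records, one per
 * inpcb visited, padded with zero-filled records when connections vanish
 * mid-scan; userland tools such as netstat(1) are the usual consumers.
 */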
static int
tcp_getcred(SYSCTL_HANDLER_ARGS)
{
	struct sockaddr_in addrs[2];
	struct ucred cred0, *cred = NULL;
	struct inpcb *inp;
	int cpu, origcpu, error;

	error = priv_check(req->td, PRIV_ROOT);
	if (error != 0)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof addrs);
	if (error != 0)
		return (error);

	origcpu = mycpuid;
	cpu = tcp_addrcpu(addrs[1].sin_addr.s_addr, addrs[1].sin_port,
	    addrs[0].sin_addr.s_addr, addrs[0].sin_port);

	lwkt_migratecpu(cpu);

	inp = in_pcblookup_hash(&tcbinfo[cpu], addrs[1].sin_addr,
	    addrs[1].sin_port, addrs[0].sin_addr, addrs[0].sin_port, 0, NULL);
	if (inp == NULL || inp->inp_socket == NULL) {
		error = ENOENT;
	} else if (inp->inp_socket->so_cred != NULL) {
		cred0 = *(inp->inp_socket->so_cred);
		cred = &cred0;
	}

	lwkt_migratecpu(origcpu);

	if (error)
		return (error);

	return SYSCTL_OUT(req, cred, sizeof(struct ucred));
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred, (CTLTYPE_OPAQUE | CTLFLAG_RW),
    0, 0, tcp_getcred, "S,ucred", "Get the ucred of a TCP connection");

#ifdef INET6
static int
tcp6_getcred(SYSCTL_HANDLER_ARGS)
{
	struct sockaddr_in6 addrs[2];
	struct inpcb *inp;
	int error;

	error = priv_check(req->td, PRIV_ROOT);
	if (error != 0)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof addrs);
	if (error != 0)
		return (error);
	crit_enter();
	inp = in6_pcblookup_hash(&tcbinfo[0],
	    &addrs[1].sin6_addr, addrs[1].sin6_port,
	    &addrs[0].sin6_addr, addrs[0].sin6_port, 0, NULL);
	if (inp == NULL || inp->inp_socket == NULL) {
		error = ENOENT;
		goto out;
	}
	error = SYSCTL_OUT(req, inp->inp_socket->so_cred, sizeof(struct ucred));
out:
	crit_exit();
	return (error);
}

SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred, (CTLTYPE_OPAQUE | CTLFLAG_RW),
    0, 0,
    tcp6_getcred, "S,ucred", "Get the ucred of a TCP6 connection");
#endif

struct netmsg_tcp_notify {
	struct netmsg_base base;
	inp_notify_t	nm_notify;
	struct in_addr	nm_faddr;
	int		nm_arg;
};

static void
tcp_notifyall_oncpu(netmsg_t msg)
{
	struct netmsg_tcp_notify *nm = (struct netmsg_tcp_notify *)msg;
	int nextcpu;

	in_pcbnotifyall(&tcbinfo[mycpuid], nm->nm_faddr,
	    nm->nm_arg, nm->nm_notify);

	nextcpu = mycpuid + 1;
	if (nextcpu < ncpus2)
		lwkt_forwardmsg(netisr_cpuport(nextcpu), &nm->base.lmsg);
	else
		lwkt_replymsg(&nm->base.lmsg, 0);
}

inp_notify_t
tcp_get_inpnotify(int cmd, const struct sockaddr *sa,
    int *arg, struct ip **ip0, int *cpuid)
{
	struct ip *ip = *ip0;
	struct in_addr faddr;
	inp_notify_t notify = tcp_notify;

	faddr = ((const struct sockaddr_in *)sa)->sin_addr;
	if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
		return NULL;

	*arg = inetctlerrmap[cmd];
	if (cmd == PRC_QUENCH) {
		notify = tcp_quench;
	} else if (icmp_may_rst &&
	    (cmd == PRC_UNREACH_ADMIN_PROHIB ||
	     cmd == PRC_UNREACH_PORT ||
	     cmd == PRC_TIMXCEED_INTRANS) &&
	    ip != NULL) {
		notify = tcp_drop_syn_sent;
	} else if (cmd == PRC_MSGSIZE) {
		const struct icmp *icmp = (const struct icmp *)
		    ((caddr_t)ip - offsetof(struct icmp, icmp_ip));

		*arg = ntohs(icmp->icmp_nextmtu);
		notify = tcp_mtudisc;
	} else if (PRC_IS_REDIRECT(cmd)) {
		ip = NULL;
		notify = in_rtchange;
	} else if (cmd == PRC_HOSTDEAD) {
		ip = NULL;
	} else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
		return NULL;
	}

	if (cpuid != NULL) {
		if (ip == NULL) {
			/* Go through all CPUs */
			*cpuid = ncpus;
		} else {
			const struct tcphdr *th;

			th = (const struct tcphdr *)
			    ((caddr_t)ip + (IP_VHL_HL(ip->ip_vhl) << 2));
			*cpuid = tcp_addrcpu(faddr.s_addr, th->th_dport,
			    ip->ip_src.s_addr, th->th_sport);
		}
	}

	*ip0 = ip;
	return notify;
}

void
tcp_ctlinput(netmsg_t msg)
{
	int cmd = msg->ctlinput.nm_cmd;
	struct sockaddr *sa = msg->ctlinput.nm_arg;
	struct ip *ip = msg->ctlinput.nm_extra;
	struct in_addr faddr;
	inp_notify_t notify;
	int arg, cpuid;

	notify = tcp_get_inpnotify(cmd, sa, &arg, &ip, &cpuid);
	if (notify == NULL)
		goto done;

	faddr = ((struct sockaddr_in *)sa)->sin_addr;
	if (ip != NULL) {
		const struct tcphdr *th;
		struct inpcb *inp;

		if (cpuid != mycpuid)
			goto done;

		th = (const struct tcphdr *)
		    ((caddr_t)ip + (IP_VHL_HL(ip->ip_vhl) << 2));
		inp = in_pcblookup_hash(&tcbinfo[mycpuid], faddr, th->th_dport,
		    ip->ip_src, th->th_sport, 0, NULL);
		if (inp != NULL && inp->inp_socket != NULL) {
			tcp_seq icmpseq = htonl(th->th_seq);
			struct tcpcb *tp = intotcpcb(inp);
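			/*
			 * Only act on the ICMP error if the quoted
			 * sequence number falls inside our unacknowledged
			 * send window; this guards against blindly spoofed
			 * ICMP packets tearing down connections.
			 */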
			if (SEQ_GEQ(icmpseq, tp->snd_una) &&
			    SEQ_LT(icmpseq, tp->snd_max))
				notify(inp, arg);
		} else {
			struct in_conninfo inc;

			inc.inc_fport = th->th_dport;
			inc.inc_lport = th->th_sport;
			inc.inc_faddr = faddr;
			inc.inc_laddr = ip->ip_src;
#ifdef INET6
			inc.inc_isipv6 = 0;
#endif
			syncache_unreach(&inc, th);
		}
	} else if (msg->ctlinput.nm_direct) {
		if (cpuid != ncpus && cpuid != mycpuid)
			goto done;
		if (mycpuid >= ncpus2)
			goto done;

		in_pcbnotifyall(&tcbinfo[mycpuid], faddr, arg, notify);
	} else {
		struct netmsg_tcp_notify *nm;

		ASSERT_IN_NETISR(0);
		nm = kmalloc(sizeof(*nm), M_LWKTMSG, M_INTWAIT);
		netmsg_init(&nm->base, NULL, &netisr_afree_rport,
		    0, tcp_notifyall_oncpu);
		nm->nm_faddr = faddr;
		nm->nm_arg = arg;
		nm->nm_notify = notify;

		lwkt_sendmsg(netisr_cpuport(0), &nm->base.lmsg);
	}
done:
	lwkt_replymsg(&msg->lmsg, 0);
}

#ifdef INET6

void
tcp6_ctlinput(netmsg_t msg)
{
	int cmd = msg->ctlinput.nm_cmd;
	struct sockaddr *sa = msg->ctlinput.nm_arg;
	void *d = msg->ctlinput.nm_extra;
	struct tcphdr th;
	inp_notify_t notify = tcp_notify;
	struct ip6_hdr *ip6;
	struct mbuf *m;
	struct ip6ctlparam *ip6cp = NULL;
	const struct sockaddr_in6 *sa6_src = NULL;
	int off;
	struct tcp_portonly {
		u_int16_t th_sport;
		u_int16_t th_dport;
	} *thp;
	int arg;

	if (sa->sa_family != AF_INET6 ||
	    sa->sa_len != sizeof(struct sockaddr_in6)) {
		goto out;
	}

	arg = 0;
	if (cmd == PRC_QUENCH)
		notify = tcp_quench;
	else if (cmd == PRC_MSGSIZE) {
		struct ip6ctlparam *ip6cp = d;
		struct icmp6_hdr *icmp6 = ip6cp->ip6c_icmp6;

		arg = ntohl(icmp6->icmp6_mtu);
		notify = tcp_mtudisc;
	} else if (!PRC_IS_REDIRECT(cmd) &&
	    ((unsigned)cmd > PRC_NCMDS || inet6ctlerrmap[cmd] == 0)) {
		goto out;
	}

	/* if the parameter is from icmp6, decode it. */
	if (d != NULL) {
		ip6cp = (struct ip6ctlparam *)d;
		m = ip6cp->ip6c_m;
		ip6 = ip6cp->ip6c_ip6;
		off = ip6cp->ip6c_off;
		sa6_src = ip6cp->ip6c_src;
	} else {
		m = NULL;
		ip6 = NULL;
		off = 0;	/* fool gcc */
		sa6_src = &sa6_any;
	}

	if (ip6 != NULL) {
		struct in_conninfo inc;
		/*
		 * XXX: We assume that when IPV6 is non NULL,
		 * M and OFF are valid.
		 */

		/* check if we can safely examine src and dst ports */
		if (m->m_pkthdr.len < off + sizeof *thp)
			goto out;

		bzero(&th, sizeof th);
		m_copydata(m, off, sizeof *thp, (caddr_t)&th);

		in6_pcbnotify(&tcbinfo[0], sa, th.th_dport,
		    (struct sockaddr *)ip6cp->ip6c_src,
		    th.th_sport, cmd, arg, notify);

		inc.inc_fport = th.th_dport;
		inc.inc_lport = th.th_sport;
		inc.inc6_faddr = ((struct sockaddr_in6 *)sa)->sin6_addr;
		inc.inc6_laddr = ip6cp->ip6c_src->sin6_addr;
		inc.inc_isipv6 = 1;
		syncache_unreach(&inc, &th);
	} else {
		in6_pcbnotify(&tcbinfo[0], sa, 0,
		    (const struct sockaddr *)sa6_src, 0, cmd, arg, notify);
	}
out:
	lwkt_replymsg(&msg->ctlinput.base.lmsg, 0);
}

#endif

/*
 * Following is where TCP initial sequence number generation occurs.
 *
 * There are two places where we must use initial sequence numbers:
 * 1.  In SYN-ACK packets.
 * 2.  In SYN packets.
 *
 * All ISNs for SYN-ACK packets are generated by the syncache.  See
 * tcp_syncache.c for details.
 *
 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
 * depends on this property.  In addition, these ISNs should be
 * unguessable so as to prevent connection hijacking.  To satisfy
 * the requirements of this situation, the algorithm outlined in
 * RFC 1948 is used to generate sequence numbers.
 *
 * Implementation details:
 *
 * Time is based on the system timer, and is corrected so that it
 * increases by one megabyte per second.  This allows for proper
 * recycling on high speed LANs while still leaving over an hour
 * before rollover.
 *
 * net.inet.tcp.isn_reseed_interval controls the number of seconds
 * between seeding of isn_secret.  This is normally set to zero,
 * as reseeding should not be necessary.
 */

#define ISN_BYTES_PER_SECOND 1048576
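/*
 * Worked out: at 2^20 bytes/second the 32-bit sequence space wraps after
 * 2^32 / 2^20 == 4096 seconds, i.e. roughly 68 minutes, which is the
 * "over an hour before rollover" mentioned above.
 */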
u_char isn_secret[32];
int isn_last_reseed;
MD5_CTX isn_ctx;

tcp_seq
tcp_new_isn(struct tcpcb *tp)
{
	u_int32_t md5_buffer[4];
	tcp_seq new_isn;

	/* Seed if this is the first use, reseed if requested. */
	if ((isn_last_reseed == 0) || ((tcp_isn_reseed_interval > 0) &&
	    (((u_int)isn_last_reseed + (u_int)tcp_isn_reseed_interval*hz)
	     < (u_int)ticks))) {
		read_random_unlimited(&isn_secret, sizeof isn_secret);
		isn_last_reseed = ticks;
	}

	/* Compute the md5 hash and return the ISN. */
	MD5Init(&isn_ctx);
	MD5Update(&isn_ctx, (u_char *)&tp->t_inpcb->inp_fport, sizeof(u_short));
	MD5Update(&isn_ctx, (u_char *)&tp->t_inpcb->inp_lport, sizeof(u_short));
#ifdef INET6
	if (INP_ISIPV6(tp->t_inpcb)) {
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
		    sizeof(struct in6_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
		    sizeof(struct in6_addr));
	} else
#endif
	{
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
		    sizeof(struct in_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
		    sizeof(struct in_addr));
	}
	MD5Update(&isn_ctx, (u_char *) &isn_secret, sizeof(isn_secret));
	MD5Final((u_char *) &md5_buffer, &isn_ctx);
	new_isn = (tcp_seq) md5_buffer[0];
	new_isn += ticks * (ISN_BYTES_PER_SECOND / hz);
	return (new_isn);
}

/*
 * When a source quench is received, close congestion window
 * to one segment.  We will gradually open it again as we proceed.
 */
void
tcp_quench(struct inpcb *inp, int error)
{
	struct tcpcb *tp = intotcpcb(inp);

	KASSERT(tp != NULL, ("tcp_quench: tp is NULL"));
	tp->snd_cwnd = tp->t_maxseg;
	tp->snd_wacked = 0;
}

/*
 * When a specific ICMP unreachable message is received and the
 * connection state is SYN-SENT, drop the connection.  This behavior
 * is controlled by the icmp_may_rst sysctl.
 */
void
tcp_drop_syn_sent(struct inpcb *inp, int error)
{
	struct tcpcb *tp = intotcpcb(inp);

	KASSERT(tp != NULL, ("tcp_drop_syn_sent: tp is NULL"));
	if (tp->t_state == TCPS_SYN_SENT)
		tcp_drop(tp, error);
}

/*
 * When a `need fragmentation' ICMP is received, update our idea of the MSS
 * based on the new value in the route.  Also nudge TCP to send something,
 * since we know the packet we just sent was dropped.
 * This duplicates some code in the tcp_mss() function in tcp_input.c.
 */
void
tcp_mtudisc(struct inpcb *inp, int mtu)
{
	struct tcpcb *tp = intotcpcb(inp);
	struct rtentry *rt;
	struct socket *so = inp->inp_socket;
	int maxopd, mss;
#ifdef INET6
	boolean_t isipv6 = INP_ISIPV6(inp);
#else
	const boolean_t isipv6 = FALSE;
#endif

	KASSERT(tp != NULL, ("tcp_mtudisc: tp is NULL"));

	/*
	 * If no MTU is provided in the ICMP message, use the
	 * next lower likely value, as specified in RFC 1191.
	 */
	if (mtu == 0) {
		int oldmtu;

		oldmtu = tp->t_maxopd +
		    (isipv6 ?
		     sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
		     sizeof(struct tcpiphdr));
		mtu = ip_next_mtu(oldmtu, 0);
	}

	if (isipv6)
		rt = tcp_rtlookup6(&inp->inp_inc);
	else
		rt = tcp_rtlookup(&inp->inp_inc);
	if (rt != NULL) {
		if (rt->rt_rmx.rmx_mtu != 0 && rt->rt_rmx.rmx_mtu < mtu)
			mtu = rt->rt_rmx.rmx_mtu;

		maxopd = mtu -
		    (isipv6 ?
		     sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
		     sizeof(struct tcpiphdr));

		/*
		 * XXX - The following conditional probably violates the TCP
		 * spec.  The problem is that, since we don't know the
		 * other end's MSS, we are supposed to use a conservative
		 * default.  But, if we do that, then MTU discovery will
		 * never actually take place, because the conservative
		 * default is much less than the MTUs typically seen
		 * on the Internet today.  For the moment, we'll sweep
		 * this under the carpet.
		 *
		 * The conservative default might not actually be a problem
		 * if the only case this occurs is when sending an initial
		 * SYN with options and data to a host we've never talked
		 * to before.  Then, they will reply with an MSS value which
		 * will get recorded and the new parameters should get
		 * recomputed.  For Further Study.
		 */
		if (rt->rt_rmx.rmx_mssopt && rt->rt_rmx.rmx_mssopt < maxopd)
			maxopd = rt->rt_rmx.rmx_mssopt;
	} else
		maxopd = mtu -
		    (isipv6 ?
		     sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
		     sizeof(struct tcpiphdr));

	if (tp->t_maxopd <= maxopd)
		return;
	tp->t_maxopd = maxopd;

	mss = maxopd;
	if ((tp->t_flags & (TF_REQ_TSTMP | TF_RCVD_TSTMP | TF_NOOPT)) ==
	    (TF_REQ_TSTMP | TF_RCVD_TSTMP))
		mss -= TCPOLEN_TSTAMP_APPA;
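	/*
	 * Worked example (illustrative): an ICMP-reported MTU of 1500
	 * yields maxopd 1460 for IPv4 (1440 for IPv6); with timestamps in
	 * use, TCPOLEN_TSTAMP_APPA (12 bytes) leaves an effective mss of
	 * 1448 (1428).
	 */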
	/* round down to multiple of MCLBYTES */
#if	(MCLBYTES & (MCLBYTES - 1)) == 0    /* test if MCLBYTES power of 2 */
	if (mss > MCLBYTES)
		mss &= ~(MCLBYTES - 1);
#else
	if (mss > MCLBYTES)
		mss = (mss / MCLBYTES) * MCLBYTES;
#endif

	if (so->so_snd.ssb_hiwat < mss)
		mss = so->so_snd.ssb_hiwat;

	tp->t_maxseg = mss;
	tp->t_rtttime = 0;
	tp->snd_nxt = tp->snd_una;
	tcp_output(tp);
	tcpstat.tcps_mturesent++;
}

/*
 * Look-up the routing entry to the peer of this inpcb.  If no route
 * is found and one cannot be allocated, return NULL.  This routine
 * is called by TCP routines that access the rmx structure and by tcp_mss
 * to get the interface MTU.
 */
struct rtentry *
tcp_rtlookup(struct in_conninfo *inc)
{
	struct route *ro = &inc->inc_route;

	if (ro->ro_rt == NULL || !(ro->ro_rt->rt_flags & RTF_UP)) {
		/* No route yet, so try to acquire one */
		if (inc->inc_faddr.s_addr != INADDR_ANY) {
			/*
			 * unused portions of the structure MUST be zero'd
			 * out because rtalloc() treats it as opaque data
			 */
			bzero(&ro->ro_dst, sizeof(struct sockaddr_in));
			ro->ro_dst.sa_family = AF_INET;
			ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
			((struct sockaddr_in *) &ro->ro_dst)->sin_addr =
			    inc->inc_faddr;
			rtalloc(ro);
		}
	}
	return (ro->ro_rt);
}

#ifdef INET6
struct rtentry *
tcp_rtlookup6(struct in_conninfo *inc)
{
	struct route_in6 *ro6 = &inc->inc6_route;

	if (ro6->ro_rt == NULL || !(ro6->ro_rt->rt_flags & RTF_UP)) {
		/* No route yet, so try to acquire one */
		if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) {
			/*
			 * unused portions of the structure MUST be zero'd
			 * out because rtalloc() treats it as opaque data
			 */
			bzero(&ro6->ro_dst, sizeof(struct sockaddr_in6));
			ro6->ro_dst.sin6_family = AF_INET6;
			ro6->ro_dst.sin6_len = sizeof(struct sockaddr_in6);
			ro6->ro_dst.sin6_addr = inc->inc6_faddr;
			rtalloc((struct route *)ro6);
		}
	}
	return (ro6->ro_rt);
}
#endif

#ifdef IPSEC
/* compute ESP/AH header size for TCP, including outer IP header. */
 */
size_t
ipsec_hdrsiz_tcp(struct tcpcb *tp)
{
        struct inpcb *inp;
        struct mbuf *m;
        size_t hdrsiz;
        struct ip *ip;
        struct tcphdr *th;

        if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL))
                return (0);
        MGETHDR(m, M_NOWAIT, MT_DATA);
        if (!m)
                return (0);

#ifdef INET6
        if (INP_ISIPV6(inp)) {
                struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);

                th = (struct tcphdr *)(ip6 + 1);
                m->m_pkthdr.len = m->m_len =
                    sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
                tcp_fillheaders(tp, ip6, th, FALSE);
                hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
        } else
#endif
        {
                ip = mtod(m, struct ip *);
                th = (struct tcphdr *)(ip + 1);
                m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
                tcp_fillheaders(tp, ip, th, FALSE);
                hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
        }

        m_free(m);
        return (hdrsiz);
}
#endif

/*
 * TCP BANDWIDTH DELAY PRODUCT WINDOW LIMITING
 *
 * This code attempts to calculate the bandwidth-delay product as a
 * means of determining the optimal window size to maximize bandwidth,
 * minimize RTT, and avoid the over-allocation of buffers on interfaces and
 * routers.  This code also does a fairly good job keeping RTTs in check
 * across slow links like modems.  We implement an algorithm which is very
 * similar to (but not meant to be) TCP/Vegas.  The code operates on the
 * transmitter side of a TCP connection and so only affects the transmit
 * side of the connection.
 *
 * BACKGROUND: TCP makes no provision for the management of buffer space
 * at the end points or at the intermediate routers and switches.  A TCP
 * stream, whether using NewReno or not, will eventually buffer as
 * many packets as it is able and the only reason this typically works is
 * due to the fairly small default buffers made available for a connection
 * (typically 16K or 32K).  As machines use larger windows and/or window
 * scaling it is now fairly easy for even a single TCP connection to blow-out
 * all available buffer space not only on the local interface, but on
 * intermediate routers and switches as well.  NewReno makes a misguided
 * attempt to 'solve' this problem by waiting for an actual failure to occur,
 * then backing off, then steadily increasing the window again until another
 * failure occurs, ad-infinitum.  This results in terrible oscillation that
 * is only made worse as network loads increase and the idea of intentionally
 * blowing out network buffers is, frankly, a terrible way to manage network
 * resources.
 *
 * It is far better to limit the transmit window prior to the failure
 * condition being achieved.  There are two general ways to do this:  First
 * you can 'scan' through different transmit window sizes and locate the
 * point where the RTT stops increasing, indicating that you have filled the
 * pipe, then scan backwards until you note that RTT stops decreasing, then
 * repeat ad-infinitum.  This method works in principle but has severe
 * implementation issues due to RTT variances, timer granularity, and
 * instability in the algorithm which can lead to many false positives and
 * create oscillations as well as interact badly with other TCP streams
 * implementing the same algorithm.
 *
 * The second method is to limit the window to the bandwidth delay product
 * of the link.  This is the method we implement.  RTT variances and our
 * own manipulation of the congestion window, bwnd, can potentially
 * destabilize the algorithm.  For this reason we have to stabilize the
 * elements used to calculate the window.  We do this by using the minimum
 * observed RTT, the long term average of the observed bandwidth, and
 * by adding two segments worth of slop.  It isn't perfect but it is able
 * to react to changing conditions and gives us a very stable basis on
 * which to extend the algorithm.
 */
void
tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq)
{
        u_long bw;
        u_long ibw;
        u_long bwnd;
        int save_ticks;
        int delta_ticks;

        /*
         * If inflight_enable is disabled in the middle of a tcp connection,
         * make sure snd_bwnd is effectively disabled.
         */
        if (!tcp_inflight_enable) {
                tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
                tp->snd_bandwidth = 0;
                return;
        }

        /*
         * Validate the delta time.  If a connection is new or has been idle
         * a long time we have to reset the bandwidth calculator.
         */
        save_ticks = ticks;
        cpu_ccfence();
        delta_ticks = save_ticks - tp->t_bw_rtttime;
        if (tp->t_bw_rtttime == 0 || delta_ticks < 0 || delta_ticks > hz * 10) {
                tp->t_bw_rtttime = save_ticks;
                tp->t_bw_rtseq = ack_seq;
                if (tp->snd_bandwidth == 0)
                        tp->snd_bandwidth = tcp_inflight_min;
                return;
        }

        /*
         * A delta of at least 1 tick is required.  Waiting 2 ticks will
         * result in better (bw) accuracy.  More than that and the ramp-up
         * will be too slow.
         */
        if (delta_ticks == 0 || delta_ticks == 1)
                return;

        /*
         * Sanity check, plus ignore pure window update acks.
         */
        if ((int)(ack_seq - tp->t_bw_rtseq) <= 0)
                return;

        /*
         * Figure out the bandwidth.  Due to the tick granularity this
         * is a very rough number and it MUST be averaged over a fairly
         * long period of time.  XXX we need to take into account a link
         * that is not using all available bandwidth, but for now our
         * slop will ramp us up if this case occurs and the bandwidth later
         * increases.
         */
        ibw = (int64_t)(ack_seq - tp->t_bw_rtseq) * hz / delta_ticks;
        tp->t_bw_rtttime = save_ticks;
        tp->t_bw_rtseq = ack_seq;
        bw = ((int64_t)tp->snd_bandwidth * 15 + ibw) >> 4;

        tp->snd_bandwidth = bw;

        /*
         * Calculate the semi-static bandwidth delay product, plus two maximal
         * segments.  The additional slop puts us squarely in the sweet
         * spot and also handles the bandwidth run-up case.  Without the
         * slop we could be locking ourselves into a lower bandwidth.
         *
         * At very high speeds the bw calculation can become overly sensitive
         * and error prone when delta_ticks is low (e.g. usually 1).  To deal
         * with the problem the stab must be scaled to the bw.  A stab of 50
         * (the default) increases the bw for the purposes of the bwnd
         * calculation by 5%.
         *
         * Situations Handled:
         * (1) Prevents over-queueing of packets on LANs, especially on
         *     high speed LANs, allowing larger TCP buffers to be
         *     specified, and also does a good job preventing
         *     over-queueing of packets over choke points like modems
         *     (at least for the transmit side).
         *
         * (2) Is able to handle changing network loads (bandwidth
         *     drops so bwnd drops, bandwidth increases so bwnd
         *     increases).
         *
         * (3) Theoretically should stabilize in the face of multiple
         *     connections implementing the same algorithm (this may need
         *     a little work).
         *
         * (4) Stability value (defaults to 50 = 5% bw slop, see above)
         *     can be adjusted with a sysctl but typically only needs to
         *     be changed on very slow connections.  A value no smaller
         *     than 5 should be used, but only reduce this default if
         *     you have no other choice.
         */

#define USERTT  ((tp->t_srtt + tp->t_rttvar) + tcp_inflight_adjrtt)
        bw += bw * tcp_inflight_stab / 1000;
        bwnd = (int64_t)bw * USERTT / (hz << TCP_RTT_SHIFT) +
               (int)tp->t_maxseg * 2;
#undef USERTT

        if (tcp_inflight_debug > 0) {
                static int ltime;
                if ((u_int)(save_ticks - ltime) >= hz / tcp_inflight_debug) {
                        ltime = save_ticks;
                        kprintf("%p ibw %ld bw %ld rttvar %d srtt %d "
                                "bwnd %ld delta %d snd_win %ld\n",
                                tp, ibw, bw, tp->t_rttvar, tp->t_srtt,
                                bwnd, delta_ticks, tp->snd_wnd);
                }
        }
        if ((long)bwnd < tcp_inflight_min)
                bwnd = tcp_inflight_min;
        if (bwnd > tcp_inflight_max)
                bwnd = tcp_inflight_max;
        if ((long)bwnd < tp->t_maxseg * 2)
                bwnd = tp->t_maxseg * 2;
        tp->snd_bwnd = bwnd;
}

static void
tcp_rmx_iwsegs(struct tcpcb *tp, u_long *maxsegs, u_long *capsegs)
{
        struct rtentry *rt;
        struct inpcb *inp = tp->t_inpcb;
#ifdef INET6
        boolean_t isipv6 = INP_ISIPV6(inp);
#else
        const boolean_t isipv6 = FALSE;
#endif

        /* XXX */
        if (tcp_iw_maxsegs < TCP_IW_MAXSEGS_DFLT)
                tcp_iw_maxsegs = TCP_IW_MAXSEGS_DFLT;
        if (tcp_iw_capsegs < TCP_IW_CAPSEGS_DFLT)
                tcp_iw_capsegs = TCP_IW_CAPSEGS_DFLT;

        if (isipv6)
                rt = tcp_rtlookup6(&inp->inp_inc);
        else
                rt = tcp_rtlookup(&inp->inp_inc);
        if (rt == NULL ||
            rt->rt_rmx.rmx_iwmaxsegs < TCP_IW_MAXSEGS_DFLT ||
            rt->rt_rmx.rmx_iwcapsegs < TCP_IW_CAPSEGS_DFLT) {
                *maxsegs = tcp_iw_maxsegs;
                *capsegs = tcp_iw_capsegs;
                return;
        }
        *maxsegs = rt->rt_rmx.rmx_iwmaxsegs;
        *capsegs = rt->rt_rmx.rmx_iwcapsegs;
}

u_long
tcp_initial_window(struct tcpcb *tp)
{
        if (tcp_do_rfc3390) {
                /*
                 * RFC3390:
                 * "If the SYN or SYN/ACK is lost, the initial window
                 *  used by a sender after a correctly transmitted SYN
                 *  MUST be one segment consisting of MSS bytes."
                 *
                 * However, we do something a little bit more aggressive
                 * than RFC3390 here:
                 * - The IW is reduced only if the time spent in the SYN
                 *   or SYN|ACK retransmission is >= 3 seconds.  We do this
                 *   mainly because when RFC3390 was published, the initial
                 *   RTO was still 3 seconds (the threshold we test here),
                 *   while after RFC6298, the initial RTO is 1 second.
                 *   This behaviour probably still falls within the spirit
                 *   of RFC3390.
                 * - When the IW is reduced, 2*MSS is used instead of 1*MSS,
                 *   mainly to avoid a sender and receiver deadlock until
                 *   the delayed ACK timer expires.  Even RFC2581 does not
                 *   try to reduce the IW upon SYN or SYN|ACK retransmission
                 *   timeout.
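                 *
                 * For example (illustrative numbers only): with an MSS of
                 * 1448 and the default rmx settings (maxsegs = capsegs = 4),
                 * the non-reduced branch below computes
                 * min(4*1448, max(2*1448, 4*1460)) = 5792 bytes, i.e. a
                 * four segment initial window.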
                 *
                 * See also:
                 * http://tools.ietf.org/html/draft-ietf-tcpm-initcwnd-03
                 */
                if (tp->t_rxtsyn >= TCPTV_RTOBASE3) {
                        return (2 * tp->t_maxseg);
                } else {
                        u_long maxsegs, capsegs;

                        tcp_rmx_iwsegs(tp, &maxsegs, &capsegs);
                        return min(maxsegs * tp->t_maxseg,
                                   max(2 * tp->t_maxseg, capsegs * 1460));
                }
        } else {
                /*
                 * Even RFC2581 (back to 1999) allows 2*SMSS IW.
                 *
                 * Mainly to avoid sender and receiver deadlock
                 * until delayed ACK timer expires.
                 */
                return (2 * tp->t_maxseg);
        }
}

#ifdef TCP_SIGNATURE
/*
 * Compute TCP-MD5 hash of a TCP segment. (RFC2385)
 *
 * We do this over ip, tcphdr, segment data, and the key in the SADB.
 * When called from tcp_input(), we can be sure that th_sum has been
 * zeroed out and verified already.
 *
 * Return 0 if successful, otherwise an error (the code below returns
 * EINVAL on failure).
 *
 * XXX The key is retrieved from the system's PF_KEY SADB, by keying a
 * search with the destination IP address, and a 'magic SPI' to be
 * determined by the application.  This is hardcoded elsewhere to 1179
 * right now.  Another branch of this code exists which uses the SPD to
 * specify per-application flows but it is unstable.
 */
int
tcpsignature_compute(
        struct mbuf *m,         /* mbuf chain */
        int len,                /* length of TCP data */
        int optlen,             /* length of TCP options */
        u_char *buf,            /* storage for MD5 digest */
        u_int direction)        /* direction of flow */
{
        struct ippseudo ippseudo;
        MD5_CTX ctx;
        int doff;
        struct ip *ip;
        struct ipovly *ipovly;
        struct secasvar *sav;
        struct tcphdr *th;
#ifdef INET6
        struct ip6_hdr *ip6;
        struct in6_addr in6;
        uint32_t plen;
        uint16_t nhdr;
#endif /* INET6 */
        u_short savecsum;

        KASSERT(m != NULL, ("passed NULL mbuf. Game over."));
        KASSERT(buf != NULL, ("passed NULL storage pointer for MD5 signature"));
        /*
         * Extract the destination from the IP header in the mbuf.
         */
        ip = mtod(m, struct ip *);
#ifdef INET6
        ip6 = NULL;     /* Make the compiler happy. */
#endif /* INET6 */
        /*
         * Look up an SADB entry which matches the address found in
         * the segment.
         */
        switch (IP_VHL_V(ip->ip_vhl)) {
        case IPVERSION:
                sav = key_allocsa(AF_INET, (caddr_t)&ip->ip_src,
                    (caddr_t)&ip->ip_dst, IPPROTO_TCP, htonl(TCP_SIG_SPI));
                break;
#ifdef INET6
        case (IPV6_VERSION >> 4):
                ip6 = mtod(m, struct ip6_hdr *);
                sav = key_allocsa(AF_INET6, (caddr_t)&ip6->ip6_src,
                    (caddr_t)&ip6->ip6_dst, IPPROTO_TCP, htonl(TCP_SIG_SPI));
                break;
#endif /* INET6 */
        default:
                return (EINVAL);
                /* NOTREACHED */
                break;
        }
        if (sav == NULL) {
                kprintf("%s: SADB lookup failed\n", __func__);
                return (EINVAL);
        }
        MD5Init(&ctx);

        /*
         * Step 1: Update MD5 hash with IP pseudo-header.
         *
         * XXX The ippseudo header MUST be digested in network byte order,
         * or else we'll fail the regression test.  Assume all fields we've
         * been doing arithmetic on have been in host byte order.
         * XXX One cannot depend on ipovly->ih_len here.  When called from
         * tcp_output(), the underlying ip_len member has not yet been set.
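         * For a pure ACK (len == 0, optlen == 0), for instance, the
         * pseudo-header length digested below is htons(20), i.e. just
         * sizeof(struct tcphdr).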
         */
        switch (IP_VHL_V(ip->ip_vhl)) {
        case IPVERSION:
                ipovly = (struct ipovly *)ip;
                ippseudo.ippseudo_src = ipovly->ih_src;
                ippseudo.ippseudo_dst = ipovly->ih_dst;
                ippseudo.ippseudo_pad = 0;
                ippseudo.ippseudo_p = IPPROTO_TCP;
                ippseudo.ippseudo_len = htons(len + sizeof(struct tcphdr) + optlen);
                MD5Update(&ctx, (char *)&ippseudo, sizeof(struct ippseudo));
                th = (struct tcphdr *)((u_char *)ip + sizeof(struct ip));
                doff = sizeof(struct ip) + sizeof(struct tcphdr) + optlen;
                break;
#ifdef INET6
        /*
         * RFC 2385, 2.0 Proposal
         * For IPv6, the pseudo-header is as described in RFC 2460, namely the
         * 128-bit source IPv6 address, 128-bit destination IPv6 address, zero-
         * extended next header value (to form 32 bits), and 32-bit segment
         * length.
         * Note: Upper-Layer Packet Length comes before Next Header.
         */
        case (IPV6_VERSION >> 4):
                in6 = ip6->ip6_src;
                in6_clearscope(&in6);
                MD5Update(&ctx, (char *)&in6, sizeof(struct in6_addr));
                in6 = ip6->ip6_dst;
                in6_clearscope(&in6);
                MD5Update(&ctx, (char *)&in6, sizeof(struct in6_addr));
                plen = htonl(len + sizeof(struct tcphdr) + optlen);
                MD5Update(&ctx, (char *)&plen, sizeof(uint32_t));
                nhdr = 0;
                MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
                MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
                MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
                nhdr = IPPROTO_TCP;
                MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
                th = (struct tcphdr *)((u_char *)ip6 + sizeof(struct ip6_hdr));
                doff = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + optlen;
                break;
#endif /* INET6 */
        default:
                return (EINVAL);
                /* NOTREACHED */
                break;
        }
        /*
         * Step 2: Update MD5 hash with TCP header, excluding options.
         * The TCP checksum must be set to zero.
         */
        savecsum = th->th_sum;
        th->th_sum = 0;
        MD5Update(&ctx, (char *)th, sizeof(struct tcphdr));
        th->th_sum = savecsum;
        /*
         * Step 3: Update MD5 hash with TCP segment data.
         * Use m_apply() to avoid an early m_pullup().
         */
        if (len > 0)
                m_apply(m, doff, len, tcpsignature_apply, &ctx);
        /*
         * Step 4: Update MD5 hash with shared secret.
         */
        MD5Update(&ctx, _KEYBUF(sav->key_auth), _KEYLEN(sav->key_auth));
        MD5Final(buf, &ctx);
        key_sa_recordxfer(sav, m);
        key_freesav(sav);
        return (0);
}

int
tcpsignature_apply(void *fstate, void *data, unsigned int len)
{

        MD5Update((MD5_CTX *)fstate, (unsigned char *)data, len);
        return (0);
}
#endif /* TCP_SIGNATURE */

static void
tcp_drop_sysctl_dispatch(netmsg_t nmsg)
{
        struct lwkt_msg *lmsg = &nmsg->lmsg;
        /*
         * addrs[0] is a foreign socket, addrs[1] is a local one.
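         * The sysctl handler below forwards the request to the netisr
         * thread that owns the connection, so the pcb lookup and the
         * tcp_drop() here run serialized with the rest of the TCP
         * processing for that connection.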
         */
        struct sockaddr_storage *addrs = lmsg->u.ms_resultp;
        int error;
        struct sockaddr_in *fin, *lin;
#ifdef INET6
        struct sockaddr_in6 *fin6, *lin6;
        struct in6_addr f6, l6;
#endif
        struct inpcb *inp;

        switch (addrs[0].ss_family) {
#ifdef INET6
        case AF_INET6:
                fin6 = (struct sockaddr_in6 *)&addrs[0];
                lin6 = (struct sockaddr_in6 *)&addrs[1];
                error = in6_embedscope(&f6, fin6, NULL, NULL);
                if (error)
                        goto done;
                error = in6_embedscope(&l6, lin6, NULL, NULL);
                if (error)
                        goto done;
                inp = in6_pcblookup_hash(&tcbinfo[mycpuid], &f6,
                    fin6->sin6_port, &l6, lin6->sin6_port, FALSE, NULL);
                break;
#endif
#ifdef INET
        case AF_INET:
                fin = (struct sockaddr_in *)&addrs[0];
                lin = (struct sockaddr_in *)&addrs[1];
                inp = in_pcblookup_hash(&tcbinfo[mycpuid], fin->sin_addr,
                    fin->sin_port, lin->sin_addr, lin->sin_port, FALSE, NULL);
                break;
#endif
        default:
                /*
                 * Must not reach here, since the address family was
                 * checked in the sysctl handler.
                 */
                panic("unknown address family %d", addrs[0].ss_family);
        }
        if (inp != NULL) {
                struct tcpcb *tp = intotcpcb(inp);

                KASSERT((inp->inp_flags & INP_WILDCARD) == 0,
                    ("in wildcard hash"));
                KASSERT(tp != NULL, ("tcp_drop_sysctl_dispatch: tp is NULL"));
                KASSERT((tp->t_flags & TF_LISTEN) == 0, ("listen socket"));
                tcp_drop(tp, ECONNABORTED);
                error = 0;
        } else {
                error = ESRCH;
        }
#ifdef INET6
done:
#endif
        lwkt_replymsg(lmsg, error);
}

static int
sysctl_tcp_drop(SYSCTL_HANDLER_ARGS)
{
        /* addrs[0] is a foreign socket, addrs[1] is a local one. */
        struct sockaddr_storage addrs[2];
        struct sockaddr_in *fin, *lin;
#ifdef INET6
        struct sockaddr_in6 *fin6, *lin6;
#endif
        struct netmsg_base nmsg;
        struct lwkt_msg *lmsg = &nmsg.lmsg;
        struct lwkt_port *port = NULL;
        int error;

        fin = lin = NULL;
#ifdef INET6
        fin6 = lin6 = NULL;
#endif
        error = 0;

        if (req->oldptr != NULL || req->oldlen != 0)
                return (EINVAL);
        if (req->newptr == NULL)
                return (EPERM);
        if (req->newlen < sizeof(addrs))
                return (ENOMEM);
        error = SYSCTL_IN(req, &addrs, sizeof(addrs));
        if (error)
                return (error);

        switch (addrs[0].ss_family) {
#ifdef INET6
        case AF_INET6:
                fin6 = (struct sockaddr_in6 *)&addrs[0];
                lin6 = (struct sockaddr_in6 *)&addrs[1];
                if (fin6->sin6_len != sizeof(struct sockaddr_in6) ||
                    lin6->sin6_len != sizeof(struct sockaddr_in6))
                        return (EINVAL);
                if (IN6_IS_ADDR_V4MAPPED(&fin6->sin6_addr) ||
                    IN6_IS_ADDR_V4MAPPED(&lin6->sin6_addr))
                        return (EADDRNOTAVAIL);
#if 0
                error = sa6_embedscope(fin6, V_ip6_use_defzone);
                if (error)
                        return (error);
                error = sa6_embedscope(lin6, V_ip6_use_defzone);
                if (error)
                        return (error);
#endif
                port = tcp6_addrport();
                break;
#endif
#ifdef INET
        case AF_INET:
                fin = (struct sockaddr_in *)&addrs[0];
                lin = (struct sockaddr_in *)&addrs[1];
                if (fin->sin_len != sizeof(struct sockaddr_in) ||
                    lin->sin_len != sizeof(struct sockaddr_in))
                        return (EINVAL);
                port = tcp_addrport(fin->sin_addr.s_addr, fin->sin_port,
                    lin->sin_addr.s_addr, lin->sin_port);
                break;
#endif
        default:
                return (EINVAL);
        }

        netmsg_init(&nmsg, NULL,
            &curthread->td_msgport, 0, tcp_drop_sysctl_dispatch);
        lmsg->u.ms_resultp = addrs;
        return lwkt_domsg(port, lmsg, 0);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, drop,
    CTLTYPE_STRUCT | CTLFLAG_WR | CTLFLAG_SKIP, NULL,
    0, sysctl_tcp_drop, "", "Drop TCP connection");
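
/*
 * Usage sketch (illustrative only, not part of the kernel build): a
 * privileged userland program could abort an established IPv4
 * connection through the sysctl above by passing the foreign and
 * local endpoints as two sockaddr_storage structures, foreign
 * address first.  The endpoint values below are made-up examples.
 *
 *	struct sockaddr_storage addrs[2];
 *	struct sockaddr_in *fin = (struct sockaddr_in *)&addrs[0];
 *	struct sockaddr_in *lin = (struct sockaddr_in *)&addrs[1];
 *
 *	bzero(addrs, sizeof(addrs));
 *	fin->sin_len = lin->sin_len = sizeof(struct sockaddr_in);
 *	fin->sin_family = lin->sin_family = AF_INET;
 *	inet_pton(AF_INET, "10.0.0.2", &fin->sin_addr);
 *	fin->sin_port = htons(12345);
 *	inet_pton(AF_INET, "10.0.0.1", &lin->sin_addr);
 *	lin->sin_port = htons(80);
 *	if (sysctlbyname("net.inet.tcp.drop", NULL, NULL,
 *	    addrs, sizeof(addrs)) < 0)
 *		err(1, "sysctlbyname");
 */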