/*	$OpenBSD: tcp_subr.c,v 1.68 2003/07/09 22:03:16 itojun Exp $	*/
/*	$NetBSD: tcp_subr.c,v 1.22 1996/02/13 23:44:00 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)COPYRIGHT	1.1 (NRL) 17 January 1995
 *
 * NRL grants permission for redistribution and use in source and binary
 * forms, with or without modification, of the software and documentation
 * created at NRL provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgements:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 *	This product includes software developed at the Information
 *	Technology Division, US Naval Research Laboratory.
 * 4. Neither the name of the NRL nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THE SOFTWARE PROVIDED BY NRL IS PROVIDED BY NRL AND CONTRIBUTORS ``AS
 * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL NRL OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation
 * are those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of the US Naval
 * Research Laboratory (NRL).
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/kernel.h>

#include <net/route.h>
#include <net/if.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <dev/rndvar.h>

#ifdef INET6
#include <netinet6/in6_var.h>
#include <netinet6/ip6protosw.h>
#endif /* INET6 */

#ifdef TCP_SIGNATURE
#include <sys/md5k.h>
#endif /* TCP_SIGNATURE */

/* patchable/settable parameters for tcp */
int	tcp_mssdflt = TCP_MSS;
int	tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;

/*
 * Configure kernel with options "TCP_DO_RFC1323=0" to disable RFC1323 stuff.
 * This is a good idea over slow SLIP/PPP links, because the timestamp
 * pretty well destroys the VJ compression (any packet with a timestamp
 * different from the previous one can't be compressed), as well as adding
 * more overhead.
 * XXX And it should be a settable per route characteristic (with this just
 * used as the default).
 */
#ifndef TCP_DO_RFC1323
#define TCP_DO_RFC1323	1
#endif
int	tcp_do_rfc1323 = TCP_DO_RFC1323;

#ifndef TCP_DO_SACK
#ifdef TCP_SACK
#define TCP_DO_SACK	1
#else
#define TCP_DO_SACK	0
#endif
#endif
int	tcp_do_sack = TCP_DO_SACK;	/* RFC 2018 selective ACKs */
int	tcp_ack_on_push = 0;	/* set to enable immediate ACK-on-PUSH */
int	tcp_do_ecn = 0;		/* RFC3168 ECN enabled/disabled? */
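
/*
 * tcp_now below is used as the RFC 1323 timestamp clock: it is seeded
 * with a random value in tcp_init() and advanced outside this file by
 * the TCP slow timer code.
 */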

u_int32_t	tcp_now;

#ifndef TCBHASHSIZE
#define	TCBHASHSIZE	128
#endif
int	tcbhashsize = TCBHASHSIZE;

#ifdef INET6
extern int ip6_defhlim;
#endif /* INET6 */

struct pool tcpcb_pool;
#ifdef TCP_SACK
struct pool sackhl_pool;
#endif

int tcp_freeq(struct tcpcb *);

struct tcpstat tcpstat;		/* tcp statistics */
tcp_seq	tcp_iss;

/*
 * Tcp initialization
 */
void
tcp_init()
{
#ifdef TCP_COMPAT_42
	tcp_iss = 1;		/* wrong */
#endif /* TCP_COMPAT_42 */
	pool_init(&tcpcb_pool, sizeof(struct tcpcb), 0, 0, 0, "tcpcbpl",
	    NULL);
#ifdef TCP_SACK
	pool_init(&sackhl_pool, sizeof(struct sackhole), 0, 0, 0, "sackhlpl",
	    NULL);
#endif /* TCP_SACK */
	in_pcbinit(&tcbtable, tcbhashsize);
	tcp_now = arc4random() / 2;

#ifdef INET6
	/*
	 * Since sizeof(struct ip6_hdr) > sizeof(struct ip), we
	 * do max length checks/computations only on the former.
	 */
	if (max_protohdr < (sizeof(struct ip6_hdr) + sizeof(struct tcphdr)))
		max_protohdr = (sizeof(struct ip6_hdr) + sizeof(struct tcphdr));
	if ((max_linkhdr + sizeof(struct ip6_hdr) + sizeof(struct tcphdr)) >
	    MHLEN)
		panic("tcp_init");

	icmp6_mtudisc_callback_register(tcp6_mtudisc_callback);
#endif /* INET6 */

	/* Initialize timer state. */
	tcp_timer_init();
}

/*
 * Create template to be used to send tcp packets on a connection.
 * Call after host entry created, allocates an mbuf and fills
 * in a skeletal tcp/ip header, minimizing the amount of work
 * necessary when the connection is used.
 *
 * To support IPv6 in addition to IPv4 and considering that the sizes of
 * the IPv4 and IPv6 headers are not the same, we now use a separate pointer
 * for the TCP header.  Also, we made the former tcpiphdr header pointer
 * into just an IP overlay pointer, with casting as appropriate for v6. rja
 */
struct mbuf *
tcp_template(tp)
	struct tcpcb *tp;
{
	register struct inpcb *inp = tp->t_inpcb;
	register struct mbuf *m;
	register struct tcphdr *th;

	if ((m = tp->t_template) == 0) {
		m = m_get(M_DONTWAIT, MT_HEADER);
		if (m == NULL)
			return (0);

		switch (tp->pf) {
		case 0:	/*default to PF_INET*/
#ifdef INET
		case AF_INET:
			m->m_len = sizeof(struct ip);
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:
			m->m_len = sizeof(struct ip6_hdr);
			break;
#endif /* INET6 */
		}
		m->m_len += sizeof (struct tcphdr);

		/*
		 * The link header, network header, TCP header, and TCP options
		 * all must fit in this mbuf.  For now, assume the worst case of
		 * TCP options size.  Eventually, compute this from tp flags.
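		 * (As a rough bound, assuming the usual constants: a 40-byte
		 * IPv6 header plus a 20-byte TCP header plus MAX_TCPOPTLEN
		 * bytes of options plus max_linkhdr may not fit in MHLEN,
		 * which is why a cluster is grabbed below in that case.)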
		 */
		if (m->m_len + MAX_TCPOPTLEN + max_linkhdr >= MHLEN) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_free(m);
				return (0);
			}
		}
	}

	switch(tp->pf) {
#ifdef INET
	case AF_INET:
		{
			struct ipovly *ipovly;

			ipovly = mtod(m, struct ipovly *);

			bzero(ipovly->ih_x1, sizeof ipovly->ih_x1);
			ipovly->ih_pr = IPPROTO_TCP;
			ipovly->ih_len = htons(sizeof (struct tcphdr));
			ipovly->ih_src = inp->inp_laddr;
			ipovly->ih_dst = inp->inp_faddr;

			th = (struct tcphdr *)(mtod(m, caddr_t) +
			    sizeof(struct ip));
			th->th_sum = in_cksum_phdr(ipovly->ih_src.s_addr,
			    ipovly->ih_dst.s_addr,
			    htons(sizeof (struct tcphdr) + IPPROTO_TCP));
		}
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		{
			struct ip6_hdr *ip6;

			ip6 = mtod(m, struct ip6_hdr *);

			ip6->ip6_src = inp->inp_laddr6;
			ip6->ip6_dst = inp->inp_faddr6;
			ip6->ip6_flow = htonl(0x60000000) |
			    (inp->inp_ipv6.ip6_flow & htonl(0x0fffffff));

			ip6->ip6_nxt = IPPROTO_TCP;
			ip6->ip6_plen = htons(sizeof(struct tcphdr)); /*XXX*/
			ip6->ip6_hlim = in6_selecthlim(inp, NULL);	/*XXX*/

			th = (struct tcphdr *)(mtod(m, caddr_t) +
			    sizeof(struct ip6_hdr));
			th->th_sum = 0;
		}
		break;
#endif /* INET6 */
	}

	th->th_sport = inp->inp_lport;
	th->th_dport = inp->inp_fport;
	th->th_seq = 0;
	th->th_ack = 0;
	th->th_x2 = 0;
	th->th_off = 5;
	th->th_flags = 0;
	th->th_win = 0;
	th->th_urp = 0;
	return (m);
}

/*
 * Send a single message to the TCP at address specified by
 * the given TCP/IP header.  If m == 0, then we make a copy
 * of the tcpiphdr at ti and send directly to the addressed host.
 * This is used to force keep alive messages out using the TCP
 * template for a connection tp->t_template.  If flags are given
 * then we send a message back to the TCP which originated the
 * segment ti, and discard the mbuf containing it and any other
 * attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 */
#ifdef INET6
/* This function looks hairy, because it was so IPv4-dependent. */
#endif /* INET6 */
void
tcp_respond(tp, template, m, ack, seq, flags)
	struct tcpcb *tp;
	caddr_t template;
	register struct mbuf *m;
	tcp_seq ack, seq;
	int flags;
{
	register int tlen;
	int win = 0;
	struct route *ro = 0;
	register struct tcphdr *th;
	register struct tcpiphdr *ti = (struct tcpiphdr *)template;
	int af;		/* af on wire */

	if (tp) {
		win = sbspace(&tp->t_inpcb->inp_socket->so_rcv);
		/*
		 * If this is called with an unconnected
		 * socket/tp/pcb (tp->pf is 0), we lose.
		 */
		af = tp->pf;

		/*
		 * The route/route6 distinction is meaningless
		 * unless you're allocating space or passing parameters.
		 */
		ro = &tp->t_inpcb->inp_route;
	} else
		af = (((struct ip *)ti)->ip_v == 6) ? AF_INET6 : AF_INET;
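	/*
	 * Two cases follow.  With no mbuf supplied (m == 0) a fresh header
	 * is copied out of the template at `ti' (the keepalive probe case);
	 * otherwise the offending segment's own header is reused with the
	 * addresses and ports swapped, so the reply (typically an RST or
	 * ACK) goes back to the originator.
	 */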
	if (m == 0) {
		m = m_gethdr(M_DONTWAIT, MT_HEADER);
		if (m == NULL)
			return;
#ifdef TCP_COMPAT_42
		tlen = 1;
#else
		tlen = 0;
#endif
		m->m_data += max_linkhdr;
		switch (af) {
#ifdef INET6
		case AF_INET6:
			bcopy(ti, mtod(m, caddr_t), sizeof(struct tcphdr) +
			    sizeof(struct ip6_hdr));
			break;
#endif /* INET6 */
		case AF_INET:
			bcopy(ti, mtod(m, caddr_t), sizeof(struct tcphdr) +
			    sizeof(struct ip));
			break;
		}

		ti = mtod(m, struct tcpiphdr *);
		flags = TH_ACK;
	} else {
		m_freem(m->m_next);
		m->m_next = 0;
		m->m_data = (caddr_t)ti;
		tlen = 0;
#define xchg(a,b,type) do { type t; t=a; a=b; b=t; } while (0)
		switch (af) {
#ifdef INET6
		case AF_INET6:
			m->m_len = sizeof(struct tcphdr) + sizeof(struct ip6_hdr);
			xchg(((struct ip6_hdr *)ti)->ip6_dst,
			    ((struct ip6_hdr *)ti)->ip6_src, struct in6_addr);
			th = (void *)((caddr_t)ti + sizeof(struct ip6_hdr));
			break;
#endif /* INET6 */
		case AF_INET:
			m->m_len = sizeof (struct tcpiphdr);
			xchg(ti->ti_dst.s_addr, ti->ti_src.s_addr, u_int32_t);
			th = (void *)((caddr_t)ti + sizeof(struct ip));
			break;
		}
		xchg(th->th_dport, th->th_sport, u_int16_t);
#undef xchg
	}
	switch (af) {
#ifdef INET6
	case AF_INET6:
		tlen += sizeof(struct tcphdr) + sizeof(struct ip6_hdr);
		th = (struct tcphdr *)((caddr_t)ti + sizeof(struct ip6_hdr));
		break;
#endif /* INET6 */
	case AF_INET:
		ti->ti_len = htons((u_int16_t)(sizeof (struct tcphdr) + tlen));
		tlen += sizeof (struct tcpiphdr);
		th = (struct tcphdr *)((caddr_t)ti + sizeof(struct ip));
		break;
	}

	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = (struct ifnet *) 0;
	th->th_seq = htonl(seq);
	th->th_ack = htonl(ack);
	th->th_x2 = 0;
	th->th_off = sizeof (struct tcphdr) >> 2;
	th->th_flags = flags;
	if (tp)
		win >>= tp->rcv_scale;
	if (win > TCP_MAXWIN)
		win = TCP_MAXWIN;
	th->th_win = htons((u_int16_t)win);
	th->th_urp = 0;

	switch (af) {
#ifdef INET6
	case AF_INET6:
		((struct ip6_hdr *)ti)->ip6_flow = htonl(0x60000000);
		((struct ip6_hdr *)ti)->ip6_nxt = IPPROTO_TCP;
		((struct ip6_hdr *)ti)->ip6_hlim =
		    in6_selecthlim(tp ? tp->t_inpcb : NULL, NULL);	/*XXX*/
		((struct ip6_hdr *)ti)->ip6_plen = tlen - sizeof(struct ip6_hdr);
		th->th_sum = 0;
		th->th_sum = in6_cksum(m, IPPROTO_TCP,
		    sizeof(struct ip6_hdr), ((struct ip6_hdr *)ti)->ip6_plen);
		HTONS(((struct ip6_hdr *)ti)->ip6_plen);
		ip6_output(m, tp ? tp->t_inpcb->inp_outputopts6 : NULL,
		    (struct route_in6 *)ro, 0, NULL, NULL);
		break;
#endif /* INET6 */
	case AF_INET:
		bzero(ti->ti_x1, sizeof ti->ti_x1);
		ti->ti_len = htons((u_short)tlen - sizeof(struct ip));

		/*
		 * There's no point deferring to hardware checksum processing
		 * here, as we only send a minimal TCP packet whose checksum
		 * we need to compute in any case.
		 */
		th->th_sum = 0;
		th->th_sum = in_cksum(m, tlen);
		((struct ip *)ti)->ip_len = htons(tlen);
		((struct ip *)ti)->ip_ttl = ip_defttl;
		ip_output(m, (void *)NULL, ro, ip_mtudisc ? IP_MTUDISC : 0,
		    (void *)NULL, tp ? tp->t_inpcb : (void *)NULL);
	}
}

/*
 * Create a new TCP control block, making an
 * empty reassembly queue and hooking it to the argument
 * protocol control block.
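 *
 * A NULL return means the tcpcb pool could not satisfy a PR_NOWAIT
 * allocation; callers must be prepared to handle that.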
 */
struct tcpcb *
tcp_newtcpcb(struct inpcb *inp)
{
	struct tcpcb *tp;
	int i;

	tp = pool_get(&tcpcb_pool, PR_NOWAIT);
	if (tp == NULL)
		return ((struct tcpcb *)0);
	bzero((char *) tp, sizeof(struct tcpcb));
	LIST_INIT(&tp->segq);
	tp->t_maxseg = tcp_mssdflt;
	tp->t_maxopd = 0;

	TCP_INIT_DELACK(tp);
	for (i = 0; i < TCPT_NTIMERS; i++)
		TCP_TIMER_INIT(tp, i);

#ifdef TCP_SACK
	tp->sack_disable = tcp_do_sack ? 0 : 1;
#endif
	tp->t_flags = tcp_do_rfc1323 ? (TF_REQ_SCALE|TF_REQ_TSTMP) : 0;
	tp->t_inpcb = inp;
	/*
	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
	 * rtt estimate.  Set rttvar so that srtt + 2 * rttvar gives
	 * reasonable initial retransmit time.
	 */
	tp->t_srtt = TCPTV_SRTTBASE;
	tp->t_rttvar = tcp_rttdflt * PR_SLOWHZ << (TCP_RTTVAR_SHIFT + 2 - 1);
	tp->t_rttmin = TCPTV_MIN;
	TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
	    TCPTV_MIN, TCPTV_REXMTMAX);
	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
#ifdef INET6
	/* we disallow IPv4 mapped address completely. */
	if ((inp->inp_flags & INP_IPV6) == 0)
		tp->pf = PF_INET;
	else
		tp->pf = PF_INET6;
#else
	tp->pf = PF_INET;
#endif

#ifdef INET6
	if (inp->inp_flags & INP_IPV6)
		inp->inp_ipv6.ip6_hlim = ip6_defhlim;
	else
#endif /* INET6 */
		inp->inp_ip.ip_ttl = ip_defttl;

	inp->inp_ppcb = (caddr_t)tp;
	return (tp);
}

/*
 * Drop a TCP connection, reporting
 * the specified error.  If connection is synchronized,
 * then send a RST to peer.
 */
struct tcpcb *
tcp_drop(tp, errno)
	register struct tcpcb *tp;
	int errno;
{
	struct socket *so = tp->t_inpcb->inp_socket;

	if (TCPS_HAVERCVDSYN(tp->t_state)) {
		tp->t_state = TCPS_CLOSED;
		(void) tcp_output(tp);
		tcpstat.tcps_drops++;
	} else
		tcpstat.tcps_conndrops++;
	if (errno == ETIMEDOUT && tp->t_softerror)
		errno = tp->t_softerror;
	so->so_error = errno;
	return (tcp_close(tp));
}

/*
 * Close a TCP control block:
 *	discard all space held by the tcp
 *	discard internet protocol block
 *	wake up any sleepers
 */
struct tcpcb *
tcp_close(struct tcpcb *tp)
{
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
#ifdef TCP_SACK
	struct sackhole *p, *q;
#endif
#ifdef RTV_RTT
	register struct rtentry *rt;
#ifdef INET6
	register int bound_to_specific = 0;	/* I.e. non-default */

	/*
	 * This code checks the nature of the route for this connection.
	 * Normally this is done by two simple checks in the next
	 * INET/INET6 ifdef block, but because of two possible lower layers,
	 * that check is done here.
	 *
	 * Perhaps this should be done only for an RTF_HOST route.
	 */
	rt = inp->inp_route.ro_rt;	/* Same for route or route6. */
	if (tp->pf == PF_INET6) {
		if (rt)
			bound_to_specific =
			    !(IN6_IS_ADDR_UNSPECIFIED(&
			    ((struct sockaddr_in6 *)rt_key(rt))->sin6_addr));
	} else {
		if (rt)
			bound_to_specific =
			    (((struct sockaddr_in *)rt_key(rt))->
			    sin_addr.s_addr != INADDR_ANY);
	}
#endif /* INET6 */

	/*
	 * If we sent enough data to get some meaningful characteristics,
	 * save them in the routing entry.  'Enough' is arbitrarily
	 * defined as the sendpipesize (default 4K) * 16.  This would
	 * give us 16 rtt samples assuming we only get one sample per
	 * window (the usual case on a long haul net).  16 samples is
	 * enough for the srtt filter to converge to within 5% of the correct
	 * value; fewer samples and we could save a very bogus rtt.
	 *
	 * Don't update the default route's characteristics and don't
	 * update anything that the user "locked".
	 */
#ifdef INET6
	/*
	 * Note that rt and bound_to_specific are set above.
	 */
	if (SEQ_LT(tp->iss + so->so_snd.sb_hiwat * 16, tp->snd_max) &&
	    rt && bound_to_specific) {
#else /* INET6 */
	if (SEQ_LT(tp->iss + so->so_snd.sb_hiwat * 16, tp->snd_max) &&
	    (rt = inp->inp_route.ro_rt) &&
	    satosin(rt_key(rt))->sin_addr.s_addr != INADDR_ANY) {
#endif /* INET6 */
		register u_long i = 0;

		if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) {
			i = tp->t_srtt *
			    (RTM_RTTUNIT / (PR_SLOWHZ * TCP_RTT_SCALE));
			if (rt->rt_rmx.rmx_rtt && i)
				/*
				 * filter this update to half the old & half
				 * the new values, converting scale.
				 * See route.h and tcp_var.h for a
				 * description of the scaling constants.
				 */
				rt->rt_rmx.rmx_rtt =
				    (rt->rt_rmx.rmx_rtt + i) / 2;
			else
				rt->rt_rmx.rmx_rtt = i;
		}
		if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) {
			i = tp->t_rttvar *
			    (RTM_RTTUNIT / (PR_SLOWHZ * TCP_RTTVAR_SCALE));
			if (rt->rt_rmx.rmx_rttvar && i)
				rt->rt_rmx.rmx_rttvar =
				    (rt->rt_rmx.rmx_rttvar + i) / 2;
			else
				rt->rt_rmx.rmx_rttvar = i;
		}
		/*
		 * update the pipelimit (ssthresh) if it has been updated
		 * already or if a pipesize was specified & the threshold
		 * got below half the pipesize.  I.e., wait for bad news
		 * before we start updating, then update on both good
		 * and bad news.
		 */
		if (((rt->rt_rmx.rmx_locks & RTV_SSTHRESH) == 0 &&
		    (i = tp->snd_ssthresh) && rt->rt_rmx.rmx_ssthresh) ||
		    i < (rt->rt_rmx.rmx_sendpipe / 2)) {
			/*
			 * convert the limit from user data bytes to
			 * packets then to packet data bytes.
			 */
			i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
			if (i < 2)
				i = 2;
#ifdef INET6
			if (tp->pf == PF_INET6)
				i *= (u_long)(tp->t_maxseg + sizeof (struct tcphdr)
				    + sizeof(struct ip6_hdr));
			else
#endif /* INET6 */
				i *= (u_long)(tp->t_maxseg +
				    sizeof (struct tcpiphdr));

			if (rt->rt_rmx.rmx_ssthresh)
				rt->rt_rmx.rmx_ssthresh =
				    (rt->rt_rmx.rmx_ssthresh + i) / 2;
			else
				rt->rt_rmx.rmx_ssthresh = i;
		}
	}
#endif /* RTV_RTT */

	/* free the reassembly queue, if any */
	tcp_freeq(tp);

	tcp_canceltimers(tp);
	TCP_CLEAR_DELACK(tp);

#ifdef TCP_SACK
	/* Free SACK holes. */
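	/* The list is singly linked; save each successor before freeing. */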
	q = p = tp->snd_holes;
	while (p != 0) {
		q = p->next;
		pool_put(&sackhl_pool, p);
		p = q;
	}
#endif
	if (tp->t_template)
		(void) m_free(tp->t_template);
	pool_put(&tcpcb_pool, tp);
	inp->inp_ppcb = 0;
	soisdisconnected(so);
	in_pcbdetach(inp);
	tcpstat.tcps_closed++;
	return ((struct tcpcb *)0);
}

int
tcp_freeq(struct tcpcb *tp)
{
	struct ipqent *qe;
	int rv = 0;

	while ((qe = LIST_FIRST(&tp->segq)) != NULL) {
		LIST_REMOVE(qe, ipqe_q);
		m_freem(qe->ipqe_m);
		pool_put(&ipqent_pool, qe);
		rv = 1;
	}
	return (rv);
}

void
tcp_drain()
{

}

/*
 * Compute proper scaling value for receiver window from buffer space
 */

void
tcp_rscale(struct tcpcb *tp, u_long hiwat)
{
	tp->request_r_scale = 0;
	while (tp->request_r_scale < TCP_MAX_WINSHIFT &&
	    TCP_MAXWIN << tp->request_r_scale < hiwat)
		tp->request_r_scale++;
}

/*
 * Notify a tcp user of an asynchronous error;
 * store error as soft error, but wake up user
 * (for now, won't do anything until can select for soft error).
 */
void
tcp_notify(inp, error)
	struct inpcb *inp;
	int error;
{
	register struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb;
	register struct socket *so = inp->inp_socket;

	/*
	 * Ignore some errors if we are hooked up.
	 * If connection hasn't completed, has retransmitted several times,
	 * and receives a second error, give up now.  This is better
	 * than waiting a long time to establish a connection that
	 * can never complete.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (error == EHOSTUNREACH || error == ENETUNREACH ||
	    error == EHOSTDOWN)) {
		return;
	} else if (TCPS_HAVEESTABLISHED(tp->t_state) == 0 &&
	    tp->t_rxtshift > 3 && tp->t_softerror)
		so->so_error = error;
	else
		tp->t_softerror = error;
	wakeup((caddr_t) &so->so_timeo);
	sorwakeup(so);
	sowwakeup(so);
}

#ifdef INET6
void
tcp6_ctlinput(cmd, sa, d)
	int cmd;
	struct sockaddr *sa;
	void *d;
{
	struct tcphdr th;
	void (*notify)(struct inpcb *, int) = tcp_notify;
	struct ip6_hdr *ip6;
	const struct sockaddr_in6 *sa6_src = NULL;
	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *)sa;
	struct mbuf *m;
	int off;
	struct {
		u_int16_t th_sport;
		u_int16_t th_dport;
	} *thp;

	if (sa->sa_family != AF_INET6 ||
	    sa->sa_len != sizeof(struct sockaddr_in6))
		return;
	if ((unsigned)cmd >= PRC_NCMDS)
		return;
	else if (cmd == PRC_QUENCH) {
		/* XXX there's no PRC_QUENCH in IPv6 */
		notify = tcp_quench;
	} else if (PRC_IS_REDIRECT(cmd))
		notify = in_rtchange, d = NULL;
	else if (cmd == PRC_MSGSIZE)
		; /* special code is present, see below */
	else if (cmd == PRC_HOSTDEAD)
		d = NULL;
	else if (inet6ctlerrmap[cmd] == 0)
		return;

	/* if the parameter is from icmp6, decode it. */
	if (d != NULL) {
		struct ip6ctlparam *ip6cp = (struct ip6ctlparam *)d;
		m = ip6cp->ip6c_m;
		ip6 = ip6cp->ip6c_ip6;
		off = ip6cp->ip6c_off;
		sa6_src = ip6cp->ip6c_src;
	} else {
		m = NULL;
		ip6 = NULL;
		sa6_src = &sa6_any;
	}

	if (ip6) {
		/*
		 * XXX: We assume that when ip6 is non NULL,
		 * M and OFF are valid.
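		 * Only the first four bytes of the TCP header (the port
		 * pair) are needed below, so the length check and the
		 * m_copydata() call are limited to sizeof(*thp).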
		 */

		/* check if we can safely examine src and dst ports */
		if (m->m_pkthdr.len < off + sizeof(*thp))
			return;

		bzero(&th, sizeof(th));
#ifdef DIAGNOSTIC
		if (sizeof(*thp) > sizeof(th))
			panic("assumption failed in tcp6_ctlinput");
#endif
		m_copydata(m, off, sizeof(*thp), (caddr_t)&th);

		if (cmd == PRC_MSGSIZE) {
			int valid = 0;

			/*
			 * Check to see if we have a valid TCP connection
			 * corresponding to the address in the ICMPv6 message
			 * payload.
			 */
			if (in6_pcbhashlookup(&tcbtable, &sa6->sin6_addr,
			    th.th_dport, (struct in6_addr *)&sa6_src->sin6_addr,
			    th.th_sport))
				valid++;
			else if (in_pcblookup(&tcbtable, &sa6->sin6_addr,
			    th.th_dport, (struct in6_addr *)&sa6_src->sin6_addr,
			    th.th_sport, INPLOOKUP_IPV6))
				valid++;

			/*
			 * Depending on the value of "valid" and routing table
			 * size (mtudisc_{hi,lo}wat), we will:
			 * - recalculate the new MTU and create the
			 *   corresponding routing entry, or
			 * - ignore the MTU change notification.
			 */
			icmp6_mtudisc_update((struct ip6ctlparam *)d, valid);

			return;
		}

		(void) in6_pcbnotify(&tcbtable, sa, th.th_dport,
		    (struct sockaddr *)sa6_src, th.th_sport, cmd, NULL, notify);
	} else {
		(void) in6_pcbnotify(&tcbtable, sa, 0,
		    (struct sockaddr *)sa6_src, 0, cmd, NULL, notify);
	}
}
#endif

void *
tcp_ctlinput(cmd, sa, v)
	int cmd;
	struct sockaddr *sa;
	register void *v;
{
	register struct ip *ip = v;
	register struct tcphdr *th;
	extern int inetctlerrmap[];
	void (*notify)(struct inpcb *, int) = tcp_notify;
	int errno;

	if (sa->sa_family != AF_INET)
		return NULL;

	if ((unsigned)cmd >= PRC_NCMDS)
		return NULL;
	errno = inetctlerrmap[cmd];
	if (cmd == PRC_QUENCH)
		notify = tcp_quench;
	else if (PRC_IS_REDIRECT(cmd))
		notify = in_rtchange, ip = 0;
	else if (cmd == PRC_MSGSIZE && ip_mtudisc) {
		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
		/*
		 * Verify that the packet in the icmp payload refers
		 * to an existing TCP connection.
		 */
		if (in_pcblookup(&tcbtable,
		    &ip->ip_dst, th->th_dport,
		    &ip->ip_src, th->th_sport,
		    INPLOOKUP_WILDCARD)) {
			struct icmp *icp;
			icp = (struct icmp *)((caddr_t)ip -
			    offsetof(struct icmp, icmp_ip));

			/* Calculate new mtu and create corresponding route */
			icmp_mtudisc(icp);
		}
		notify = tcp_mtudisc, ip = 0;
	} else if (cmd == PRC_MTUINC)
		notify = tcp_mtudisc_increase, ip = 0;
	else if (cmd == PRC_HOSTDEAD)
		ip = 0;
	else if (errno == 0)
		return NULL;

	if (ip) {
		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
		in_pcbnotify(&tcbtable, sa, th->th_dport, ip->ip_src,
		    th->th_sport, errno, notify);
	} else
		in_pcbnotifyall(&tcbtable, sa, errno, notify);

	return NULL;
}

/*
 * When a source quench is received, close congestion window
 * to one segment.  We will gradually open it again as we proceed.
 */
void
tcp_quench(inp, errno)
	struct inpcb *inp;
	int errno;
{
	struct tcpcb *tp = intotcpcb(inp);

	if (tp)
		tp->snd_cwnd = tp->t_maxseg;
}

#ifdef INET6
/*
 * Path MTU Discovery handlers.
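 *
 * tcp6_mtudisc_callback is registered with icmp6_mtudisc_callback_register()
 * in tcp_init() above; it sends a PRC_MSGSIZE notification to every TCP
 * PCB connected to the given foreign address, which in turn runs
 * tcp_mtudisc() below.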
 */
void
tcp6_mtudisc_callback(faddr)
	struct in6_addr *faddr;
{
	struct sockaddr_in6 sin6;

	bzero(&sin6, sizeof(sin6));
	sin6.sin6_family = AF_INET6;
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_addr = *faddr;
	(void) in6_pcbnotify(&tcbtable, (struct sockaddr *)&sin6, 0,
	    (struct sockaddr *)&sa6_any, 0, PRC_MSGSIZE, NULL, tcp_mtudisc);
}
#endif /* INET6 */

/*
 * On receipt of path MTU corrections, flush old route and replace it
 * with the new one.  Retransmit all unacknowledged packets, to ensure
 * that all packets will be received.
 */
void
tcp_mtudisc(inp, errno)
	struct inpcb *inp;
	int errno;
{
	struct tcpcb *tp = intotcpcb(inp);
	struct rtentry *rt = in_pcbrtentry(inp);

	if (tp != 0) {
		if (rt != 0) {
			/*
			 * If this was not a host route, remove and realloc.
			 */
			if ((rt->rt_flags & RTF_HOST) == 0) {
				in_rtchange(inp, errno);
				if ((rt = in_pcbrtentry(inp)) == 0)
					return;
			}

			if (rt->rt_rmx.rmx_mtu != 0) {
				/* also takes care of congestion window */
				tcp_mss(tp, -1);
			}
		}

		/*
		 * Resend unacknowledged packets.
		 */
		tp->snd_nxt = tp->snd_una;
		tcp_output(tp);
	}
}

void
tcp_mtudisc_increase(inp, errno)
	struct inpcb *inp;
	int errno;
{
	struct tcpcb *tp = intotcpcb(inp);
	struct rtentry *rt = in_pcbrtentry(inp);

	if (tp != 0 && rt != 0) {
		/*
		 * If this was a host route, remove and realloc.
		 */
		if (rt->rt_flags & RTF_HOST)
			in_rtchange(inp, errno);

		/* also takes care of congestion window */
		tcp_mss(tp, -1);
	}
}

#ifdef TCP_SIGNATURE
int
tcp_signature_tdb_attach()
{
	return (0);
}

int
tcp_signature_tdb_init(tdbp, xsp, ii)
	struct tdb *tdbp;
	struct xformsw *xsp;
	struct ipsecinit *ii;
{
	char *c;
#define isdigit(c)	(((c) >= '0') && ((c) <= '9'))
#define isalpha(c)	( (((c) >= 'A') && ((c) <= 'Z')) || \
			  (((c) >= 'a') && ((c) <= 'z')) )

	if ((ii->ii_authkeylen < 1) || (ii->ii_authkeylen > 80))
		return (EINVAL);

	c = (char *)ii->ii_authkey;

	while (c < (char *)ii->ii_authkey + ii->ii_authkeylen - 1) {
		if (isdigit(*c)) {
			if (*(c + 1) == ' ')
				return (EINVAL);
		} else {
			if (!isalpha(*c))
				return (EINVAL);
		}

		c++;
	}

	if (!isdigit(*c) && !isalpha(*c))
		return (EINVAL);

	tdbp->tdb_amxkey = malloc(ii->ii_authkeylen, M_XDATA, M_DONTWAIT);
	if (tdbp->tdb_amxkey == NULL)
		return (ENOMEM);
	bcopy(ii->ii_authkey, tdbp->tdb_amxkey, ii->ii_authkeylen);
	tdbp->tdb_amxkeylen = ii->ii_authkeylen;

	return (0);
}

int
tcp_signature_tdb_zeroize(tdbp)
	struct tdb *tdbp;
{
	if (tdbp->tdb_amxkey) {
		bzero(tdbp->tdb_amxkey, tdbp->tdb_amxkeylen);
		free(tdbp->tdb_amxkey, M_XDATA);
		tdbp->tdb_amxkey = NULL;
	}

	return (0);
}

int
tcp_signature_tdb_input(m, tdbp, skip, protoff)
	struct mbuf *m;
	struct tdb *tdbp;
	int skip, protoff;
{
	return (0);
}

int
tcp_signature_tdb_output(m, tdbp, mp, skip, protoff)
	struct mbuf *m;
	struct tdb *tdbp;
	struct mbuf **mp;
	int skip, protoff;
{
	return (EINVAL);
}

int
tcp_signature_apply(fstate, data, len)
	caddr_t fstate;
	caddr_t data;
	unsigned int len;
{
	MD5Update((MD5_CTX *)fstate, (char *)data, len);
	return 0;
}
#endif /* TCP_SIGNATURE */

#define TCP_RNDISS_ROUNDS	16
#define TCP_RNDISS_OUT	7200
#define TCP_RNDISS_MAX	30000

u_int8_t tcp_rndiss_sbox[128];
u_int16_t tcp_rndiss_msb;
u_int16_t tcp_rndiss_cnt;
long tcp_rndiss_reseed;

u_int16_t
tcp_rndiss_encrypt(val)
	u_int16_t val;
{
	u_int16_t sum = 0, i;

	for (i = 0; i < TCP_RNDISS_ROUNDS; i++) {
		sum += 0x79b9;
		val ^= ((u_int16_t)tcp_rndiss_sbox[(val^sum) & 0x7f]) << 7;
		val = ((val & 0xff) << 7) | (val >> 8);
	}

	return val;
}

void
tcp_rndiss_init()
{
	get_random_bytes(tcp_rndiss_sbox, sizeof(tcp_rndiss_sbox));

	tcp_rndiss_reseed = time.tv_sec + TCP_RNDISS_OUT;
	tcp_rndiss_msb = tcp_rndiss_msb == 0x8000 ? 0 : 0x8000;
	tcp_rndiss_cnt = 0;
}

tcp_seq
tcp_rndiss_next()
{
	if (tcp_rndiss_cnt >= TCP_RNDISS_MAX ||
	    time.tv_sec > tcp_rndiss_reseed)
		tcp_rndiss_init();

	/* (arc4random() & 0x7fff) ensures a 32768 byte gap between ISS */
	return ((tcp_rndiss_encrypt(tcp_rndiss_cnt++) | tcp_rndiss_msb) << 16) |
	    (arc4random() & 0x7fff);
}
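
/*
 * Summary of the randomized ISS generator above: tcp_rndiss_next()
 * builds a 32-bit ISS whose low 15 bits are fresh arc4random() output
 * and whose upper half is a 15-bit keyed permutation
 * (tcp_rndiss_encrypt()) of a per-reseed counter, together with a top
 * bit that alternates on every reseed so that values handed out just
 * before a reseed are not reused right after it.  The sbox is
 * refreshed every TCP_RNDISS_OUT seconds or after TCP_RNDISS_MAX
 * values have been issued.
 */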