/*	$NetBSD: tcp_subr.c,v 1.28 1997/09/22 21:50:02 thorpej Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/errno.h>
#include <sys/kernel.h>

#include <net/route.h>
#include <net/if.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>

/* patchable/settable parameters for tcp */
int	tcp_mssdflt = TCP_MSS;
int	tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
int	tcp_do_rfc1323 = 1;

#ifndef TCBHASHSIZE
#define	TCBHASHSIZE	128
#endif
int	tcbhashsize = TCBHASHSIZE;

/*
 * Tcp initialization
 */
void
tcp_init()
{

	tcp_iss = 1;		/* XXX wrong */
	in_pcbinit(&tcbtable, tcbhashsize, tcbhashsize);
	if (max_protohdr < sizeof(struct tcpiphdr))
		max_protohdr = sizeof(struct tcpiphdr);
	if (max_linkhdr + sizeof(struct tcpiphdr) > MHLEN)
		panic("tcp_init");
}

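/*
 * A note on the sanity check above: struct tcpiphdr is the 20-byte IP
 * overlay followed by the 20-byte TCP header, i.e. 40 bytes, so the
 * requirement enforced here is roughly
 *
 *	max_linkhdr + 40 <= MHLEN
 *
 * which leaves room for a link-layer header plus a TCP/IP header in
 * the data area of a single packet-header mbuf when output builds a
 * segment.
 */
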
/*
 * Create template to be used to send tcp packets on a connection.
 * Call after host entry created, allocates an mbuf and fills
 * in a skeletal tcp/ip header, minimizing the amount of work
 * necessary when the connection is used.
 */
struct tcpiphdr *
tcp_template(tp)
	struct tcpcb *tp;
{
	register struct inpcb *inp = tp->t_inpcb;
	register struct tcpiphdr *n;

	if ((n = tp->t_template) == 0) {
		MALLOC(n, struct tcpiphdr *, sizeof (struct tcpiphdr),
		    M_MBUF, M_NOWAIT);
		if (n == NULL)
			return (0);
	}
	bzero(n->ti_x1, sizeof n->ti_x1);
	n->ti_pr = IPPROTO_TCP;
	n->ti_len = htons(sizeof (struct tcpiphdr) - sizeof (struct ip));
	n->ti_src = inp->inp_laddr;
	n->ti_dst = inp->inp_faddr;
	n->ti_sport = inp->inp_lport;
	n->ti_dport = inp->inp_fport;
	n->ti_seq = 0;
	n->ti_ack = 0;
	n->ti_x2 = 0;
	n->ti_off = 5;
	n->ti_flags = 0;
	n->ti_win = 0;
	n->ti_sum = 0;
	n->ti_urp = 0;
	return (n);
}

/*
 * Send a single message to the TCP at address specified by
 * the given TCP/IP header.  If m == 0, then we make a copy
 * of the tcpiphdr at ti and send directly to the addressed host.
 * This is used to force keep alive messages out using the TCP
 * template for a connection tp->t_template.  If flags are given
 * then we send a message back to the TCP which originated the
 * segment ti, and discard the mbuf containing it and any other
 * attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 */
int
tcp_respond(tp, ti, m, ack, seq, flags)
	struct tcpcb *tp;
	register struct tcpiphdr *ti;
	register struct mbuf *m;
	tcp_seq ack, seq;
	int flags;
{
	register int tlen;
	int win = 0;
	struct route *ro = 0;

	if (tp) {
		win = sbspace(&tp->t_inpcb->inp_socket->so_rcv);
		ro = &tp->t_inpcb->inp_route;
	}
	if (m == 0) {
		m = m_gethdr(M_DONTWAIT, MT_HEADER);
		if (m == NULL)
			return (ENOBUFS);
#ifdef TCP_COMPAT_42
		tlen = 1;
#else
		tlen = 0;
#endif
		m->m_data += max_linkhdr;
		*mtod(m, struct tcpiphdr *) = *ti;
		ti = mtod(m, struct tcpiphdr *);
		flags = TH_ACK;
	} else {
		m_freem(m->m_next);
		m->m_next = 0;
		m->m_data = (caddr_t)ti;
		m->m_len = sizeof (struct tcpiphdr);
		tlen = 0;
#define xchg(a,b,type) { type t; t=a; a=b; b=t; }
		xchg(ti->ti_dst.s_addr, ti->ti_src.s_addr, u_int32_t);
		xchg(ti->ti_dport, ti->ti_sport, u_int16_t);
#undef xchg
	}
	bzero(ti->ti_x1, sizeof ti->ti_x1);
	ti->ti_seq = htonl(seq);
	ti->ti_ack = htonl(ack);
	ti->ti_x2 = 0;
	if ((flags & TH_SYN) == 0) {
		if (tp)
			ti->ti_win = htons((u_int16_t) (win >> tp->rcv_scale));
		else
			ti->ti_win = htons((u_int16_t)win);
		ti->ti_off = sizeof (struct tcphdr) >> 2;
		tlen += sizeof (struct tcphdr);
	} else
		tlen += ti->ti_off << 2;
	ti->ti_len = htons((u_int16_t)tlen);
	tlen += sizeof (struct ip);
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = (struct ifnet *) 0;
	ti->ti_flags = flags;
	ti->ti_urp = 0;
	ti->ti_sum = 0;
	ti->ti_sum = in_cksum(m, tlen);
	((struct ip *)ti)->ip_len = tlen;
	((struct ip *)ti)->ip_ttl = ip_defttl;
	return ip_output(m, NULL, ro, 0, NULL);
}

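/*
 * The two uses described above look roughly like the following at
 * their call sites (an illustrative sketch only; the real callers are
 * in tcp_timer.c and tcp_input.c and may differ in detail):
 */
#if 0	/* example only, not compiled */
	/* Keepalive probe: no mbuf, clone the connection's template. */
	tcp_respond(tp, tp->t_template, (struct mbuf *)NULL,
	    tp->rcv_nxt, tp->snd_una - 1, 0);

	/* Reset a received segment ti (carried in m) that had ACK set. */
	tcp_respond(tp, ti, m, (tcp_seq)0, ti->ti_ack, TH_RST);
#endif
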
/*
 * Create a new TCP control block, making an
 * empty reassembly queue and hooking it to the argument
 * protocol control block.
 */
struct tcpcb *
tcp_newtcpcb(inp)
	struct inpcb *inp;
{
	register struct tcpcb *tp;

	tp = malloc(sizeof(*tp), M_PCB, M_NOWAIT);
	if (tp == NULL)
		return ((struct tcpcb *)0);
	bzero((caddr_t)tp, sizeof(struct tcpcb));
	LIST_INIT(&tp->segq);
	tp->t_maxseg = tcp_mssdflt;
	tp->t_ourmss = tcp_mssdflt;

	tp->t_flags = tcp_do_rfc1323 ? (TF_REQ_SCALE|TF_REQ_TSTMP) : 0;
	tp->t_inpcb = inp;
	/*
	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
	 * rtt estimate.  Set rttvar so that srtt + 2 * rttvar gives
	 * reasonable initial retransmit time.
	 */
	tp->t_srtt = TCPTV_SRTTBASE;
	tp->t_rttvar = tcp_rttdflt * PR_SLOWHZ << (TCP_RTTVAR_SHIFT + 2 - 1);
	tp->t_rttmin = TCPTV_MIN;
	TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
	    TCPTV_MIN, TCPTV_REXMTMAX);
	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	inp->inp_ip.ip_ttl = ip_defttl;
	inp->inp_ppcb = (caddr_t)tp;
	return (tp);
}

/*
 * Drop a TCP connection, reporting
 * the specified error.  If connection is synchronized,
 * then send a RST to peer.
 */
struct tcpcb *
tcp_drop(tp, errno)
	register struct tcpcb *tp;
	int errno;
{
	struct socket *so = tp->t_inpcb->inp_socket;

	if (TCPS_HAVERCVDSYN(tp->t_state)) {
		tp->t_state = TCPS_CLOSED;
		(void) tcp_output(tp);
		tcpstat.tcps_drops++;
	} else
		tcpstat.tcps_conndrops++;
	if (errno == ETIMEDOUT && tp->t_softerror)
		errno = tp->t_softerror;
	so->so_error = errno;
	return (tcp_close(tp));
}

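/*
 * For example, the retransmit timer drops a connection with ETIMEDOUT
 * once it has backed off past its limit; the check above then replaces
 * ETIMEDOUT with any soft error recorded earlier by tcp_notify() (e.g.
 * an ICMP unreachable), so the application sees the more specific
 * cause.  Illustrative sketch of such a caller (the real one lives in
 * tcp_timer.c and may differ in detail):
 */
#if 0	/* example only, not compiled */
	if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) {
		tp->t_rxtshift = TCP_MAXRXTSHIFT;
		tp = tcp_drop(tp, ETIMEDOUT);
	}
#endif
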
/*
 * Close a TCP control block:
 *	discard all space held by the tcp
 *	discard internet protocol block
 *	wake up any sleepers
 */
struct tcpcb *
tcp_close(tp)
	register struct tcpcb *tp;
{
	register struct ipqent *qe;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
#ifdef RTV_RTT
	register struct rtentry *rt;

	/*
	 * If we sent enough data to get some meaningful characteristics,
	 * save them in the routing entry.  'Enough' is arbitrarily
	 * defined as the sendpipesize (default 4K) * 16.  This would
	 * give us 16 rtt samples assuming we only get one sample per
	 * window (the usual case on a long haul net).  16 samples is
	 * enough for the srtt filter to converge to within 5% of the correct
	 * value; fewer samples and we could save a very bogus rtt.
	 *
	 * Don't update the default route's characteristics and don't
	 * update anything that the user "locked".
	 */
	if (SEQ_LT(tp->iss + so->so_snd.sb_hiwat * 16, tp->snd_max) &&
	    (rt = inp->inp_route.ro_rt) &&
	    !in_nullhost(satosin(rt_key(rt))->sin_addr)) {
		register u_long i = 0;

		if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) {
			i = tp->t_srtt *
			    ((RTM_RTTUNIT / PR_SLOWHZ) >> (TCP_RTT_SHIFT + 2));
			if (rt->rt_rmx.rmx_rtt && i)
				/*
				 * filter this update to half the old & half
				 * the new values, converting scale.
				 * See route.h and tcp_var.h for a
				 * description of the scaling constants.
				 */
				rt->rt_rmx.rmx_rtt =
				    (rt->rt_rmx.rmx_rtt + i) / 2;
			else
				rt->rt_rmx.rmx_rtt = i;
		}
		if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) {
			i = tp->t_rttvar *
			    ((RTM_RTTUNIT / PR_SLOWHZ) >>
			    (TCP_RTTVAR_SHIFT + 2));
			if (rt->rt_rmx.rmx_rttvar && i)
				rt->rt_rmx.rmx_rttvar =
				    (rt->rt_rmx.rmx_rttvar + i) / 2;
			else
				rt->rt_rmx.rmx_rttvar = i;
		}
		/*
		 * update the pipelimit (ssthresh) if it has been updated
		 * already or if a pipesize was specified & the threshold
		 * got below half the pipesize.  I.e., wait for bad news
		 * before we start updating, then update on both good
		 * and bad news.
		 */
		if (((rt->rt_rmx.rmx_locks & RTV_SSTHRESH) == 0 &&
		    (i = tp->snd_ssthresh) && rt->rt_rmx.rmx_ssthresh) ||
		    i < (rt->rt_rmx.rmx_sendpipe / 2)) {
			/*
			 * convert the limit from user data bytes to
			 * packets then to packet data bytes.
			 */
			i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
			if (i < 2)
				i = 2;
			i *= (u_long)(tp->t_maxseg + sizeof (struct tcpiphdr));
			if (rt->rt_rmx.rmx_ssthresh)
				rt->rt_rmx.rmx_ssthresh =
				    (rt->rt_rmx.rmx_ssthresh + i) / 2;
			else
				rt->rt_rmx.rmx_ssthresh = i;
		}
	}
#endif /* RTV_RTT */
	/* free the reassembly queue, if any */
	while ((qe = tp->segq.lh_first) != NULL) {
		LIST_REMOVE(qe, ipqe_q);
		m_freem(qe->ipqe_m);
		FREE(qe, M_IPQ);
	}
	if (tp->t_template)
		FREE(tp->t_template, M_MBUF);
	free(tp, M_PCB);
	inp->inp_ppcb = 0;
	soisdisconnected(so);
	in_pcbdetach(inp);
	tcpstat.tcps_closed++;
	return ((struct tcpcb *)0);
}

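/*
 * tcp_drain() is TCP's pr_drain hook, invoked (via the protocol
 * switch) from m_reclaim() when the system runs short of mbuf space.
 * It is a no-op here; the reassembly queues, the main place TCP parks
 * received mbufs, are freed per-connection by tcp_close() above.
 */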
void
tcp_drain()
{

}

/*
 * Notify a tcp user of an asynchronous error;
 * store error as soft error, but wake up user
 * (for now, won't do anything until can select for soft error).
 */
void
tcp_notify(inp, error)
	struct inpcb *inp;
	int error;
{
	register struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb;
	register struct socket *so = inp->inp_socket;

	/*
	 * Ignore some errors if we are hooked up.
	 * If connection hasn't completed, has retransmitted several times,
	 * and receives a second error, give up now.  This is better
	 * than waiting a long time to establish a connection that
	 * can never complete.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (error == EHOSTUNREACH || error == ENETUNREACH ||
	    error == EHOSTDOWN)) {
		return;
	} else if (TCPS_HAVEESTABLISHED(tp->t_state) == 0 &&
	    tp->t_rxtshift > 3 && tp->t_softerror)
		so->so_error = error;
	else
		tp->t_softerror = error;
	wakeup((caddr_t) &so->so_timeo);
	sorwakeup(so);
	sowwakeup(so);
}

void *
tcp_ctlinput(cmd, sa, v)
	int cmd;
	struct sockaddr *sa;
	register void *v;
{
	register struct ip *ip = v;
	register struct tcphdr *th;
	extern int inetctlerrmap[];
	void (*notify) __P((struct inpcb *, int)) = tcp_notify;
	int errno;
	int nmatch;

	if ((unsigned)cmd >= PRC_NCMDS)
		return NULL;
	errno = inetctlerrmap[cmd];
	if (cmd == PRC_QUENCH)
		notify = tcp_quench;
	else if (PRC_IS_REDIRECT(cmd))
		notify = in_rtchange, ip = 0;
	else if (cmd == PRC_HOSTDEAD)
		ip = 0;
	else if (errno == 0)
		return NULL;
	if (ip) {
		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
		nmatch = in_pcbnotify(&tcbtable, satosin(sa)->sin_addr,
		    th->th_dport, ip->ip_src, th->th_sport, errno, notify);
		if (nmatch == 0 && syn_cache_count &&
		    (inetctlerrmap[cmd] == EHOSTUNREACH ||
		    inetctlerrmap[cmd] == ENETUNREACH ||
		    inetctlerrmap[cmd] == EHOSTDOWN))
			syn_cache_unreach(ip, th);
	} else
		(void)in_pcbnotifyall(&tcbtable, satosin(sa)->sin_addr, errno,
		    notify);
	return NULL;
}

/*
 * When a source quench is received, close congestion window
 * to one segment.  We will gradually open it again as we proceed.
 */
void
tcp_quench(inp, errno)
	struct inpcb *inp;
	int errno;
{
	struct tcpcb *tp = intotcpcb(inp);

	if (tp)
		tp->snd_cwnd = tp->t_maxseg;
}

/*
 * Compute the MSS to advertise to the peer.  Called only during
 * the 3-way handshake.  If we are the server (peer initiated
 * connection), we are called with the TCPCB for the listen
 * socket.  If we are the client (we initiated connection), we
 * are called with the TCPCB for the actual connection.
 */
int
tcp_mss_to_advertise(tp)
	const struct tcpcb *tp;
{
	extern u_long in_maxmtu;
	struct inpcb *inp;
	struct socket *so;
	int mss;

	inp = tp->t_inpcb;
	so = inp->inp_socket;

	/*
	 * In order to avoid defeating path MTU discovery on the peer,
	 * we advertise the max MTU of all attached networks as our MSS,
	 * per RFC 1191, section 3.1.
	 *
	 * XXX Should we allow room for the timestamp option if
	 * XXX rfc1323 is enabled?
	 */
	mss = in_maxmtu - sizeof(struct tcpiphdr);

	return (mss);
}

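/*
 * For example, on a host whose largest attached network is Ethernet
 * (in_maxmtu == 1500), the value computed above is
 * 1500 - sizeof(struct tcpiphdr) = 1500 - 40 = 1460 bytes.
 */
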
/*
 * Set connection variables based on the peer's advertised MSS.
 * We are passed the TCPCB for the actual connection.  If we
 * are the server, we are called by the compressed state engine
 * when the 3-way handshake is complete.  If we are the client,
 * we are called when we receive the SYN,ACK from the server.
 *
 * NOTE: Our advertised MSS value must be initialized in the TCPCB
 * before this routine is called!
 */
void
tcp_mss_from_peer(tp, offer)
	struct tcpcb *tp;
	int offer;
{
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
#if defined(RTV_SPIPE) || defined(RTV_SSTHRESH)
	struct rtentry *rt = in_pcbrtentry(inp);
#endif
	u_long bufsize;
	int mss;

	/*
	 * Assume our MSS is the MSS of the peer, unless they sent us
	 * an offer.  Do not accept offers less than 32 bytes.
	 */
	mss = tp->t_ourmss;
	if (offer)
		mss = offer;
	mss = max(mss, 32);		/* sanity */

	/*
	 * If there's a pipesize, change the socket buffer to that size.
	 * Make the socket buffer an integral number of MSS units.  If
	 * the MSS is larger than the socket buffer, artificially decrease
	 * the MSS.
	 */
#ifdef RTV_SPIPE
	if (rt != NULL && rt->rt_rmx.rmx_sendpipe != 0)
		bufsize = rt->rt_rmx.rmx_sendpipe;
	else
#endif
		bufsize = so->so_snd.sb_hiwat;
	if (bufsize < mss)
		mss = bufsize;
	else {
		bufsize = roundup(bufsize, mss);
		if (bufsize > sb_max)
			bufsize = sb_max;
		(void) sbreserve(&so->so_snd, bufsize);
	}
	tp->t_maxseg = mss;

	/* Initialize the initial congestion window. */
	tp->snd_cwnd = mss;

#ifdef RTV_SSTHRESH
	if (rt != NULL && rt->rt_rmx.rmx_ssthresh) {
		/*
		 * There's some sort of gateway or interface buffer
		 * limit on the path.  Use this to set the slow
		 * start threshold, but set the threshold to no less
		 * than 2 * MSS.
		 */
		tp->snd_ssthresh = max(2 * mss, rt->rt_rmx.rmx_ssthresh);
	}
#endif
}

/*
 * Processing necessary when a TCP connection is established.
 */
void
tcp_established(tp)
	struct tcpcb *tp;
{
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
#ifdef RTV_RPIPE
	struct rtentry *rt = in_pcbrtentry(inp);
#endif
	u_long bufsize;

	tp->t_state = TCPS_ESTABLISHED;
	tp->t_timer[TCPT_KEEP] = tcp_keepidle;

#ifdef RTV_RPIPE
	if (rt != NULL && rt->rt_rmx.rmx_recvpipe != 0)
		bufsize = rt->rt_rmx.rmx_recvpipe;
	else
#endif
		bufsize = so->so_rcv.sb_hiwat;
	if (bufsize > tp->t_ourmss) {
		bufsize = roundup(bufsize, tp->t_ourmss);
		if (bufsize > sb_max)
			bufsize = sb_max;
		(void) sbreserve(&so->so_rcv, bufsize);
	}
}

/*
 * Check if there's an initial rtt or rttvar.  Convert from the
 * route-table units to scaled multiples of the slow timeout timer.
 * Called only during the 3-way handshake.
 */
void
tcp_rmx_rtt(tp)
	struct tcpcb *tp;
{
#ifdef RTV_RTT
	struct rtentry *rt;
	int rtt;

	if ((rt = in_pcbrtentry(tp->t_inpcb)) == NULL)
		return;

	if (tp->t_srtt == 0 && (rtt = rt->rt_rmx.rmx_rtt)) {
		/*
		 * XXX The lock bit for RTT indicates that the value
		 * is also a minimum value; this is subject to time.
		 */
		if (rt->rt_rmx.rmx_locks & RTV_RTT)
			tp->t_rttmin = rtt / (RTM_RTTUNIT / PR_SLOWHZ);
		tp->t_srtt = rtt /
		    ((RTM_RTTUNIT / PR_SLOWHZ) >> (TCP_RTT_SHIFT + 2));
		if (rt->rt_rmx.rmx_rttvar) {
			tp->t_rttvar = rt->rt_rmx.rmx_rttvar /
			    ((RTM_RTTUNIT / PR_SLOWHZ) >>
			    (TCP_RTTVAR_SHIFT + 2));
		} else {
			/* Default variation is +- 1 rtt */
			tp->t_rttvar =
			    tp->t_srtt >> (TCP_RTT_SHIFT - TCP_RTTVAR_SHIFT);
		}
		TCPT_RANGESET(tp->t_rxtcur,
		    ((tp->t_srtt >> 2) + tp->t_rttvar) >> (1 + 2),
		    tp->t_rttmin, TCPTV_REXMTMAX);
	}
#endif
}

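/*
 * Units note for the conversions above and in tcp_close(): the route
 * metrics rmx_rtt and rmx_rttvar are stored in RTM_RTTUNIT-ths of a
 * second (RTM_RTTUNIT is 1000000, so effectively microseconds), while
 * t_srtt and t_rttvar are kept in PR_SLOWHZ ticks carrying additional
 * binary fraction bits, which is why the conversion factor in both
 * directions is (RTM_RTTUNIT / PR_SLOWHZ) shifted down by the
 * corresponding TCP_RTT_SHIFT + 2 or TCP_RTTVAR_SHIFT + 2 bits.
 */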