/*
 * Copyright (c) 2003-2004 Jeffrey M. Hsu.  All rights reserved.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *   This product includes software developed by Jeffrey M. Hsu.
 *
 * Copyright (c) 2001 Networks Associates Technologies, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jonathan Lemon
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/netinet/tcp_syncache.c,v 1.5.2.14 2003/02/24 04:02:27 silby Exp $
 * $DragonFly: src/sys/netinet/tcp_syncache.c,v 1.14 2004/07/02 04:41:01 hsu Exp $
 */

#include "opt_inet6.h"
#include "opt_ipsec.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/md5.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/random.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/in_cksum.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif

#ifdef IPSEC
#include <netinet6/ipsec.h>
#ifdef INET6
#include <netinet6/ipsec6.h>
#endif
#include <netproto/key/key.h>
#endif /*IPSEC*/

#ifdef FAST_IPSEC
#include <netipsec/ipsec.h>
#ifdef INET6
#include <netipsec/ipsec6.h>
#endif
#include <netipsec/key.h>
#define	IPSEC
#endif /*FAST_IPSEC*/

#include <vm/vm_zone.h>

static int tcp_syncookies = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, syncookies, CTLFLAG_RW,
    &tcp_syncookies, 0,
    "Use TCP SYN cookies if the syncache overflows");

static void	 syncache_drop(struct syncache *, struct syncache_head *);
static void	 syncache_free(struct syncache *);
static void	 syncache_insert(struct syncache *, struct syncache_head *);
struct syncache *syncache_lookup(struct in_conninfo *, struct syncache_head **);
static int	 syncache_respond(struct syncache *, struct mbuf *);
static struct socket *syncache_socket(struct syncache *, struct socket *);
static void	 syncache_timer(void *);
static u_int32_t syncookie_generate(struct syncache *);
static struct syncache *syncookie_lookup(struct in_conninfo *,
		    struct tcphdr *, struct socket *);

/*
 * Transmit the SYN,ACK fewer times than TCP_MAXRXTSHIFT specifies.
 * 3 retransmits corresponds to a timeout of (1 + 2 + 4 + 8 == 15) seconds;
 * the odds are that the user has given up attempting to connect by then.
 */
#define SYNCACHE_MAXREXMTS		3

/* Arbitrary values */
#define TCP_SYNCACHE_HASHSIZE		512
#define TCP_SYNCACHE_BUCKETLIMIT	30

struct tcp_syncache {
	struct	syncache_head *hashbase;
	struct	vm_zone *zone;
	u_int	hashsize;
	u_int	hashmask;
	u_int	bucket_limit;
	u_int	cache_count;
	u_int	cache_limit;
	u_int	rexmt_limit;
	u_int	hash_secret;
	TAILQ_HEAD(, syncache) timerq[SYNCACHE_MAXREXMTS + 1];
	struct	callout tt_timerq[SYNCACHE_MAXREXMTS + 1];
};
static struct tcp_syncache tcp_syncache;

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, syncache, CTLFLAG_RW, 0, "TCP SYN cache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, bucketlimit, CTLFLAG_RD,
    &tcp_syncache.bucket_limit, 0, "Per-bucket hash limit for syncache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, cachelimit, CTLFLAG_RD,
    &tcp_syncache.cache_limit, 0, "Overall entry limit for syncache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, count, CTLFLAG_RD,
    &tcp_syncache.cache_count, 0, "Current number of entries in syncache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, hashsize, CTLFLAG_RD,
    &tcp_syncache.hashsize, 0, "Size of TCP syncache hashtable");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, rexmtlimit, CTLFLAG_RW,
    &tcp_syncache.rexmt_limit, 0, "Limit on SYN/ACK retransmissions");

static MALLOC_DEFINE(M_SYNCACHE, "syncache", "TCP syncache");

#define SYNCACHE_HASH(inc, mask)					\
	((tcp_syncache.hash_secret ^					\
	  (inc)->inc_faddr.s_addr ^					\
	  ((inc)->inc_faddr.s_addr >> 16) ^				\
	  (inc)->inc_fport ^ (inc)->inc_lport) & mask)

#define SYNCACHE_HASH6(inc, mask)					\
	((tcp_syncache.hash_secret ^					\
	  (inc)->inc6_faddr.s6_addr32[0] ^				\
	  (inc)->inc6_faddr.s6_addr32[3] ^				\
	  (inc)->inc_fport ^ (inc)->inc_lport) & mask)

#define ENDPTS_EQ(a, b) (						\
	(a)->ie_fport == (b)->ie_fport &&				\
	(a)->ie_lport == (b)->ie_lport &&				\
	(a)->ie_faddr.s_addr == (b)->ie_faddr.s_addr &&			\
	(a)->ie_laddr.s_addr == (b)->ie_laddr.s_addr			\
)

#define ENDPTS6_EQ(a, b) (memcmp(a, b, sizeof(*a)) == 0)

#define SYNCACHE_TIMEOUT(sc, slot) do {					\
	sc->sc_rxtslot = slot;						\
	sc->sc_rxttime = ticks + TCPTV_RTOBASE * tcp_backoff[slot];	\
	TAILQ_INSERT_TAIL(&tcp_syncache.timerq[slot], sc, sc_timerq);	\
	if (!callout_active(&tcp_syncache.tt_timerq[slot]))		\
		callout_reset(&tcp_syncache.tt_timerq[slot],		\
		    TCPTV_RTOBASE * tcp_backoff[slot],			\
		    syncache_timer, (void *)((intptr_t)slot));		\
} while (0)
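
/*
 * Worked example of the schedule implemented by SYNCACHE_TIMEOUT()
 * (a sketch: it assumes TCPTV_RTOBASE is one second worth of ticks and
 * that tcp_backoff[] begins { 1, 2, 4, 8, ... }, which is what the
 * 15 second figure above implies): an entry waits 1 second in slot 0,
 * then 2, 4, and 8 seconds in slots 1 through 3, after which
 * syncache_timer() expires it.
 */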

static void
syncache_free(struct syncache *sc)
{
	struct rtentry *rt;

	if (sc->sc_ipopts)
		(void) m_free(sc->sc_ipopts);
#ifdef INET6
	if (sc->sc_inc.inc_isipv6)
		rt = sc->sc_route6.ro_rt;
	else
#endif
		rt = sc->sc_route.ro_rt;
	if (rt != NULL) {
		/*
		 * If this is the only reference to a protocol cloned
		 * route, remove it immediately.
		 */
		if (rt->rt_flags & RTF_WASCLONED &&
		    (sc->sc_flags & SCF_KEEPROUTE) == 0 &&
		    rt->rt_refcnt == 1)
			rtrequest(RTM_DELETE, rt_key(rt),
			    rt->rt_gateway, rt_mask(rt),
			    rt->rt_flags, NULL);
		RTFREE(rt);
	}
	zfree(tcp_syncache.zone, sc);
}

void
syncache_init(void)
{
	int i;

	tcp_syncache.cache_count = 0;
	tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
	tcp_syncache.bucket_limit = TCP_SYNCACHE_BUCKETLIMIT;
	tcp_syncache.cache_limit =
	    tcp_syncache.hashsize * tcp_syncache.bucket_limit;
	tcp_syncache.rexmt_limit = SYNCACHE_MAXREXMTS;
	tcp_syncache.hash_secret = arc4random();

	TUNABLE_INT_FETCH("net.inet.tcp.syncache.hashsize",
	    &tcp_syncache.hashsize);
	TUNABLE_INT_FETCH("net.inet.tcp.syncache.cachelimit",
	    &tcp_syncache.cache_limit);
	TUNABLE_INT_FETCH("net.inet.tcp.syncache.bucketlimit",
	    &tcp_syncache.bucket_limit);
	if (!powerof2(tcp_syncache.hashsize)) {
		printf("WARNING: syncache hash size is not a power of 2.\n");
		tcp_syncache.hashsize = 512;	/* safe default */
	}
	tcp_syncache.hashmask = tcp_syncache.hashsize - 1;

	/* Allocate the hash table. */
	MALLOC(tcp_syncache.hashbase, struct syncache_head *,
	    tcp_syncache.hashsize * sizeof(struct syncache_head),
	    M_SYNCACHE, M_WAITOK);

	/* Initialize the hash buckets. */
	for (i = 0; i < tcp_syncache.hashsize; i++) {
		TAILQ_INIT(&tcp_syncache.hashbase[i].sch_bucket);
		tcp_syncache.hashbase[i].sch_length = 0;
	}

	/* Initialize the timer queues. */
	for (i = 0; i <= SYNCACHE_MAXREXMTS; i++) {
		TAILQ_INIT(&tcp_syncache.timerq[i]);
		callout_init(&tcp_syncache.tt_timerq[i]);
	}

	/*
	 * Allocate the syncache entries.  Allow the zone to allocate one
	 * more entry than the cache limit, so a new entry can bump out an
	 * older one.
	 */
	tcp_syncache.zone = zinit("syncache", sizeof(struct syncache),
	    tcp_syncache.cache_limit, ZONE_INTERRUPT, 0);
	tcp_syncache.cache_limit -= 1;
}
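
/*
 * Example (hypothetical values): the sizing parameters fetched above may
 * be overridden from the loader before boot, e.g. in /boot/loader.conf:
 *
 *	net.inet.tcp.syncache.hashsize="1024"
 *	net.inet.tcp.syncache.bucketlimit="60"
 *
 * hashsize must remain a power of 2, or syncache_init() falls back to
 * the 512-bucket default.
 */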

static void
syncache_insert(sc, sch)
	struct syncache *sc;
	struct syncache_head *sch;
{
	struct syncache *sc2;
	int i;

	/*
	 * Make sure that we don't overflow the per-bucket
	 * limit or the total cache size limit.
	 */
	if (sch->sch_length >= tcp_syncache.bucket_limit) {
		/*
		 * The bucket is full, toss the oldest element.
		 */
		sc2 = TAILQ_FIRST(&sch->sch_bucket);
		sc2->sc_tp->ts_recent = ticks;
		syncache_drop(sc2, sch);
		tcpstat.tcps_sc_bucketoverflow++;
	} else if (tcp_syncache.cache_count >= tcp_syncache.cache_limit) {
		/*
		 * The cache is full.  Toss the oldest entry in the
		 * entire cache.  This is the front entry in the
		 * first non-empty timer queue with the largest
		 * timeout value.
		 */
		for (i = SYNCACHE_MAXREXMTS; i >= 0; i--) {
			sc2 = TAILQ_FIRST(&tcp_syncache.timerq[i]);
			if (sc2 != NULL)
				break;
		}
		sc2->sc_tp->ts_recent = ticks;
		syncache_drop(sc2, NULL);
		tcpstat.tcps_sc_cacheoverflow++;
	}

	/* Initialize the entry's timer. */
	SYNCACHE_TIMEOUT(sc, 0);

	/* Put it into the bucket. */
	TAILQ_INSERT_TAIL(&sch->sch_bucket, sc, sc_hash);
	sch->sch_length++;
	tcp_syncache.cache_count++;
	tcpstat.tcps_sc_added++;
}

static void
syncache_drop(sc, sch)
	struct syncache *sc;
	struct syncache_head *sch;
{

	if (sch == NULL) {
#ifdef INET6
		if (sc->sc_inc.inc_isipv6) {
			sch = &tcp_syncache.hashbase[
			    SYNCACHE_HASH6(&sc->sc_inc, tcp_syncache.hashmask)];
		} else
#endif
		{
			sch = &tcp_syncache.hashbase[
			    SYNCACHE_HASH(&sc->sc_inc, tcp_syncache.hashmask)];
		}
	}

	TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
	sch->sch_length--;
	tcp_syncache.cache_count--;

	TAILQ_REMOVE(&tcp_syncache.timerq[sc->sc_rxtslot], sc, sc_timerq);
	if (TAILQ_EMPTY(&tcp_syncache.timerq[sc->sc_rxtslot]))
		callout_stop(&tcp_syncache.tt_timerq[sc->sc_rxtslot]);

	syncache_free(sc);
}

/*
 * Walk the timer queues, looking for SYN,ACKs that need to be retransmitted.
 * If we have retransmitted an entry the maximum number of times, expire it.
 */
static void
syncache_timer(xslot)
	void *xslot;
{
	intptr_t slot = (intptr_t)xslot;
	struct syncache *sc, *nsc;
	struct inpcb *inp;
	int s;

	s = splnet();
	if (callout_pending(&tcp_syncache.tt_timerq[slot]) ||
	    !callout_active(&tcp_syncache.tt_timerq[slot])) {
		splx(s);
		return;
	}
	callout_deactivate(&tcp_syncache.tt_timerq[slot]);

	nsc = TAILQ_FIRST(&tcp_syncache.timerq[slot]);
	while (nsc != NULL) {
		if (ticks < nsc->sc_rxttime)
			break;
		sc = nsc;
		inp = sc->sc_tp->t_inpcb;
		if (slot == SYNCACHE_MAXREXMTS ||
		    slot >= tcp_syncache.rexmt_limit ||
		    inp->inp_gencnt != sc->sc_inp_gencnt) {
			nsc = TAILQ_NEXT(sc, sc_timerq);
			syncache_drop(sc, NULL);
			tcpstat.tcps_sc_stale++;
			continue;
		}
		/*
		 * syncache_respond() may call back into the syncache
		 * to modify another entry, so do not obtain the next
		 * entry on the timer chain until it has completed.
		 */
		(void) syncache_respond(sc, NULL);
		nsc = TAILQ_NEXT(sc, sc_timerq);
		tcpstat.tcps_sc_retransmitted++;
		TAILQ_REMOVE(&tcp_syncache.timerq[slot], sc, sc_timerq);
		SYNCACHE_TIMEOUT(sc, slot + 1);
	}
	if (nsc != NULL)
		callout_reset(&tcp_syncache.tt_timerq[slot],
		    nsc->sc_rxttime - ticks, syncache_timer, (void *)(slot));
	splx(s);
}

/*
 * Find an entry in the syncache.
 */
struct syncache *
syncache_lookup(inc, schp)
	struct in_conninfo *inc;
	struct syncache_head **schp;
{
	struct syncache *sc;
	struct syncache_head *sch;

#ifdef INET6
	if (inc->inc_isipv6) {
		sch = &tcp_syncache.hashbase[
		    SYNCACHE_HASH6(inc, tcp_syncache.hashmask)];
		*schp = sch;
		TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash)
			if (ENDPTS6_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie))
				return (sc);
	} else
#endif
	{
		sch = &tcp_syncache.hashbase[
		    SYNCACHE_HASH(inc, tcp_syncache.hashmask)];
		*schp = sch;
		TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
#ifdef INET6
			if (sc->sc_inc.inc_isipv6)
				continue;
#endif
			if (ENDPTS_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie))
				return (sc);
		}
	}
	return (NULL);
}
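
/*
 * Typical lookup pattern (a sketch; this mirrors syncache_badack()
 * below): the bucket head is returned through schp so the caller can
 * drop the entry without rehashing the connection tuple:
 *
 *	struct syncache_head *sch;
 *	struct syncache *sc = syncache_lookup(inc, &sch);
 *
 *	if (sc != NULL)
 *		syncache_drop(sc, sch);
 */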

/*
 * This function is called when we get a RST for a
 * non-existent connection, so that we can see if the
 * connection is in the syn cache.  If it is, zap it.
 */
void
syncache_chkrst(inc, th)
	struct in_conninfo *inc;
	struct tcphdr *th;
{
	struct syncache *sc;
	struct syncache_head *sch;

	sc = syncache_lookup(inc, &sch);
	if (sc == NULL)
		return;
	/*
	 * If the RST bit is set, check the sequence number to see
	 * if this is a valid reset segment.
	 * RFC 793 page 37:
	 *   In all states except SYN-SENT, all reset (RST) segments
	 *   are validated by checking their SEQ-fields.  A reset is
	 *   valid if its sequence number is in the window.
	 *
	 *   The sequence number in the reset segment is normally an
	 *   echo of our outgoing acknowledgement numbers, but some
	 *   hosts send a reset with the sequence number at the
	 *   rightmost edge of our receive window, and we have to
	 *   handle this case.
	 */
	if (SEQ_GEQ(th->th_seq, sc->sc_irs) &&
	    SEQ_LEQ(th->th_seq, sc->sc_irs + sc->sc_wnd)) {
		syncache_drop(sc, sch);
		tcpstat.tcps_sc_reset++;
	}
}

void
syncache_badack(inc)
	struct in_conninfo *inc;
{
	struct syncache *sc;
	struct syncache_head *sch;

	sc = syncache_lookup(inc, &sch);
	if (sc != NULL) {
		syncache_drop(sc, sch);
		tcpstat.tcps_sc_badack++;
	}
}
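
/*
 * An ICMP unreachable message arrived for an embryonic connection.
 * Validate it against the entry's initial send sequence number, and
 * only drop the entry once we have both retransmitted several times
 * and seen a previous error; see the comments below.
 */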
void
syncache_unreach(inc, th)
	struct in_conninfo *inc;
	struct tcphdr *th;
{
	struct syncache *sc;
	struct syncache_head *sch;

	/* we are called at splnet() here */
	sc = syncache_lookup(inc, &sch);
	if (sc == NULL)
		return;

	/* If the sequence number != sc_iss, then it's a bogus ICMP msg */
	if (ntohl(th->th_seq) != sc->sc_iss)
		return;

	/*
	 * If we've retransmitted 3 times and this is our second error,
	 * we remove the entry.  Otherwise, we allow it to continue on.
	 * This prevents us from incorrectly nuking an entry during a
	 * spurious network outage.
	 *
	 * See tcp_notify().
	 */
	if ((sc->sc_flags & SCF_UNREACH) == 0 || sc->sc_rxtslot < 3) {
		sc->sc_flags |= SCF_UNREACH;
		return;
	}
	syncache_drop(sc, sch);
	tcpstat.tcps_sc_unreach++;
}

/*
 * Build a new TCP socket structure from a syncache entry.
 */
static struct socket *
syncache_socket(sc, lso)
	struct syncache *sc;
	struct socket *lso;
{
	struct inpcb *inp = NULL;
	struct socket *so;
	struct tcpcb *tp;

	/*
	 * Ok, create the full blown connection, and set things up
	 * as they would have been set up if we had created the
	 * connection when the SYN arrived.  If we can't create
	 * the connection, abort it.
	 */
	so = sonewconn(lso, SS_ISCONNECTED);
	if (so == NULL) {
		/*
		 * Drop the connection; we will send a RST if the peer
		 * retransmits the ACK.
		 */
		tcpstat.tcps_listendrop++;
		goto abort;
	}

	inp = sotoinpcb(so);

	/*
	 * Insert new socket into hash list.
	 */
	inp->inp_inc.inc_isipv6 = sc->sc_inc.inc_isipv6;
#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		inp->in6p_laddr = sc->sc_inc.inc6_laddr;
	} else {
		inp->inp_vflag &= ~INP_IPV6;
		inp->inp_vflag |= INP_IPV4;
#endif
		inp->inp_laddr = sc->sc_inc.inc_laddr;
#ifdef INET6
	}
#endif
	inp->inp_lport = sc->sc_inc.inc_lport;
	if (in_pcbinsporthash(inp) != 0) {
		/*
		 * Undo the assignments above if we failed to
		 * put the PCB on the hash lists.
		 */
#ifdef INET6
		if (sc->sc_inc.inc_isipv6)
			inp->in6p_laddr = in6addr_any;
		else
#endif
			inp->inp_laddr.s_addr = INADDR_ANY;
		inp->inp_lport = 0;
		goto abort;
	}
#ifdef IPSEC
	/* copy old policy into new socket's */
	if (ipsec_copy_policy(sotoinpcb(lso)->inp_sp, inp->inp_sp))
		printf("syncache_expand: could not copy policy\n");
#endif
#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		struct inpcb *oinp = sotoinpcb(lso);
		struct in6_addr laddr6;
		struct sockaddr_in6 sin6;
		/*
		 * Inherit socket options from the listening socket.
		 * Note that in6p_inputopts are not (and should not be)
		 * copied, since it stores previously received options and
		 * is used to detect if each new option is different from
		 * the previous one and hence should be passed to a user.
		 * If we copied in6p_inputopts, a user would not be able to
		 * receive options just after calling the accept system call.
		 */
		inp->inp_flags |= oinp->inp_flags & INP_CONTROLOPTS;
		if (oinp->in6p_outputopts)
			inp->in6p_outputopts =
			    ip6_copypktopts(oinp->in6p_outputopts, M_INTWAIT);
		inp->in6p_route = sc->sc_route6;
		sc->sc_route6.ro_rt = NULL;

		sin6.sin6_family = AF_INET6;
		sin6.sin6_len = sizeof sin6;
		sin6.sin6_addr = sc->sc_inc.inc6_faddr;
		sin6.sin6_port = sc->sc_inc.inc_fport;
		sin6.sin6_flowinfo = sin6.sin6_scope_id = 0;
		laddr6 = inp->in6p_laddr;
		if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
			inp->in6p_laddr = sc->sc_inc.inc6_laddr;
		if (in6_pcbconnect(inp, (struct sockaddr *)&sin6, &thread0)) {
			inp->in6p_laddr = laddr6;
			goto abort;
		}
	} else
#endif
	{
		struct in_addr laddr;
		struct sockaddr_in sin;

		inp->inp_options = ip_srcroute();
		if (inp->inp_options == NULL) {
			inp->inp_options = sc->sc_ipopts;
			sc->sc_ipopts = NULL;
		}
		inp->inp_route = sc->sc_route;
		sc->sc_route.ro_rt = NULL;

		sin.sin_family = AF_INET;
		sin.sin_len = sizeof sin;
		sin.sin_addr = sc->sc_inc.inc_faddr;
		sin.sin_port = sc->sc_inc.inc_fport;
		bzero(sin.sin_zero, sizeof sin.sin_zero);
		laddr = inp->inp_laddr;
		if (inp->inp_laddr.s_addr == INADDR_ANY)
			inp->inp_laddr = sc->sc_inc.inc_laddr;
		if (in_pcbconnect(inp, (struct sockaddr *)&sin, &thread0)) {
			inp->inp_laddr = laddr;
			goto abort;
		}
	}

	tp = intotcpcb(inp);
	tp->t_state = TCPS_SYN_RECEIVED;
	tp->iss = sc->sc_iss;
	tp->irs = sc->sc_irs;
	tcp_rcvseqinit(tp);
	tcp_sendseqinit(tp);
	tp->snd_wl1 = sc->sc_irs;
	tp->rcv_up = sc->sc_irs + 1;
	tp->rcv_wnd = sc->sc_wnd;
	tp->rcv_adv += tp->rcv_wnd;

	tp->t_flags = sototcpcb(lso)->t_flags & (TF_NOPUSH|TF_NODELAY);
	if (sc->sc_flags & SCF_NOOPT)
		tp->t_flags |= TF_NOOPT;
	if (sc->sc_flags & SCF_WINSCALE) {
		tp->t_flags |= TF_REQ_SCALE|TF_RCVD_SCALE;
		tp->requested_s_scale = sc->sc_requested_s_scale;
		tp->request_r_scale = sc->sc_request_r_scale;
	}
	if (sc->sc_flags & SCF_TIMESTAMP) {
		tp->t_flags |= TF_REQ_TSTMP|TF_RCVD_TSTMP;
		tp->ts_recent = sc->sc_tsrecent;
		tp->ts_recent_age = ticks;
	}
	if (sc->sc_flags & SCF_CC) {
		/*
		 * Initialization of the tcpcb for transaction;
		 *   set SND.WND = SEG.WND,
		 *   initialize CCsend and CCrecv.
		 */
		tp->t_flags |= TF_REQ_CC|TF_RCVD_CC;
		tp->cc_send = sc->sc_cc_send;
		tp->cc_recv = sc->sc_cc_recv;
	}

	tcp_mss(tp, sc->sc_peer_mss);

	/*
	 * If the SYN,ACK was retransmitted, reset cwnd to 1 segment.
	 */
	if (sc->sc_rxtslot != 0)
		tp->snd_cwnd = tp->t_maxseg;
	callout_reset(tp->tt_keep, tcp_keepinit, tcp_timer_keep, tp);

	tcpstat.tcps_accepts++;
	return (so);

abort:
	if (so != NULL)
		(void) soabort(so);
	return (NULL);
}

/*
 * This function gets called when we receive an ACK for a
 * socket in the LISTEN state.  We look up the connection
 * in the syncache, and if it's there, we pull it out of
 * the cache and turn it into a full-blown connection in
 * the SYN-RECEIVED state.
 */
int
syncache_expand(inc, th, sop, m)
	struct in_conninfo *inc;
	struct tcphdr *th;
	struct socket **sop;
	struct mbuf *m;
{
	struct syncache *sc;
	struct syncache_head *sch;
	struct socket *so;

	sc = syncache_lookup(inc, &sch);
	if (sc == NULL) {
		/*
		 * There is no syncache entry, so see if this ACK is
		 * a returning syncookie.  To do this, first:
		 *  A. See if this socket has had a syncache entry dropped
		 *     in the past.  We don't want to accept a bogus
		 *     syncookie if we've never received a SYN.
		 *  B. check that the syncookie is valid.  If it is, then
		 *     cobble up a fake syncache entry, and return.
		 */
		if (!tcp_syncookies)
			return (0);
		sc = syncookie_lookup(inc, th, *sop);
		if (sc == NULL)
			return (0);
		sch = NULL;
		tcpstat.tcps_sc_recvcookie++;
	}

	/*
	 * If seg contains an ACK, but not for our SYN/ACK, send a RST.
	 */
	if (th->th_ack != sc->sc_iss + 1)
		return (0);

	so = syncache_socket(sc, *sop);
	if (so == NULL) {
#if 0
resetandabort:
		/* XXXjlemon check this - is this correct? */
		(void) tcp_respond(NULL, m, m, th,
		    th->th_seq + tlen, (tcp_seq)0, TH_RST|TH_ACK);
#endif
		m_freem(m);			/* XXX only needed for above */
		tcpstat.tcps_sc_aborted++;
	} else {
		sc->sc_flags |= SCF_KEEPROUTE;
		tcpstat.tcps_sc_completed++;
	}
	if (sch == NULL)
		syncache_free(sc);
	else
		syncache_drop(sc, sch);
	*sop = so;
	return (1);
}
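
/*
 * Caller sketch (hypothetical; it mirrors how the TCP input path uses
 * this routine): *sop points at the listening socket on entry and at
 * the newly minted connection, or NULL on failure, when we return 1:
 *
 *	struct socket *so = listen_so;
 *
 *	if (syncache_expand(&inc, th, &so, m)) {
 *		if (so == NULL)
 *			... segment consumed, connection aborted ...
 *		else
 *			... continue input processing on the new socket ...
 *	}
 */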

/*
 * Given a LISTEN socket and an inbound SYN request, add
 * this to the syn cache, and send back a segment:
 *	<SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
 * to the source.
 *
 * IMPORTANT NOTE: We do _NOT_ ACK data that might accompany the SYN.
 * Doing so would require that we hold onto the data and deliver it
 * to the application.  However, if we are the target of a SYN-flood
 * DoS attack, an attacker could send data which would eventually
 * consume all available buffer space if it were ACKed.  By not ACKing
 * the data, we avoid this DoS scenario.
 */
int
syncache_add(inc, to, th, sop, m)
	struct in_conninfo *inc;
	struct tcpopt *to;
	struct tcphdr *th;
	struct socket **sop;
	struct mbuf *m;
{
	struct tcpcb *tp;
	struct socket *so;
	struct syncache *sc = NULL;
	struct syncache_head *sch;
	struct mbuf *ipopts = NULL;
	struct rmxp_tao *taop;
	int win;

	so = *sop;
	tp = sototcpcb(so);

	/*
	 * Remember the IP options, if any.
	 */
#ifdef INET6
	if (!inc->inc_isipv6)
#endif
		ipopts = ip_srcroute();

	/*
	 * See if we already have an entry for this connection.
	 * If we do, resend the SYN,ACK, and reset the retransmit timer.
	 *
	 * XXX
	 * should the syncache be re-initialized with the contents
	 * of the new SYN here (which may have different options?)
	 */
	sc = syncache_lookup(inc, &sch);
	if (sc != NULL) {
		tcpstat.tcps_sc_dupsyn++;
		if (ipopts) {
			/*
			 * If we were remembering a previous source route,
			 * forget it and use the new one we've been given.
			 */
			if (sc->sc_ipopts)
				(void) m_free(sc->sc_ipopts);
			sc->sc_ipopts = ipopts;
		}
		/*
		 * Update timestamp if present.
		 */
		if (sc->sc_flags & SCF_TIMESTAMP)
			sc->sc_tsrecent = to->to_tsval;
		/*
		 * PCB may have changed, pick up new values.
		 */
		sc->sc_tp = tp;
		sc->sc_inp_gencnt = tp->t_inpcb->inp_gencnt;
		if (syncache_respond(sc, m) == 0) {
			TAILQ_REMOVE(&tcp_syncache.timerq[sc->sc_rxtslot],
			    sc, sc_timerq);
			SYNCACHE_TIMEOUT(sc, sc->sc_rxtslot);
			tcpstat.tcps_sndacks++;
			tcpstat.tcps_sndtotal++;
		}
		*sop = NULL;
		return (1);
	}

	/*
	 * This allocation is guaranteed to succeed because we
	 * preallocate one more syncache entry than cache_limit.
	 */
	sc = zalloc(tcp_syncache.zone);

	/*
	 * Fill in the syncache values.
	 */
	sc->sc_tp = tp;
	sc->sc_inp_gencnt = tp->t_inpcb->inp_gencnt;
	sc->sc_ipopts = ipopts;
	sc->sc_inc.inc_fport = inc->inc_fport;
	sc->sc_inc.inc_lport = inc->inc_lport;
#ifdef INET6
	sc->sc_inc.inc_isipv6 = inc->inc_isipv6;
	if (inc->inc_isipv6) {
		sc->sc_inc.inc6_faddr = inc->inc6_faddr;
		sc->sc_inc.inc6_laddr = inc->inc6_laddr;
		sc->sc_route6.ro_rt = NULL;
	} else
#endif
	{
		sc->sc_inc.inc_faddr = inc->inc_faddr;
		sc->sc_inc.inc_laddr = inc->inc_laddr;
		sc->sc_route.ro_rt = NULL;
	}
	sc->sc_irs = th->th_seq;
	sc->sc_flags = 0;
	sc->sc_peer_mss = to->to_flags & TOF_MSS ? to->to_mss : 0;
	if (tcp_syncookies)
		sc->sc_iss = syncookie_generate(sc);
	else
		sc->sc_iss = arc4random();

	/* Initial receive window: clip sbspace to [0 .. TCP_MAXWIN] */
	win = sbspace(&so->so_rcv);
	win = imax(win, 0);
	win = imin(win, TCP_MAXWIN);
	sc->sc_wnd = win;

	if (tcp_do_rfc1323) {
		/*
		 * A timestamp received in a SYN makes
		 * it ok to send timestamp requests and replies.
		 */
		if (to->to_flags & TOF_TS) {
			sc->sc_tsrecent = to->to_tsval;
			sc->sc_flags |= SCF_TIMESTAMP;
		}
		if (to->to_flags & TOF_SCALE) {
			int wscale = 0;

			/* Compute proper scaling value from buffer space */
			while (wscale < TCP_MAX_WINSHIFT &&
			    (TCP_MAXWIN << wscale) < so->so_rcv.sb_hiwat)
				wscale++;
			sc->sc_request_r_scale = wscale;
			sc->sc_requested_s_scale = to->to_requested_s_scale;
			sc->sc_flags |= SCF_WINSCALE;
		}
	}
	if (tcp_do_rfc1644) {
		/*
		 * A CC or CC.new option received in a SYN makes
		 * it ok to send CC in subsequent segments.
		 */
		if (to->to_flags & (TOF_CC|TOF_CCNEW)) {
			sc->sc_cc_recv = to->to_cc;
			sc->sc_cc_send = CC_INC(tcp_ccgen);
			sc->sc_flags |= SCF_CC;
		}
	}
	if (tp->t_flags & TF_NOOPT)
		sc->sc_flags = SCF_NOOPT;

	/*
	 * XXX
	 * We have the option here of not doing TAO (even if the segment
	 * qualifies) and instead fall back to a normal 3WHS via the
	 * syncache.  This allows us to apply synflood protection to
	 * TAO-qualifying SYNs also.  However, there should be a heuristic
	 * to determine when to do this, and one is not present at the
	 * moment.
	 */

	/*
	 * Perform TAO test on incoming CC (SEG.CC) option, if any.
	 * - compare SEG.CC against cached CC from the same host, if any.
	 * - if SEG.CC > cached value, SYN must be new and is accepted
	 *	immediately: save new CC in the cache, mark the socket
	 *	connected, enter ESTABLISHED state, turn on flag to
	 *	send a SYN in the next segment.
	 *	A virtual advertised window is set in rcv_adv to
	 *	initialize SWS prevention.  Then enter normal segment
	 *	processing: drop SYN, process data and FIN.
	 * - otherwise do a normal 3-way handshake.
	 */
	taop = tcp_gettaocache(&sc->sc_inc);
	if ((to->to_flags & TOF_CC) != 0) {
		if (((tp->t_flags & TF_NOPUSH) != 0) &&
		    sc->sc_flags & SCF_CC &&
		    taop != NULL && taop->tao_cc != 0 &&
		    CC_GT(to->to_cc, taop->tao_cc)) {
			sc->sc_rxtslot = 0;
			so = syncache_socket(sc, *sop);
			if (so != NULL) {
				sc->sc_flags |= SCF_KEEPROUTE;
				taop->tao_cc = to->to_cc;
				*sop = so;
			}
			syncache_free(sc);
			return (so != NULL);
		}
	} else {
		/*
		 * No CC option, but maybe CC.NEW: invalidate cached value.
		 */
		if (taop != NULL)
			taop->tao_cc = 0;
	}
	/*
	 * TAO test failed or there was no CC option,
	 * do a standard 3-way handshake.
	 */
	if (syncache_respond(sc, m) == 0) {
		syncache_insert(sc, sch);
		tcpstat.tcps_sndacks++;
		tcpstat.tcps_sndtotal++;
	} else {
		syncache_free(sc);
		tcpstat.tcps_sc_dropped++;
	}
	*sop = NULL;
	return (1);
}

static int
syncache_respond(sc, m)
	struct syncache *sc;
	struct mbuf *m;
{
	u_int8_t *optp;
	int optlen, error;
	u_int16_t tlen, hlen, mssopt;
	struct ip *ip = NULL;
	struct rtentry *rt;
	struct tcphdr *th;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
#endif

#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		rt = tcp_rtlookup6(&sc->sc_inc);
		if (rt != NULL)
			mssopt = rt->rt_ifp->if_mtu -
			    (sizeof(struct ip6_hdr) + sizeof(struct tcphdr));
		else
			mssopt = tcp_v6mssdflt;
		hlen = sizeof(struct ip6_hdr);
	} else
#endif
	{
		rt = tcp_rtlookup(&sc->sc_inc);
		if (rt != NULL)
			mssopt = rt->rt_ifp->if_mtu -
			    (sizeof(struct ip) + sizeof(struct tcphdr));
		else
			mssopt = tcp_mssdflt;
		hlen = sizeof(struct ip);
	}

	/* Compute the size of the TCP options. */
	if (sc->sc_flags & SCF_NOOPT) {
		optlen = 0;
	} else {
		optlen = TCPOLEN_MAXSEG +
		    ((sc->sc_flags & SCF_WINSCALE) ? 4 : 0) +
		    ((sc->sc_flags & SCF_TIMESTAMP) ? TCPOLEN_TSTAMP_APPA : 0) +
		    ((sc->sc_flags & SCF_CC) ? TCPOLEN_CC_APPA * 2 : 0);
	}
	tlen = hlen + sizeof(struct tcphdr) + optlen;

	/*
	 * XXX
	 * assume that the entire packet will fit in a header mbuf
	 */
	KASSERT(max_linkhdr + tlen <= MHLEN, ("syncache: mbuf too small"));

	/*
	 * XXX shouldn't this reuse the mbuf if possible ?
	 * Create the IP+TCP header from scratch.
	 */
	if (m)
		m_freem(m);

	m = m_gethdr(MB_DONTWAIT, MT_HEADER);
	if (m == NULL)
		return (ENOBUFS);
	m->m_data += max_linkhdr;
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = NULL;

#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_src = sc->sc_inc.inc6_laddr;
		ip6->ip6_dst = sc->sc_inc.inc6_faddr;
		ip6->ip6_plen = htons(tlen - hlen);
		/* ip6_hlim is set after checksum */
		/* ip6_flow = ??? */

		th = (struct tcphdr *)(ip6 + 1);
	} else
#endif
	{
		ip = mtod(m, struct ip *);
		ip->ip_v = IPVERSION;
		ip->ip_hl = sizeof(struct ip) >> 2;
		ip->ip_len = tlen;
		ip->ip_id = 0;
		ip->ip_off = 0;
		ip->ip_sum = 0;
		ip->ip_p = IPPROTO_TCP;
		ip->ip_src = sc->sc_inc.inc_laddr;
		ip->ip_dst = sc->sc_inc.inc_faddr;
		ip->ip_ttl = sc->sc_tp->t_inpcb->inp_ip_ttl;	/* XXX */
		ip->ip_tos = sc->sc_tp->t_inpcb->inp_ip_tos;	/* XXX */

		/*
		 * See if we should do MTU discovery.  Route lookups are
		 * expensive, so we will only unset the DF bit if:
		 *
		 *	1) path_mtu_discovery is disabled
		 *	2) the SCF_UNREACH flag has been set
		 */
		if (path_mtu_discovery
		    && ((sc->sc_flags & SCF_UNREACH) == 0)) {
			ip->ip_off |= IP_DF;
		}

		th = (struct tcphdr *)(ip + 1);
	}
	th->th_sport = sc->sc_inc.inc_lport;
	th->th_dport = sc->sc_inc.inc_fport;

	th->th_seq = htonl(sc->sc_iss);
	th->th_ack = htonl(sc->sc_irs + 1);
	th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
	th->th_x2 = 0;
	th->th_flags = TH_SYN|TH_ACK;
	th->th_win = htons(sc->sc_wnd);
	th->th_urp = 0;

	/* Tack on the TCP options. */
	if (optlen == 0)
		goto no_options;
	optp = (u_int8_t *)(th + 1);
	*optp++ = TCPOPT_MAXSEG;
	*optp++ = TCPOLEN_MAXSEG;
	*optp++ = (mssopt >> 8) & 0xff;
	*optp++ = mssopt & 0xff;

	if (sc->sc_flags & SCF_WINSCALE) {
		*((u_int32_t *)optp) = htonl(TCPOPT_NOP << 24 |
		    TCPOPT_WINDOW << 16 | TCPOLEN_WINDOW << 8 |
		    sc->sc_request_r_scale);
		optp += 4;
	}

	if (sc->sc_flags & SCF_TIMESTAMP) {
		u_int32_t *lp = (u_int32_t *)(optp);

		/* Form timestamp option as shown in appendix A of RFC 1323. */
		*lp++ = htonl(TCPOPT_TSTAMP_HDR);
		*lp++ = htonl(ticks);
		*lp = htonl(sc->sc_tsrecent);
		optp += TCPOLEN_TSTAMP_APPA;
	}

	/*
	 * Send CC and CC.echo if we received CC from our peer.
	 */
	if (sc->sc_flags & SCF_CC) {
		u_int32_t *lp = (u_int32_t *)(optp);

		*lp++ = htonl(TCPOPT_CC_HDR(TCPOPT_CC));
		*lp++ = htonl(sc->sc_cc_send);
		*lp++ = htonl(TCPOPT_CC_HDR(TCPOPT_CCECHO));
		*lp = htonl(sc->sc_cc_recv);
		optp += TCPOLEN_CC_APPA * 2;
	}
no_options:

#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		struct route_in6 *ro6 = &sc->sc_route6;

		th->th_sum = 0;
		th->th_sum = in6_cksum(m, IPPROTO_TCP, hlen, tlen - hlen);
		ip6->ip6_hlim = in6_selecthlim(NULL,
		    ro6->ro_rt ? ro6->ro_rt->rt_ifp : NULL);
		error = ip6_output(m, NULL, ro6, 0, NULL, NULL,
		    sc->sc_tp->t_inpcb);
	} else
#endif
	{
		th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(tlen - hlen + IPPROTO_TCP));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
		error = ip_output(m, sc->sc_ipopts, &sc->sc_route, 0, NULL,
		    sc->sc_tp->t_inpcb);
	}
	return (error);
}

/*
 * cookie layers:
 *
 *	|. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .|
 *	| peer iss                                                      |
 *	| MD5(laddr,faddr,secret,lport,fport)             |. . . . . . .|
 *	|                     0                       |(A)|             |
 *	(A): peer mss index
 */

/*
 * The values below are chosen to minimize the size of the tcp_secret
 * table, as well as providing roughly a 16 second lifetime for the cookie.
 */

#define SYNCOOKIE_WNDBITS	5	/* exposed bits for window indexing */
#define SYNCOOKIE_TIMESHIFT	1	/* scale ticks to window time units */

#define SYNCOOKIE_WNDMASK	((1 << SYNCOOKIE_WNDBITS) - 1)
#define SYNCOOKIE_NSECRETS	(1 << SYNCOOKIE_WNDBITS)
#define SYNCOOKIE_TIMEOUT \
    (hz * (1 << SYNCOOKIE_WNDBITS) / (1 << SYNCOOKIE_TIMESHIFT))
#define SYNCOOKIE_DATAMASK	((3 << SYNCOOKIE_WNDBITS) | SYNCOOKIE_WNDMASK)

static struct {
	u_int32_t	ts_secbits[4];
	u_int		ts_expire;
} tcp_secret[SYNCOOKIE_NSECRETS];

static int tcp_msstab[] = { 0, 536, 1460, 8960 };

static MD5_CTX syn_ctx;

#define MD5Add(v)	MD5Update(&syn_ctx, (u_char *)&v, sizeof(v))

struct md5_add {
	u_int32_t laddr, faddr;
	u_int32_t secbits[4];
	u_int16_t lport, fport;
};

#ifdef CTASSERT
CTASSERT(sizeof(struct md5_add) == 28);
#endif

/*
 * Consider the problem of a recreated (and retransmitted) cookie.  If the
 * original SYN was accepted, the connection is established.  The second
 * SYN is inflight, and if it arrives with an ISN that falls within the
 * receive window, the connection is killed.
 *
 * However, since cookies have other problems, this may not be worth
 * worrying about.
 */
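
/*
 * Worked example of the constants above, assuming hz = 100 (a sketch;
 * substitute your kernel's hz): the secret-window index advances every
 * hz >> SYNCOOKIE_TIMESHIFT = 50 ticks (0.5 s), so cycling through all
 * 32 windows takes 16 seconds, which is exactly SYNCOOKIE_TIMEOUT =
 * 100 * 32 / 2 = 1600 ticks.  The cookie itself packs the 2-bit
 * tcp_msstab[] index above the 5-bit window index, then XORs in the
 * peer's ISS and the low MD5 word, as syncookie_generate() shows below.
 */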

static u_int32_t
syncookie_generate(struct syncache *sc)
{
	u_int32_t md5_buffer[4];
	u_int32_t data;
	int idx, i;
	struct md5_add add;

	idx = ((ticks << SYNCOOKIE_TIMESHIFT) / hz) & SYNCOOKIE_WNDMASK;
	if (tcp_secret[idx].ts_expire < ticks) {
		for (i = 0; i < 4; i++)
			tcp_secret[idx].ts_secbits[i] = arc4random();
		tcp_secret[idx].ts_expire = ticks + SYNCOOKIE_TIMEOUT;
	}
	for (data = sizeof(tcp_msstab) / sizeof(int) - 1; data > 0; data--)
		if (tcp_msstab[data] <= sc->sc_peer_mss)
			break;
	data = (data << SYNCOOKIE_WNDBITS) | idx;
	data ^= sc->sc_irs;				/* peer's iss */
	MD5Init(&syn_ctx);
#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		MD5Add(sc->sc_inc.inc6_laddr);
		MD5Add(sc->sc_inc.inc6_faddr);
		add.laddr = 0;
		add.faddr = 0;
	} else
#endif
	{
		add.laddr = sc->sc_inc.inc_laddr.s_addr;
		add.faddr = sc->sc_inc.inc_faddr.s_addr;
	}
	add.lport = sc->sc_inc.inc_lport;
	add.fport = sc->sc_inc.inc_fport;
	add.secbits[0] = tcp_secret[idx].ts_secbits[0];
	add.secbits[1] = tcp_secret[idx].ts_secbits[1];
	add.secbits[2] = tcp_secret[idx].ts_secbits[2];
	add.secbits[3] = tcp_secret[idx].ts_secbits[3];
	MD5Add(add);
	MD5Final((u_char *)&md5_buffer, &syn_ctx);
	data ^= (md5_buffer[0] & ~SYNCOOKIE_WNDMASK);
	return (data);
}

static struct syncache *
syncookie_lookup(inc, th, so)
	struct in_conninfo *inc;
	struct tcphdr *th;
	struct socket *so;
{
	u_int32_t md5_buffer[4];
	struct syncache *sc;
	u_int32_t data;
	int wnd, idx;
	struct md5_add add;

	data = (th->th_ack - 1) ^ (th->th_seq - 1);	/* remove ISS */
	idx = data & SYNCOOKIE_WNDMASK;
	if (tcp_secret[idx].ts_expire < ticks ||
	    sototcpcb(so)->ts_recent + SYNCOOKIE_TIMEOUT < ticks)
		return (NULL);
	MD5Init(&syn_ctx);
#ifdef INET6
	if (inc->inc_isipv6) {
		MD5Add(inc->inc6_laddr);
		MD5Add(inc->inc6_faddr);
		add.laddr = 0;
		add.faddr = 0;
	} else
#endif
	{
		add.laddr = inc->inc_laddr.s_addr;
		add.faddr = inc->inc_faddr.s_addr;
	}
	add.lport = inc->inc_lport;
	add.fport = inc->inc_fport;
	add.secbits[0] = tcp_secret[idx].ts_secbits[0];
	add.secbits[1] = tcp_secret[idx].ts_secbits[1];
	add.secbits[2] = tcp_secret[idx].ts_secbits[2];
	add.secbits[3] = tcp_secret[idx].ts_secbits[3];
	MD5Add(add);
	MD5Final((u_char *)&md5_buffer, &syn_ctx);
	data ^= md5_buffer[0];
	if ((data & ~SYNCOOKIE_DATAMASK) != 0)
		return (NULL);
	data = data >> SYNCOOKIE_WNDBITS;

	/*
	 * This allocation is guaranteed to succeed because we
	 * preallocate one more syncache entry than cache_limit.
	 */
	sc = zalloc(tcp_syncache.zone);

	/*
	 * Fill in the syncache values.
	 * XXX duplicate code from syncache_add
	 */
	sc->sc_ipopts = NULL;
	sc->sc_inc.inc_fport = inc->inc_fport;
	sc->sc_inc.inc_lport = inc->inc_lport;
#ifdef INET6
	sc->sc_inc.inc_isipv6 = inc->inc_isipv6;
	if (inc->inc_isipv6) {
		sc->sc_inc.inc6_faddr = inc->inc6_faddr;
		sc->sc_inc.inc6_laddr = inc->inc6_laddr;
		sc->sc_route6.ro_rt = NULL;
	} else
#endif
	{
		sc->sc_inc.inc_faddr = inc->inc_faddr;
		sc->sc_inc.inc_laddr = inc->inc_laddr;
		sc->sc_route.ro_rt = NULL;
	}
	sc->sc_irs = th->th_seq - 1;
	sc->sc_iss = th->th_ack - 1;
	wnd = sbspace(&so->so_rcv);
	wnd = imax(wnd, 0);
	wnd = imin(wnd, TCP_MAXWIN);
	sc->sc_wnd = wnd;
	sc->sc_flags = 0;
	sc->sc_rxtslot = 0;
	sc->sc_peer_mss = tcp_msstab[data];
	return (sc);
}
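
/*
 * Decoding recap (descriptive of the code above): the returning ACK
 * carries our cookie in th_ack - 1 and the peer's original ISS in
 * th_seq - 1, so XORing the two recovers the cookie data.  The low
 * 5 bits select the secret window; once the MD5 output is XORed back
 * out, the upper 25 bits (~SYNCOOKIE_DATAMASK) must be zero for the
 * cookie to validate, and the remaining 2 bits index tcp_msstab[] to
 * rebuild sc_peer_mss.
 */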