/* $OpenBSD: ip_spd.c,v 1.99 2018/10/22 15:32:19 cheloha Exp $ */
/*
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * Copyright (c) 2000-2001 Angelos D. Keromytis.
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all copies of any software which is or includes a copy or
 * modification of this software.
 * You may use this code under the GNU public license if you so wish. Please
 * contribute changes back to the authors under this freer than GPL license
 * so that we may further the use of strong encryption without limitations to
 * all.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/kernel.h>
#include <sys/socketvar.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/pool.h>
#include <sys/timeout.h>

#include <net/route.h>
#include <net/netisr.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_ipsp.h>
#include <net/pfkeyv2.h>

int	ipsp_acquire_sa(struct ipsec_policy *, union sockaddr_union *,
	    union sockaddr_union *, struct sockaddr_encap *, struct mbuf *);
struct	ipsec_acquire *ipsp_pending_acquire(struct ipsec_policy *,
	    union sockaddr_union *);
void	ipsp_delete_acquire_timo(void *);
void	ipsp_delete_acquire(struct ipsec_acquire *);

struct pool ipsec_policy_pool;
struct pool ipsec_acquire_pool;
int ipsec_policy_pool_initialized = 0;

/* Protected by the NET_LOCK(). */
int ipsec_acquire_pool_initialized = 0;
struct radix_node_head **spd_tables;
unsigned int spd_table_max;
TAILQ_HEAD(ipsec_acquire_head, ipsec_acquire) ipsec_acquire_head =
    TAILQ_HEAD_INITIALIZER(ipsec_acquire_head);

struct radix_node_head *
spd_table_get(unsigned int rtableid)
{
	unsigned int rdomain;

	NET_ASSERT_LOCKED();

	if (spd_tables == NULL)
		return (NULL);

	rdomain = rtable_l2(rtableid);
	if (rdomain > spd_table_max)
		return (NULL);

	return (spd_tables[rdomain]);
}

struct radix_node_head *
spd_table_add(unsigned int rtableid)
{
	struct radix_node_head *rnh = NULL;
	unsigned int rdomain;
	void *p;

	NET_ASSERT_LOCKED();

	rdomain = rtable_l2(rtableid);
	if (spd_tables == NULL || rdomain > spd_table_max) {
		if ((p = mallocarray(rdomain + 1, sizeof(*rnh),
		    M_RTABLE, M_NOWAIT|M_ZERO)) == NULL)
			return (NULL);

		if (spd_tables != NULL) {
			memcpy(p, spd_tables, sizeof(*rnh) * (spd_table_max+1));
			free(spd_tables, M_RTABLE, 0);
		}
		spd_tables = p;
		spd_table_max = rdomain;
	}

	if (spd_tables[rdomain] == NULL) {
		if (rn_inithead((void **)&rnh,
		    offsetof(struct sockaddr_encap, sen_type)) == 0)
			rnh = NULL;
		spd_tables[rdomain] = rnh;
	}

	return (spd_tables[rdomain]);
}

int
spd_table_walk(unsigned int rtableid,
    int (*func)(struct ipsec_policy *, void *, unsigned int), void *arg)
{
	struct radix_node_head *rnh;
	int (*walker)(struct radix_node *, void *, u_int) = (void *)func;
	int error;

	rnh = spd_table_get(rtableid);
	if (rnh == NULL)
		return (0);

	/* EAGAIN means the tree changed. */
	while ((error = rn_walktree(rnh, walker, arg)) == EAGAIN)
		continue;

	return (error);
}

/*
 * Look up the SPD based on the headers contained in the mbuf. The second
 * argument indicates what protocol family the header at the beginning of
 * the mbuf is. hlen is the offset of the transport protocol header
 * in the mbuf.
 *
 * Return combinations (of return value and in *error):
 * - NULL/0 -> no IPsec required on packet
 * - NULL/-EINVAL -> silently drop the packet
 * - NULL/errno -> drop packet and return error
 * or a pointer to a TDB (and 0 in *error).
 *
 * In the case of incoming flows, only the first three combinations are
 * returned.
 */
struct tdb *
ipsp_spd_lookup(struct mbuf *m, int af, int hlen, int *error, int direction,
    struct tdb *tdbp, struct inpcb *inp, u_int32_t ipsecflowinfo)
{
	struct radix_node_head *rnh;
	struct radix_node *rn;
	union sockaddr_union sdst, ssrc;
	struct sockaddr_encap *ddst, dst;
	struct ipsec_policy *ipo;
	struct ipsec_ids *ids = NULL;
	int signore = 0, dignore = 0;
	u_int rdomain = rtable_l2(m->m_pkthdr.ph_rtableid);

	NET_ASSERT_LOCKED();

	/*
	 * If there are no flows in place, there's no point
	 * continuing with the SPD lookup.
	 */
	if (!ipsec_in_use && inp == NULL) {
		*error = 0;
		return NULL;
	}

	/*
	 * If an input packet is destined to a BYPASS socket, just accept it.
	 */
	if ((inp != NULL) && (direction == IPSP_DIRECTION_IN) &&
	    (inp->inp_seclevel[SL_ESP_TRANS] == IPSEC_LEVEL_BYPASS) &&
	    (inp->inp_seclevel[SL_ESP_NETWORK] == IPSEC_LEVEL_BYPASS) &&
	    (inp->inp_seclevel[SL_AUTH] == IPSEC_LEVEL_BYPASS)) {
		*error = 0;
		return NULL;
	}

	memset(&dst, 0, sizeof(dst));
	memset(&sdst, 0, sizeof(union sockaddr_union));
	memset(&ssrc, 0, sizeof(union sockaddr_union));
	ddst = (struct sockaddr_encap *)&dst;
	ddst->sen_family = PF_KEY;
	ddst->sen_len = SENT_LEN;

	switch (af) {
	case AF_INET:
		if (hlen < sizeof (struct ip) || m->m_pkthdr.len < hlen) {
			*error = EINVAL;
			return NULL;
		}
		ddst->sen_direction = direction;
		ddst->sen_type = SENT_IP4;

		m_copydata(m, offsetof(struct ip, ip_src),
		    sizeof(struct in_addr), (caddr_t) &(ddst->sen_ip_src));
		m_copydata(m, offsetof(struct ip, ip_dst),
		    sizeof(struct in_addr), (caddr_t) &(ddst->sen_ip_dst));
		m_copydata(m, offsetof(struct ip, ip_p), sizeof(u_int8_t),
		    (caddr_t) &(ddst->sen_proto));

		sdst.sin.sin_family = ssrc.sin.sin_family = AF_INET;
		sdst.sin.sin_len = ssrc.sin.sin_len =
		    sizeof(struct sockaddr_in);
		ssrc.sin.sin_addr = ddst->sen_ip_src;
		sdst.sin.sin_addr = ddst->sen_ip_dst;

		/*
		 * If TCP/UDP, extract the port numbers to use in the lookup.
		 */
		switch (ddst->sen_proto) {
		case IPPROTO_UDP:
		case IPPROTO_TCP:
			/* Make sure there's enough data in the packet. */
			if (m->m_pkthdr.len < hlen + 2 * sizeof(u_int16_t)) {
				*error = EINVAL;
				return NULL;
			}

			/*
			 * Luckily, the offset of the src/dst ports in
			 * both the UDP and TCP headers is the same (first
			 * two 16-bit values in the respective headers),
			 * so we can just copy them.
			 */
			m_copydata(m, hlen, sizeof(u_int16_t),
			    (caddr_t) &(ddst->sen_sport));
			m_copydata(m, hlen + sizeof(u_int16_t), sizeof(u_int16_t),
			    (caddr_t) &(ddst->sen_dport));
			break;

		default:
			ddst->sen_sport = 0;
			ddst->sen_dport = 0;
		}

		break;

#ifdef INET6
	case AF_INET6:
		if (hlen < sizeof (struct ip6_hdr) || m->m_pkthdr.len < hlen) {
			*error = EINVAL;
			return NULL;
		}
		ddst->sen_type = SENT_IP6;
		ddst->sen_ip6_direction = direction;

		m_copydata(m, offsetof(struct ip6_hdr, ip6_src),
		    sizeof(struct in6_addr),
		    (caddr_t) &(ddst->sen_ip6_src));
		m_copydata(m, offsetof(struct ip6_hdr, ip6_dst),
		    sizeof(struct in6_addr),
		    (caddr_t) &(ddst->sen_ip6_dst));
		m_copydata(m, offsetof(struct ip6_hdr, ip6_nxt),
		    sizeof(u_int8_t),
		    (caddr_t) &(ddst->sen_ip6_proto));

		sdst.sin6.sin6_family = ssrc.sin6.sin6_family = AF_INET6;
		sdst.sin6.sin6_len = ssrc.sin6.sin6_len =
		    sizeof(struct sockaddr_in6);
		in6_recoverscope(&ssrc.sin6, &ddst->sen_ip6_src);
		in6_recoverscope(&sdst.sin6, &ddst->sen_ip6_dst);

		/*
		 * If TCP/UDP, extract the port numbers to use in the lookup.
		 */
		switch (ddst->sen_ip6_proto) {
		case IPPROTO_UDP:
		case IPPROTO_TCP:
			/* Make sure there's enough data in the packet. */
			if (m->m_pkthdr.len < hlen + 2 * sizeof(u_int16_t)) {
				*error = EINVAL;
				return NULL;
			}

			/*
			 * Luckily, the offset of the src/dst ports in
			 * both the UDP and TCP headers is the same
			 * (first two 16-bit values in the respective
			 * headers), so we can just copy them.
			 */
			m_copydata(m, hlen, sizeof(u_int16_t),
			    (caddr_t) &(ddst->sen_ip6_sport));
			m_copydata(m, hlen + sizeof(u_int16_t), sizeof(u_int16_t),
			    (caddr_t) &(ddst->sen_ip6_dport));
			break;

		default:
			ddst->sen_ip6_sport = 0;
			ddst->sen_ip6_dport = 0;
		}

		break;
#endif /* INET6 */

	default:
		*error = EAFNOSUPPORT;
		return NULL;
	}

	/* Actual SPD lookup. */
	if ((rnh = spd_table_get(rdomain)) == NULL ||
	    (rn = rn_match((caddr_t)&dst, rnh)) == NULL) {
		/*
		 * Return whatever the socket requirements are; there are no
		 * system-wide policies.
		 */
		*error = 0;
		return ipsp_spd_inp(m, af, hlen, error, direction,
		    tdbp, inp, NULL);
	}
	ipo = (struct ipsec_policy *)rn;

	switch (ipo->ipo_type) {
	case IPSP_PERMIT:
		*error = 0;
		return ipsp_spd_inp(m, af, hlen, error, direction, tdbp,
		    inp, ipo);

	case IPSP_DENY:
		*error = EHOSTUNREACH;
		return NULL;

	case IPSP_IPSEC_USE:
	case IPSP_IPSEC_ACQUIRE:
	case IPSP_IPSEC_REQUIRE:
	case IPSP_IPSEC_DONTACQ:
		/* Nothing more needed here. */
		break;

	default:
		*error = EINVAL;
		return NULL;
	}

	/* Check for non-specific destination in the policy. */
	switch (ipo->ipo_dst.sa.sa_family) {
	case AF_INET:
		if ((ipo->ipo_dst.sin.sin_addr.s_addr == INADDR_ANY) ||
		    (ipo->ipo_dst.sin.sin_addr.s_addr == INADDR_BROADCAST))
			dignore = 1;
		break;

#ifdef INET6
	case AF_INET6:
		if ((IN6_IS_ADDR_UNSPECIFIED(&ipo->ipo_dst.sin6.sin6_addr)) ||
		    (memcmp(&ipo->ipo_dst.sin6.sin6_addr, &in6mask128,
		    sizeof(in6mask128)) == 0))
			dignore = 1;
		break;
#endif /* INET6 */
	}

	/* Likewise for source. */
	switch (ipo->ipo_src.sa.sa_family) {
	case AF_INET:
		if (ipo->ipo_src.sin.sin_addr.s_addr == INADDR_ANY)
			signore = 1;
		break;

#ifdef INET6
	case AF_INET6:
		if (IN6_IS_ADDR_UNSPECIFIED(&ipo->ipo_src.sin6.sin6_addr))
			signore = 1;
		break;
#endif /* INET6 */
	}

	/* Do we have a cached entry? If so, check if it's still valid. */
	if ((ipo->ipo_tdb) && (ipo->ipo_tdb->tdb_flags & TDBF_INVALID)) {
		TAILQ_REMOVE(&ipo->ipo_tdb->tdb_policy_head, ipo,
		    ipo_tdb_next);
		ipo->ipo_tdb = NULL;
	}

	/* Outgoing packet policy check. */
	if (direction == IPSP_DIRECTION_OUT) {
		/*
		 * If the packet is destined for the policy-specified
		 * gateway/endhost, and the socket has the BYPASS
		 * option set, skip IPsec processing.
		 */
		if ((inp != NULL) &&
		    (inp->inp_seclevel[SL_ESP_TRANS] == IPSEC_LEVEL_BYPASS) &&
		    (inp->inp_seclevel[SL_ESP_NETWORK] ==
		    IPSEC_LEVEL_BYPASS) &&
		    (inp->inp_seclevel[SL_AUTH] == IPSEC_LEVEL_BYPASS)) {
			/* Direct match. */
			if (dignore ||
			    !memcmp(&sdst, &ipo->ipo_dst, sdst.sa.sa_len)) {
				*error = 0;
				return NULL;
			}
		}

		if (ipsecflowinfo)
			ids = ipsp_ids_lookup(ipsecflowinfo);

		/* Check that the cached TDB (if present) is appropriate. */
		if (ipo->ipo_tdb) {
			if ((ipo->ipo_last_searched <= ipsec_last_added) ||
			    (ipo->ipo_sproto != ipo->ipo_tdb->tdb_sproto) ||
			    memcmp(dignore ? &sdst : &ipo->ipo_dst,
			    &ipo->ipo_tdb->tdb_dst,
			    ipo->ipo_tdb->tdb_dst.sa.sa_len))
				goto nomatchout;

			if (!ipsp_aux_match(ipo->ipo_tdb,
			    ids ? ids : ipo->ipo_ids,
			    &ipo->ipo_addr, &ipo->ipo_mask))
				goto nomatchout;

			/* Cached entry is good. */
			*error = 0;
			return ipsp_spd_inp(m, af, hlen, error, direction,
			    tdbp, inp, ipo);

  nomatchout:
			/* Cached TDB was not good. */
			TAILQ_REMOVE(&ipo->ipo_tdb->tdb_policy_head, ipo,
			    ipo_tdb_next);
			ipo->ipo_tdb = NULL;
			ipo->ipo_last_searched = 0;
		}

		/*
		 * If no SA has been added since the last time we did a
		 * lookup, there's no point searching for one. However, if the
		 * destination gateway is left unspecified (or is all-1's),
		 * always look up since this is a generic-match rule
		 * (otherwise, we can have situations where SAs to some
		 * destinations exist but are not used, possibly leading to an
		 * explosion in the number of acquired SAs).
		 */
		if (ipo->ipo_last_searched <= ipsec_last_added) {
			/* "Touch" the entry. */
			if (dignore == 0)
				ipo->ipo_last_searched = time_uptime;

			/* Find an appropriate SA from the existing ones. */
			ipo->ipo_tdb =
			    gettdbbydst(rdomain,
				dignore ? &sdst : &ipo->ipo_dst,
				ipo->ipo_sproto,
				ids ? ids: ipo->ipo_ids,
				&ipo->ipo_addr, &ipo->ipo_mask);
			if (ipo->ipo_tdb) {
				TAILQ_INSERT_TAIL(&ipo->ipo_tdb->tdb_policy_head,
				    ipo, ipo_tdb_next);
				*error = 0;
				return ipsp_spd_inp(m, af, hlen, error,
				    direction, tdbp, inp, ipo);
			}
		}

		/* So, we don't have an SA -- just a policy. */
		switch (ipo->ipo_type) {
		case IPSP_IPSEC_REQUIRE:
			/* Acquire SA through key management. */
			if (ipsp_acquire_sa(ipo,
			    dignore ? &sdst : &ipo->ipo_dst,
			    signore ? NULL : &ipo->ipo_src, ddst, m) != 0) {
				*error = EACCES;
				return NULL;
			}

			/* FALLTHROUGH */
		case IPSP_IPSEC_DONTACQ:
			*error = -EINVAL; /* Silently drop packet. */
			return NULL;

		case IPSP_IPSEC_ACQUIRE:
			/* Acquire SA through key management. */
			ipsp_acquire_sa(ipo, dignore ? &sdst : &ipo->ipo_dst,
			    signore ? NULL : &ipo->ipo_src, ddst, NULL);

			/* FALLTHROUGH */
		case IPSP_IPSEC_USE:
			*error = 0;
			return ipsp_spd_inp(m, af, hlen, error, direction,
			    tdbp, inp, ipo);
		}
	} else { /* IPSP_DIRECTION_IN */
		if (tdbp != NULL) {
			/* Direct match in the cache. */
			if (ipo->ipo_tdb == tdbp) {
				*error = 0;
				return ipsp_spd_inp(m, af, hlen, error,
				    direction, tdbp, inp, ipo);
			}

			if (memcmp(dignore ? &ssrc : &ipo->ipo_dst,
			    &tdbp->tdb_src, tdbp->tdb_src.sa.sa_len) ||
			    (ipo->ipo_sproto != tdbp->tdb_sproto))
				goto nomatchin;

			/* Match source/dest IDs. */
			if (ipo->ipo_ids)
				if (tdbp->tdb_ids == NULL ||
				    !ipsp_ids_match(ipo->ipo_ids, tdbp->tdb_ids))
					goto nomatchin;

			/* Add it to the cache. */
			if (ipo->ipo_tdb)
				TAILQ_REMOVE(&ipo->ipo_tdb->tdb_policy_head,
				    ipo, ipo_tdb_next);
			ipo->ipo_tdb = tdbp;
			TAILQ_INSERT_TAIL(&tdbp->tdb_policy_head, ipo,
			    ipo_tdb_next);
			*error = 0;
			return ipsp_spd_inp(m, af, hlen, error, direction,
			    tdbp, inp, ipo);

  nomatchin: /* Nothing needed here, falling through */
			;
		}

		/* Check whether cached entry applies. */
		if (ipo->ipo_tdb) {
			/*
			 * We only need to check that the correct
			 * security protocol and security gateway are
			 * set; IDs will be the same since the cached
			 * entry is linked on this policy.
			 */
			if (ipo->ipo_sproto == ipo->ipo_tdb->tdb_sproto &&
			    !memcmp(&ipo->ipo_tdb->tdb_src,
			    dignore ? &ssrc : &ipo->ipo_dst,
			    ipo->ipo_tdb->tdb_src.sa.sa_len))
				goto skipinputsearch;

			/* Not applicable, unlink. */
			TAILQ_REMOVE(&ipo->ipo_tdb->tdb_policy_head, ipo,
			    ipo_tdb_next);
			ipo->ipo_last_searched = 0;
			ipo->ipo_tdb = NULL;
		}

		/* Find whether there exists an appropriate SA. */
		if (ipo->ipo_last_searched <= ipsec_last_added) {
			if (dignore == 0)
				ipo->ipo_last_searched = time_uptime;

			ipo->ipo_tdb =
			    gettdbbysrc(rdomain,
				dignore ? &ssrc : &ipo->ipo_dst,
				ipo->ipo_sproto, ipo->ipo_ids,
				&ipo->ipo_addr, &ipo->ipo_mask);
			if (ipo->ipo_tdb)
				TAILQ_INSERT_TAIL(&ipo->ipo_tdb->tdb_policy_head,
				    ipo, ipo_tdb_next);
		}
  skipinputsearch:

		switch (ipo->ipo_type) {
		case IPSP_IPSEC_REQUIRE:
			/* If appropriate SA exists, don't acquire another. */
			if (ipo->ipo_tdb) {
				*error = -EINVAL;
				return NULL;
			}

			/* Acquire SA through key management. */
			if ((*error = ipsp_acquire_sa(ipo,
			    dignore ? &ssrc : &ipo->ipo_dst,
			    signore ? NULL : &ipo->ipo_src, ddst, m)) != 0)
				return NULL;

			/* FALLTHROUGH */
		case IPSP_IPSEC_DONTACQ:
			/* Drop packet. */
			*error = -EINVAL;
			return NULL;

		case IPSP_IPSEC_ACQUIRE:
			/* If appropriate SA exists, don't acquire another. */
			if (ipo->ipo_tdb) {
				*error = 0;
				return ipsp_spd_inp(m, af, hlen, error,
				    direction, tdbp, inp, ipo);
			}

			/* Acquire SA through key management. */
			ipsp_acquire_sa(ipo, dignore ? &ssrc : &ipo->ipo_dst,
			    signore ? NULL : &ipo->ipo_src, ddst, NULL);

			/* FALLTHROUGH */
		case IPSP_IPSEC_USE:
			*error = 0;
			return ipsp_spd_inp(m, af, hlen, error, direction,
			    tdbp, inp, ipo);
		}
	}

	/* Shouldn't ever get this far. */
	*error = EINVAL;
	return NULL;
}

/*
 * Delete a policy from the SPD.
 */
int
ipsec_delete_policy(struct ipsec_policy *ipo)
{
	struct ipsec_acquire *ipa;
	struct radix_node_head *rnh;
	struct radix_node *rn = (struct radix_node *)ipo;
	int err = 0;

	NET_ASSERT_LOCKED();

	if (--ipo->ipo_ref_count > 0)
		return 0;

	/* Delete from SPD. */
	if ((rnh = spd_table_get(ipo->ipo_rdomain)) == NULL ||
	    rn_delete(&ipo->ipo_addr, &ipo->ipo_mask, rnh, rn) == NULL)
		return (ESRCH);

	if (ipo->ipo_tdb != NULL)
		TAILQ_REMOVE(&ipo->ipo_tdb->tdb_policy_head, ipo,
		    ipo_tdb_next);

	while ((ipa = TAILQ_FIRST(&ipo->ipo_acquires)) != NULL)
		ipsp_delete_acquire(ipa);

	TAILQ_REMOVE(&ipsec_policy_head, ipo, ipo_list);

	if (ipo->ipo_ids)
		ipsp_ids_free(ipo->ipo_ids);

	ipsec_in_use--;

	pool_put(&ipsec_policy_pool, ipo);

	return err;
}

void
ipsp_delete_acquire_timo(void *v)
{
	struct ipsec_acquire *ipa = v;

	NET_LOCK();
	ipsp_delete_acquire(ipa);
	NET_UNLOCK();
}

/*
 * Delete a pending IPsec acquire record.
 */
void
ipsp_delete_acquire(struct ipsec_acquire *ipa)
{
	NET_ASSERT_LOCKED();

	timeout_del(&ipa->ipa_timeout);
	TAILQ_REMOVE(&ipsec_acquire_head, ipa, ipa_next);
	if (ipa->ipa_policy != NULL)
		TAILQ_REMOVE(&ipa->ipa_policy->ipo_acquires, ipa,
		    ipa_ipo_next);
	pool_put(&ipsec_acquire_pool, ipa);
}
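
/*
 * Summary of the acquire lifecycle as implemented in this file: a record is
 * created by ipsp_acquire_sa() and linked on both ipsec_acquire_head and its
 * policy's ipo_acquires list; it is torn down via ipsp_delete_acquire(),
 * e.g. when the ipsec_expire_acquire timeout fires or when the owning
 * policy is deleted.
 */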

/*
 * Find out if there's an ACQUIRE pending.
 * XXX Need a better structure.
 */
struct ipsec_acquire *
ipsp_pending_acquire(struct ipsec_policy *ipo, union sockaddr_union *gw)
{
	struct ipsec_acquire *ipa;

	NET_ASSERT_LOCKED();

	TAILQ_FOREACH (ipa, &ipo->ipo_acquires, ipa_ipo_next) {
		if (!memcmp(gw, &ipa->ipa_addr, gw->sa.sa_len))
			return ipa;
	}

	return NULL;
}

/*
 * Signal key management that we need an SA.
 * XXX For outgoing policies, we could try to hold on to the mbuf.
 */
int
ipsp_acquire_sa(struct ipsec_policy *ipo, union sockaddr_union *gw,
    union sockaddr_union *laddr, struct sockaddr_encap *ddst, struct mbuf *m)
{
	struct ipsec_acquire *ipa;

	NET_ASSERT_LOCKED();

	/* Check whether request has been made already. */
	if ((ipa = ipsp_pending_acquire(ipo, gw)) != NULL)
		return 0;

	/* Add request in cache and proceed. */
	if (ipsec_acquire_pool_initialized == 0) {
		ipsec_acquire_pool_initialized = 1;
		pool_init(&ipsec_acquire_pool, sizeof(struct ipsec_acquire),
		    0, IPL_SOFTNET, 0, "ipsec acquire", NULL);
	}

	ipa = pool_get(&ipsec_acquire_pool, PR_NOWAIT|PR_ZERO);
	if (ipa == NULL)
		return ENOMEM;

	ipa->ipa_addr = *gw;

	timeout_set_proc(&ipa->ipa_timeout, ipsp_delete_acquire_timo, ipa);

	ipa->ipa_info.sen_len = ipa->ipa_mask.sen_len = SENT_LEN;
	ipa->ipa_info.sen_family = ipa->ipa_mask.sen_family = PF_KEY;

	/* Just copy the right information. */
	switch (ipo->ipo_addr.sen_type) {
	case SENT_IP4:
		ipa->ipa_info.sen_type = ipa->ipa_mask.sen_type = SENT_IP4;
		ipa->ipa_info.sen_direction = ipo->ipo_addr.sen_direction;
		ipa->ipa_mask.sen_direction = ipo->ipo_mask.sen_direction;

		if (ipsp_is_unspecified(ipo->ipo_dst)) {
			ipa->ipa_info.sen_ip_src = ddst->sen_ip_src;
			ipa->ipa_mask.sen_ip_src.s_addr = INADDR_BROADCAST;

			ipa->ipa_info.sen_ip_dst = ddst->sen_ip_dst;
			ipa->ipa_mask.sen_ip_dst.s_addr = INADDR_BROADCAST;
		} else {
			ipa->ipa_info.sen_ip_src = ipo->ipo_addr.sen_ip_src;
			ipa->ipa_mask.sen_ip_src = ipo->ipo_mask.sen_ip_src;

			ipa->ipa_info.sen_ip_dst = ipo->ipo_addr.sen_ip_dst;
			ipa->ipa_mask.sen_ip_dst = ipo->ipo_mask.sen_ip_dst;
		}

		ipa->ipa_info.sen_proto = ipo->ipo_addr.sen_proto;
		ipa->ipa_mask.sen_proto = ipo->ipo_mask.sen_proto;

		if (ipo->ipo_addr.sen_proto) {
			ipa->ipa_info.sen_sport = ipo->ipo_addr.sen_sport;
			ipa->ipa_mask.sen_sport = ipo->ipo_mask.sen_sport;

			ipa->ipa_info.sen_dport = ipo->ipo_addr.sen_dport;
			ipa->ipa_mask.sen_dport = ipo->ipo_mask.sen_dport;
		}
		break;

#ifdef INET6
	case SENT_IP6:
		ipa->ipa_info.sen_type = ipa->ipa_mask.sen_type = SENT_IP6;
		ipa->ipa_info.sen_ip6_direction =
		    ipo->ipo_addr.sen_ip6_direction;
		ipa->ipa_mask.sen_ip6_direction =
		    ipo->ipo_mask.sen_ip6_direction;

		if (ipsp_is_unspecified(ipo->ipo_dst)) {
			ipa->ipa_info.sen_ip6_src = ddst->sen_ip6_src;
			ipa->ipa_mask.sen_ip6_src = in6mask128;

			ipa->ipa_info.sen_ip6_dst = ddst->sen_ip6_dst;
			ipa->ipa_mask.sen_ip6_dst = in6mask128;
		} else {
			ipa->ipa_info.sen_ip6_src = ipo->ipo_addr.sen_ip6_src;
			ipa->ipa_mask.sen_ip6_src = ipo->ipo_mask.sen_ip6_src;

			ipa->ipa_info.sen_ip6_dst = ipo->ipo_addr.sen_ip6_dst;
			ipa->ipa_mask.sen_ip6_dst = ipo->ipo_mask.sen_ip6_dst;
		}

		ipa->ipa_info.sen_ip6_proto = ipo->ipo_addr.sen_ip6_proto;
		ipa->ipa_mask.sen_ip6_proto = ipo->ipo_mask.sen_ip6_proto;

		if (ipo->ipo_mask.sen_ip6_proto) {
			ipa->ipa_info.sen_ip6_sport =
			    ipo->ipo_addr.sen_ip6_sport;
			ipa->ipa_mask.sen_ip6_sport =
			    ipo->ipo_mask.sen_ip6_sport;
			ipa->ipa_info.sen_ip6_dport =
			    ipo->ipo_addr.sen_ip6_dport;
			ipa->ipa_mask.sen_ip6_dport =
			    ipo->ipo_mask.sen_ip6_dport;
		}
		break;
#endif /* INET6 */

	default:
		pool_put(&ipsec_acquire_pool, ipa);
		return 0;
	}

#ifdef IPSEC
	timeout_add_sec(&ipa->ipa_timeout, ipsec_expire_acquire);
#endif

	TAILQ_INSERT_TAIL(&ipsec_acquire_head, ipa, ipa_next);
	TAILQ_INSERT_TAIL(&ipo->ipo_acquires, ipa, ipa_ipo_next);
	ipa->ipa_policy = ipo;

	/* PF_KEYv2 notification message. */
	return pfkeyv2_acquire(ipo, gw, laddr, &ipa->ipa_seq, ddst);
}

/*
 * Deal with PCB security requirements.
 */
struct tdb *
ipsp_spd_inp(struct mbuf *m, int af, int hlen, int *error, int direction,
    struct tdb *tdbp, struct inpcb *inp, struct ipsec_policy *ipo)
{
	/* Sanity check. */
	if (inp == NULL)
		goto justreturn;

	/* We only support IPSEC_LEVEL_BYPASS or IPSEC_LEVEL_AVAIL */

	if (inp->inp_seclevel[SL_ESP_TRANS] == IPSEC_LEVEL_BYPASS &&
	    inp->inp_seclevel[SL_ESP_NETWORK] == IPSEC_LEVEL_BYPASS &&
	    inp->inp_seclevel[SL_AUTH] == IPSEC_LEVEL_BYPASS)
		goto justreturn;

	if (inp->inp_seclevel[SL_ESP_TRANS] == IPSEC_LEVEL_AVAIL &&
	    inp->inp_seclevel[SL_ESP_NETWORK] == IPSEC_LEVEL_AVAIL &&
	    inp->inp_seclevel[SL_AUTH] == IPSEC_LEVEL_AVAIL)
		goto justreturn;

	*error = -EINVAL;
	return NULL;

 justreturn:
	if (ipo != NULL)
		return ipo->ipo_tdb;
	else
		return NULL;
}

/*
 * Find a pending ACQUIRE record based on its sequence number.
 * XXX Need to use a better data structure.
 */
struct ipsec_acquire *
ipsec_get_acquire(u_int32_t seq)
{
	struct ipsec_acquire *ipa;

	NET_ASSERT_LOCKED();

	TAILQ_FOREACH (ipa, &ipsec_acquire_head, ipa_next)
		if (ipa->ipa_seq == seq)
			return ipa;

	return NULL;
}