/*	$OpenBSD: route.c,v 1.330 2016/09/17 07:35:05 phessler Exp $	*/
/*	$NetBSD: route.c,v 1.14 1996/02/13 22:00:46 christos Exp $	*/

/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1980, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)route.c	8.2 (Berkeley) 11/15/93
 */

/*
 * @(#)COPYRIGHT	1.1 (NRL) 17 January 1995
 *
 * NRL grants permission for redistribution and use in source and binary
 * forms, with or without modification, of the software and documentation
 * created at NRL provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgements:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 *	This product includes software developed at the Information
 *	Technology Division, US Naval Research Laboratory.
 * 4. Neither the name of the NRL nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THE SOFTWARE PROVIDED BY NRL IS PROVIDED BY NRL AND CONTRIBUTORS ``AS
 * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL NRL OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation
 * are those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of the US Naval
 * Research Laboratory (NRL).
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/timeout.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/pool.h>
#include <sys/atomic.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/ip_var.h>
#include <netinet/in_var.h>

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_var.h>
#endif

#ifdef MPLS
#include <netmpls/mpls.h>
#endif

#ifdef IPSEC
#include <netinet/ip_ipsp.h>
#include <net/if_enc.h>
#endif

#ifdef BFD
#include <net/bfd.h>
#endif

#define ROUNDUP(a) (a>0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))

/* Give some jitter to hash, to avoid synchronization between routers. */
static uint32_t		rt_hashjitter;

extern unsigned int	rtmap_limit;

struct rtstat		rtstat;
int			rttrash;	/* routes not in table but not freed */
int			ifatrash;	/* ifas not in ifp list but not freed */

struct pool		rtentry_pool;	/* pool for rtentry structures */
struct pool		rttimer_pool;	/* pool for rttimer structures */

void	rt_timer_init(void);
int	rt_setgwroute(struct rtentry *, u_int);
void	rt_putgwroute(struct rtentry *);
int	rtflushclone1(struct rtentry *, void *, u_int);
void	rtflushclone(unsigned int, struct rtentry *);
int	rt_ifa_purge_walker(struct rtentry *, void *, unsigned int);
struct rtentry *rt_match(struct sockaddr *, uint32_t *, int, unsigned int);
struct sockaddr *rt_plentosa(sa_family_t, int, struct sockaddr_in6 *);

struct ifaddr	*ifa_ifwithroute(int, struct sockaddr *, struct sockaddr *,
		    u_int);
int	rtrequest_delete(struct rt_addrinfo *, u_int8_t, struct ifnet *,
	    struct rtentry **, u_int);

#ifdef DDB
void	db_print_sa(struct sockaddr *);
void	db_print_ifa(struct ifaddr *);
int	db_show_rtentry(struct rtentry *, void *, unsigned int);
#endif

#define	LABELID_MAX	50000

struct rt_label {
	TAILQ_ENTRY(rt_label)	rtl_entry;
	char			rtl_name[RTLABEL_LEN];
	u_int16_t		rtl_id;
	int			rtl_ref;
};

TAILQ_HEAD(rt_labels, rt_label)	rt_labels = TAILQ_HEAD_INITIALIZER(rt_labels);

void
route_init(void)
{
	pool_init(&rtentry_pool, sizeof(struct rtentry), 0, IPL_SOFTNET, 0,
	    "rtentry", NULL);

	while (rt_hashjitter == 0)
		rt_hashjitter = arc4random();

	if (rtable_add(0) != 0)
		panic("route_init rtable_add");
#ifdef BFD
	bfdinit();
#endif
}

/*
 * Returns 1 if the (cached) ``rt'' entry is still valid, 0 otherwise.
 */
int
rtisvalid(struct rtentry *rt)
{
	if (rt == NULL)
		return (0);

	if (!ISSET(rt->rt_flags, RTF_UP))
		return (0);

	if (ISSET(rt->rt_flags, RTF_GATEWAY)) {
		KASSERT(rt->rt_gwroute != NULL);
		KASSERT(!ISSET(rt->rt_gwroute->rt_flags, RTF_GATEWAY));
		if (!ISSET(rt->rt_gwroute->rt_flags, RTF_UP))
			return (0);
	}

	return (1);
}

/*
 * Do the actual lookup for rtalloc(9), do not use directly!
 *
 * Return the best matching entry for the destination ``dst''.
 *
 * "RT_RESOLVE" means that a corresponding L2 entry should
 * be added to the routing table and resolved (via ARP or
 * NDP), if it does not exist.
 */
struct rtentry *
rt_match(struct sockaddr *dst, uint32_t *src, int flags, unsigned int tableid)
{
	struct rtentry *rt0, *rt = NULL;
	int s, error = 0;

	s = splsoftnet();
	rt = rtable_match(tableid, dst, src);
	if (rt != NULL) {
		if ((rt->rt_flags & RTF_CLONING) && ISSET(flags, RT_RESOLVE)) {
			struct rt_addrinfo info;

			rt0 = rt;

			memset(&info, 0, sizeof(info));
			info.rti_info[RTAX_DST] = dst;

			KERNEL_LOCK();
			error = rtrequest(RTM_RESOLVE, &info, RTP_DEFAULT,
			    &rt, tableid);
			if (error) {
				rt_missmsg(RTM_MISS, &info, 0, RTP_NONE, 0,
				    error, tableid);
			} else {
				/* Inform listeners of the new route */
				rt_sendmsg(rt, RTM_ADD, tableid);
				rtfree(rt0);
			}
			KERNEL_UNLOCK();
		}
		rt->rt_use++;
	} else
		rtstat.rts_unreach++;
	splx(s);
	return (rt);
}

#ifndef SMALL_KERNEL
/*
 * Originated from bridge_hash() in if_bridge.c
 */
#define mix(a, b, c) do {						\
	a -= b; a -= c; a ^= (c >> 13);					\
	b -= c; b -= a; b ^= (a << 8);					\
	c -= a; c -= b; c ^= (b >> 13);					\
	a -= b; a -= c; a ^= (c >> 12);					\
	b -= c; b -= a; b ^= (a << 16);					\
	c -= a; c -= b; c ^= (b >> 5);					\
	a -= b; a -= c; a ^= (c >> 3);					\
	b -= c; b -= a; b ^= (a << 10);					\
	c -= a; c -= b; c ^= (b >> 15);					\
} while (0)

int
rt_hash(struct rtentry *rt, struct sockaddr *dst, uint32_t *src)
{
	uint32_t a, b, c;

	if (src == NULL || !rtisvalid(rt) || !ISSET(rt->rt_flags, RTF_MPATH))
		return (-1);

	a = b = 0x9e3779b9;
	c = rt_hashjitter;

	switch (dst->sa_family) {
	case AF_INET:
	    {
		struct sockaddr_in *sin;

		if (!ipmultipath)
			return (-1);

		sin = satosin(dst);
		a += sin->sin_addr.s_addr;
		b += (src != NULL) ? src[0] : 0;
		mix(a, b, c);
		break;
	    }
#ifdef INET6
	case AF_INET6:
	    {
		struct sockaddr_in6 *sin6;

		if (!ip6_multipath)
			return (-1);

		sin6 = satosin6(dst);
		a += sin6->sin6_addr.s6_addr32[0];
		b += sin6->sin6_addr.s6_addr32[2];
		c += (src != NULL) ? src[0] : 0;
		mix(a, b, c);
		a += sin6->sin6_addr.s6_addr32[1];
		b += sin6->sin6_addr.s6_addr32[3];
		c += (src != NULL) ? src[1] : 0;
		mix(a, b, c);
		a += sin6->sin6_addr.s6_addr32[2];
		b += sin6->sin6_addr.s6_addr32[1];
		c += (src != NULL) ? src[2] : 0;
		mix(a, b, c);
		a += sin6->sin6_addr.s6_addr32[3];
		b += sin6->sin6_addr.s6_addr32[0];
		c += (src != NULL) ? src[3] : 0;
		mix(a, b, c);
		break;
	    }
#endif /* INET6 */
	}

	return (c & 0xffff);
}

/*
 * Allocate a route, potentially using multipath to select the peer.
 */
struct rtentry *
rtalloc_mpath(struct sockaddr *dst, uint32_t *src, unsigned int rtableid)
{
	return (rt_match(dst, src, RT_RESOLVE, rtableid));
}
#endif /* SMALL_KERNEL */

/*
 * Look in the routing table for the best matching entry for
 * ``dst''.
 *
 * If a route with a gateway is found and its next hop is no
 * longer valid, try to cache it.
 */
struct rtentry *
rtalloc(struct sockaddr *dst, int flags, unsigned int rtableid)
{
	return (rt_match(dst, NULL, flags, rtableid));
}

/*
 * Cache the route entry corresponding to a reachable next hop in
 * the gateway entry ``rt''.
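 *
 * Return 0 on success or an errno if no valid next hop entry
 * could be cached.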
375 */ 376 int 377 rt_setgwroute(struct rtentry *rt, u_int rtableid) 378 { 379 struct rtentry *nhrt; 380 381 KERNEL_ASSERT_LOCKED(); 382 383 KASSERT(ISSET(rt->rt_flags, RTF_GATEWAY)); 384 385 /* If we cannot find a valid next hop bail. */ 386 nhrt = rt_match(rt->rt_gateway, NULL, RT_RESOLVE, rtable_l2(rtableid)); 387 if (nhrt == NULL) 388 return (ENOENT); 389 390 /* Next hop entry must be on the same interface. */ 391 if (nhrt->rt_ifidx != rt->rt_ifidx) { 392 rtfree(nhrt); 393 return (EHOSTUNREACH); 394 } 395 396 /* 397 * Next hop must be reachable, this also prevents rtentry 398 * loops for example when rt->rt_gwroute points to rt. 399 */ 400 if (ISSET(nhrt->rt_flags, RTF_CLONING|RTF_GATEWAY)) { 401 rtfree(nhrt); 402 return (ELOOP); 403 } 404 405 /* Next hop is valid so remove possible old cache. */ 406 rt_putgwroute(rt); 407 KASSERT(rt->rt_gwroute == NULL); 408 409 /* 410 * If the MTU of next hop is 0, this will reset the MTU of the 411 * route to run PMTUD again from scratch. 412 */ 413 if (!ISSET(rt->rt_locks, RTV_MTU) && (rt->rt_mtu > nhrt->rt_mtu)) 414 rt->rt_mtu = nhrt->rt_mtu; 415 416 /* 417 * To avoid reference counting problems when writting link-layer 418 * addresses in an outgoing packet, we ensure that the lifetime 419 * of a cached entry is greater that the bigger lifetime of the 420 * gateway entries it is pointed by. 421 */ 422 nhrt->rt_flags |= RTF_CACHED; 423 nhrt->rt_cachecnt++; 424 425 rt->rt_gwroute = nhrt; 426 427 return (0); 428 } 429 430 /* 431 * Invalidate the cached route entry of the gateway entry ``rt''. 432 */ 433 void 434 rt_putgwroute(struct rtentry *rt) 435 { 436 struct rtentry *nhrt = rt->rt_gwroute; 437 438 KERNEL_ASSERT_LOCKED(); 439 440 if (!ISSET(rt->rt_flags, RTF_GATEWAY) || nhrt == NULL) 441 return; 442 443 KASSERT(ISSET(nhrt->rt_flags, RTF_CACHED)); 444 KASSERT(nhrt->rt_cachecnt > 0); 445 446 --nhrt->rt_cachecnt; 447 if (nhrt->rt_cachecnt == 0) 448 nhrt->rt_flags &= ~RTF_CACHED; 449 450 rtfree(rt->rt_gwroute); 451 rt->rt_gwroute = NULL; 452 } 453 454 void 455 rtref(struct rtentry *rt) 456 { 457 atomic_inc_int(&rt->rt_refcnt); 458 } 459 460 void 461 rtfree(struct rtentry *rt) 462 { 463 int refcnt; 464 465 if (rt == NULL) 466 return; 467 468 refcnt = (int)atomic_dec_int_nv(&rt->rt_refcnt); 469 if (refcnt <= 0) { 470 KASSERT(!ISSET(rt->rt_flags, RTF_UP)); 471 KASSERT(!RT_ROOT(rt)); 472 atomic_dec_int(&rttrash); 473 if (refcnt < 0) { 474 printf("rtfree: %p not freed (neg refs)\n", rt); 475 return; 476 } 477 478 KERNEL_LOCK(); 479 rt_timer_remove_all(rt); 480 ifafree(rt->rt_ifa); 481 rtlabel_unref(rt->rt_labelid); 482 #ifdef MPLS 483 if (rt->rt_flags & RTF_MPLS) 484 free(rt->rt_llinfo, M_TEMP, sizeof(struct rt_mpls)); 485 #endif 486 free(rt->rt_gateway, M_RTABLE, ROUNDUP(rt->rt_gateway->sa_len)); 487 free(rt_key(rt), M_RTABLE, rt_key(rt)->sa_len); 488 KERNEL_UNLOCK(); 489 490 pool_put(&rtentry_pool, rt); 491 } 492 } 493 494 void 495 rt_sendmsg(struct rtentry *rt, int cmd, u_int rtableid) 496 { 497 struct rt_addrinfo info; 498 struct ifnet *ifp; 499 struct sockaddr_rtlabel sa_rl; 500 struct sockaddr_in6 sa_mask; 501 502 memset(&info, 0, sizeof(info)); 503 info.rti_info[RTAX_DST] = rt_key(rt); 504 info.rti_info[RTAX_GATEWAY] = rt->rt_gateway; 505 if (!ISSET(rt->rt_flags, RTF_HOST)) 506 info.rti_info[RTAX_NETMASK] = rt_plen2mask(rt, &sa_mask); 507 info.rti_info[RTAX_LABEL] = rtlabel_id2sa(rt->rt_labelid, &sa_rl); 508 ifp = if_get(rt->rt_ifidx); 509 if (ifp != NULL) { 510 info.rti_info[RTAX_IFP] = sdltosa(ifp->if_sadl); 511 info.rti_info[RTAX_IFA] = 
		    rt->rt_ifa->ifa_addr;
	}

	rt_missmsg(cmd, &info, rt->rt_flags, rt->rt_priority, rt->rt_ifidx, 0,
	    rtableid);
	if_put(ifp);
}

void
ifafree(struct ifaddr *ifa)
{
	if (ifa == NULL)
		panic("ifafree");
	if (ifa->ifa_refcnt == 0) {
		ifatrash--;
		free(ifa, M_IFADDR, 0);
	} else
		ifa->ifa_refcnt--;
}

/*
 * Force a routing table entry to the specified
 * destination to go through the given gateway.
 * Normally called as a result of a routing redirect
 * message from the network layer.
 *
 * N.B.: must be called at splsoftnet
 */
void
rtredirect(struct sockaddr *dst, struct sockaddr *gateway,
    struct sockaddr *src, struct rtentry **rtp, unsigned int rdomain)
{
	struct rtentry		*rt;
	int			 error = 0;
	u_int32_t		*stat = NULL;
	struct rt_addrinfo	 info;
	struct ifaddr		*ifa;
	unsigned int		 ifidx = 0;
	int			 flags = RTF_GATEWAY|RTF_HOST;
	uint8_t			 prio = RTP_NONE;

	splsoftassert(IPL_SOFTNET);

	/* verify the gateway is directly reachable */
	if ((ifa = ifa_ifwithnet(gateway, rdomain)) == NULL) {
		error = ENETUNREACH;
		goto out;
	}
	ifidx = ifa->ifa_ifp->if_index;
	rt = rtable_lookup(rdomain, dst, NULL, NULL, RTP_ANY);
	/*
	 * If the redirect isn't from our current router for this dst,
	 * it's either old or wrong.  If it redirects us to ourselves,
	 * we have a routing loop, perhaps as a result of an interface
	 * going down recently.
	 */
#define	equal(a1, a2) \
	((a1)->sa_len == (a2)->sa_len && \
	 bcmp((caddr_t)(a1), (caddr_t)(a2), (a1)->sa_len) == 0)
	if (rt != NULL && (!equal(src, rt->rt_gateway) || rt->rt_ifa != ifa))
		error = EINVAL;
	else if (ifa_ifwithaddr(gateway, rdomain) != NULL ||
	    (gateway->sa_family == AF_INET &&
	    in_broadcast(satosin(gateway)->sin_addr, rdomain)))
		error = EHOSTUNREACH;
	if (error)
		goto done;
	/*
	 * Create a new entry if we just got back a wildcard entry
	 * or the lookup failed.  This is necessary for hosts
	 * which use routing redirects generated by smart gateways
	 * to dynamically build the routing tables.
	 */
	if (rt == NULL)
		goto create;
	/*
	 * Don't listen to the redirect if it's
	 * for a route to an interface.
	 */
	if (ISSET(rt->rt_flags, RTF_GATEWAY)) {
		if (!ISSET(rt->rt_flags, RTF_HOST)) {
			/*
			 * Changing from route to net => route to host.
			 * Create new route, rather than smashing route to net.
			 */
create:
			rtfree(rt);
			flags |= RTF_DYNAMIC;
			bzero(&info, sizeof(info));
			info.rti_info[RTAX_DST] = dst;
			info.rti_info[RTAX_GATEWAY] = gateway;
			info.rti_ifa = ifa;
			info.rti_flags = flags;
			rt = NULL;
			error = rtrequest(RTM_ADD, &info, RTP_DEFAULT, &rt,
			    rdomain);
			if (error == 0) {
				flags = rt->rt_flags;
				prio = rt->rt_priority;
			}
			stat = &rtstat.rts_dynamic;
		} else {
			/*
			 * Smash the current notion of the gateway to
			 * this destination.  Should check about netmask!!!
			 */
			rt->rt_flags |= RTF_MODIFIED;
			flags |= RTF_MODIFIED;
			prio = rt->rt_priority;
			stat = &rtstat.rts_newgateway;
			rt_setgate(rt, gateway, rdomain);
		}
	} else
		error = EHOSTUNREACH;
done:
	if (rt) {
		if (rtp && !error)
			*rtp = rt;
		else
			rtfree(rt);
	}
out:
	if (error)
		rtstat.rts_badredirect++;
	else if (stat != NULL)
		(*stat)++;
	bzero((caddr_t)&info, sizeof(info));
	info.rti_info[RTAX_DST] = dst;
	info.rti_info[RTAX_GATEWAY] = gateway;
	info.rti_info[RTAX_AUTHOR] = src;
	rt_missmsg(RTM_REDIRECT, &info, flags, prio, ifidx, error, rdomain);
}

/*
 * Delete a route and generate a message
 */
int
rtdeletemsg(struct rtentry *rt, struct ifnet *ifp, u_int tableid)
{
	int			error;
	struct rt_addrinfo	info;
	unsigned int		ifidx;
	struct sockaddr_in6	sa_mask;

	KASSERT(rt->rt_ifidx == ifp->if_index);

	/*
	 * Request the new route so that the entry is not actually
	 * deleted.  That will allow the information being reported to
	 * be accurate (and consistent with route_output()).
	 */
	bzero((caddr_t)&info, sizeof(info));
	info.rti_info[RTAX_DST] = rt_key(rt);
	if (!ISSET(rt->rt_flags, RTF_HOST))
		info.rti_info[RTAX_NETMASK] = rt_plen2mask(rt, &sa_mask);
	info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
	info.rti_flags = rt->rt_flags;
	ifidx = rt->rt_ifidx;
	error = rtrequest_delete(&info, rt->rt_priority, ifp, &rt, tableid);
	rt_missmsg(RTM_DELETE, &info, info.rti_flags, rt->rt_priority, ifidx,
	    error, tableid);
	if (error == 0)
		rtfree(rt);
	return (error);
}

static inline int
rtequal(struct rtentry *a, struct rtentry *b)
{
	if (a == b)
		return 1;

	if (memcmp(rt_key(a), rt_key(b), rt_key(a)->sa_len) == 0 &&
	    rt_plen(a) == rt_plen(b))
		return 1;
	else
		return 0;
}

int
rtflushclone1(struct rtentry *rt, void *arg, u_int id)
{
	struct rtentry *parent = arg;
	struct ifnet *ifp;
	int error;

	ifp = if_get(rt->rt_ifidx);

	/*
	 * This happens when an interface with a RTF_CLONING route is
	 * being detached.  In this case it's safe to bail because all
	 * the routes are being purged by rt_ifa_purge().
	 */
	if (ifp == NULL)
		return 0;

	if (ISSET(rt->rt_flags, RTF_CLONED) && rtequal(rt->rt_parent, parent)) {
		error = rtdeletemsg(rt, ifp, id);
		if (error == 0)
			error = EAGAIN;
	} else
		error = 0;

	if_put(ifp);
	return error;
}

void
rtflushclone(unsigned int rtableid, struct rtentry *parent)
{
#ifdef DIAGNOSTIC
	if (!parent || (parent->rt_flags & RTF_CLONING) == 0)
		panic("rtflushclone: called with a non-cloning route");
#endif
	rtable_walk(rtableid, rt_key(parent)->sa_family, rtflushclone1, parent);
}

int
rtioctl(u_long req, caddr_t data, struct proc *p)
{
	return (EOPNOTSUPP);
}

struct ifaddr *
ifa_ifwithroute(int flags, struct sockaddr *dst, struct sockaddr *gateway,
    u_int rtableid)
{
	struct ifaddr	*ifa;

	if ((flags & RTF_GATEWAY) == 0) {
		/*
		 * If we are adding a route to an interface,
		 * and the interface is a pt to pt link
		 * we should search for the destination
		 * as our clue to the interface.  Otherwise
		 * we can use the local address.
748 */ 749 ifa = NULL; 750 if (flags & RTF_HOST) 751 ifa = ifa_ifwithdstaddr(dst, rtableid); 752 if (ifa == NULL) 753 ifa = ifa_ifwithaddr(gateway, rtableid); 754 } else { 755 /* 756 * If we are adding a route to a remote net 757 * or host, the gateway may still be on the 758 * other end of a pt to pt link. 759 */ 760 ifa = ifa_ifwithdstaddr(gateway, rtableid); 761 } 762 if (ifa == NULL) { 763 if (gateway->sa_family == AF_LINK) { 764 struct sockaddr_dl *sdl = satosdl(gateway); 765 struct ifnet *ifp = if_get(sdl->sdl_index); 766 767 if (ifp != NULL) 768 ifa = ifaof_ifpforaddr(dst, ifp); 769 if_put(ifp); 770 } else { 771 struct rtentry *rt; 772 773 rt = rtalloc(gateway, RT_RESOLVE, rtableid); 774 if (rt != NULL) 775 ifa = rt->rt_ifa; 776 rtfree(rt); 777 } 778 } 779 if (ifa == NULL) 780 return (NULL); 781 if (ifa->ifa_addr->sa_family != dst->sa_family) { 782 struct ifaddr *oifa = ifa; 783 ifa = ifaof_ifpforaddr(dst, ifa->ifa_ifp); 784 if (ifa == NULL) 785 ifa = oifa; 786 } 787 return (ifa); 788 } 789 790 int 791 rt_getifa(struct rt_addrinfo *info, u_int rtid) 792 { 793 struct ifnet *ifp = NULL; 794 795 /* 796 * ifp may be specified by sockaddr_dl when protocol address 797 * is ambiguous 798 */ 799 if (info->rti_info[RTAX_IFP] != NULL) { 800 struct sockaddr_dl *sdl; 801 802 sdl = satosdl(info->rti_info[RTAX_IFP]); 803 ifp = if_get(sdl->sdl_index); 804 } 805 806 #ifdef IPSEC 807 /* 808 * If the destination is a PF_KEY address, we'll look 809 * for the existence of a encap interface number or address 810 * in the options list of the gateway. By default, we'll return 811 * enc0. 812 */ 813 if (info->rti_info[RTAX_DST] && 814 info->rti_info[RTAX_DST]->sa_family == PF_KEY) 815 info->rti_ifa = enc_getifa(rtid, 0); 816 #endif 817 818 if (info->rti_ifa == NULL && info->rti_info[RTAX_IFA] != NULL) 819 info->rti_ifa = ifa_ifwithaddr(info->rti_info[RTAX_IFA], rtid); 820 821 if (info->rti_ifa == NULL) { 822 struct sockaddr *sa; 823 824 if ((sa = info->rti_info[RTAX_IFA]) == NULL) 825 if ((sa = info->rti_info[RTAX_GATEWAY]) == NULL) 826 sa = info->rti_info[RTAX_DST]; 827 828 if (sa != NULL && ifp != NULL) 829 info->rti_ifa = ifaof_ifpforaddr(sa, ifp); 830 else if (info->rti_info[RTAX_DST] != NULL && 831 info->rti_info[RTAX_GATEWAY] != NULL) 832 info->rti_ifa = ifa_ifwithroute(info->rti_flags, 833 info->rti_info[RTAX_DST], 834 info->rti_info[RTAX_GATEWAY], 835 rtid); 836 else if (sa != NULL) 837 info->rti_ifa = ifa_ifwithroute(info->rti_flags, 838 sa, sa, rtid); 839 } 840 841 if_put(ifp); 842 843 if (info->rti_ifa == NULL) 844 return (ENETUNREACH); 845 846 return (0); 847 } 848 849 int 850 rtrequest_delete(struct rt_addrinfo *info, u_int8_t prio, struct ifnet *ifp, 851 struct rtentry **ret_nrt, u_int tableid) 852 { 853 struct rtentry *rt; 854 int error; 855 856 splsoftassert(IPL_SOFTNET); 857 858 if (!rtable_exists(tableid)) 859 return (EAFNOSUPPORT); 860 rt = rtable_lookup(tableid, info->rti_info[RTAX_DST], 861 info->rti_info[RTAX_NETMASK], info->rti_info[RTAX_GATEWAY], prio); 862 if (rt == NULL) 863 return (ESRCH); 864 865 /* Make sure that's the route the caller want to delete. */ 866 if (ifp != NULL && ifp->if_index != rt->rt_ifidx) { 867 rtfree(rt); 868 return (ESRCH); 869 } 870 871 #ifndef SMALL_KERNEL 872 /* 873 * If we got multipath routes, we require users to specify 874 * a matching gateway. 
875 */ 876 if ((rt->rt_flags & RTF_MPATH) && 877 info->rti_info[RTAX_GATEWAY] == NULL) { 878 rtfree(rt); 879 return (ESRCH); 880 } 881 #endif 882 883 error = rtable_delete(tableid, info->rti_info[RTAX_DST], 884 info->rti_info[RTAX_NETMASK], rt); 885 if (error != 0) { 886 rtfree(rt); 887 return (ESRCH); 888 } 889 890 #ifdef BFD 891 if (ISSET(rt->rt_flags, RTF_BFD)) 892 bfdclear(rt); 893 #endif 894 895 /* Release next hop cache before flushing cloned entries. */ 896 rt_putgwroute(rt); 897 898 /* Clean up any cloned children. */ 899 if (ISSET(rt->rt_flags, RTF_CLONING)) 900 rtflushclone(tableid, rt); 901 902 rtfree(rt->rt_parent); 903 rt->rt_parent = NULL; 904 905 rt->rt_flags &= ~RTF_UP; 906 907 if (ifp == NULL) { 908 ifp = if_get(rt->rt_ifidx); 909 if (ifp != NULL) { 910 ifp->if_rtrequest(ifp, RTM_DELETE, rt); 911 if_put(ifp); 912 } 913 } else { 914 KASSERT(ifp->if_index == rt->rt_ifidx); 915 ifp->if_rtrequest(ifp, RTM_DELETE, rt); 916 } 917 918 atomic_inc_int(&rttrash); 919 920 if (ret_nrt != NULL) 921 *ret_nrt = rt; 922 else 923 rtfree(rt); 924 925 return (0); 926 } 927 928 int 929 rtrequest(int req, struct rt_addrinfo *info, u_int8_t prio, 930 struct rtentry **ret_nrt, u_int tableid) 931 { 932 struct ifnet *ifp; 933 struct rtentry *rt, *crt; 934 struct ifaddr *ifa; 935 struct sockaddr *ndst; 936 struct sockaddr_rtlabel *sa_rl, sa_rl2; 937 struct sockaddr_dl sa_dl = { sizeof(sa_dl), AF_LINK }; 938 int dlen, error; 939 #ifdef MPLS 940 struct sockaddr_mpls *sa_mpls; 941 #endif 942 943 splsoftassert(IPL_SOFTNET); 944 945 if (!rtable_exists(tableid)) 946 return (EAFNOSUPPORT); 947 if (info->rti_flags & RTF_HOST) 948 info->rti_info[RTAX_NETMASK] = NULL; 949 switch (req) { 950 case RTM_DELETE: 951 error = rtrequest_delete(info, prio, NULL, ret_nrt, tableid); 952 if (error) 953 return (error); 954 break; 955 956 case RTM_RESOLVE: 957 if (ret_nrt == NULL || (rt = *ret_nrt) == NULL) 958 return (EINVAL); 959 if ((rt->rt_flags & RTF_CLONING) == 0) 960 return (EINVAL); 961 KASSERT(rt->rt_ifa->ifa_ifp != NULL); 962 info->rti_ifa = rt->rt_ifa; 963 info->rti_flags = rt->rt_flags | (RTF_CLONED|RTF_HOST); 964 info->rti_flags &= ~(RTF_CLONING|RTF_CONNECTED|RTF_STATIC); 965 info->rti_info[RTAX_GATEWAY] = sdltosa(&sa_dl); 966 info->rti_info[RTAX_LABEL] = 967 rtlabel_id2sa(rt->rt_labelid, &sa_rl2); 968 /* FALLTHROUGH */ 969 970 case RTM_ADD: 971 if (info->rti_ifa == NULL && (error = rt_getifa(info, tableid))) 972 return (error); 973 ifa = info->rti_ifa; 974 ifp = ifa->ifa_ifp; 975 if (prio == 0) 976 prio = ifp->if_priority + RTP_STATIC; 977 978 dlen = info->rti_info[RTAX_DST]->sa_len; 979 ndst = malloc(dlen, M_RTABLE, M_NOWAIT); 980 if (ndst == NULL) 981 return (ENOBUFS); 982 983 if (info->rti_info[RTAX_NETMASK] != NULL) 984 rt_maskedcopy(info->rti_info[RTAX_DST], ndst, 985 info->rti_info[RTAX_NETMASK]); 986 else 987 memcpy(ndst, info->rti_info[RTAX_DST], dlen); 988 989 rt = pool_get(&rtentry_pool, PR_NOWAIT | PR_ZERO); 990 if (rt == NULL) { 991 free(ndst, M_RTABLE, dlen); 992 return (ENOBUFS); 993 } 994 995 rt->rt_refcnt = 1; 996 rt->rt_flags = info->rti_flags | RTF_UP; 997 rt->rt_priority = prio; /* init routing priority */ 998 LIST_INIT(&rt->rt_timer); 999 1000 #ifndef SMALL_KERNEL 1001 /* Check the link state if the table supports it. 
		 */
		if (rtable_mpath_capable(tableid, ndst->sa_family) &&
		    !ISSET(rt->rt_flags, RTF_LOCAL) &&
		    (!LINK_STATE_IS_UP(ifp->if_link_state) ||
		    !ISSET(ifp->if_flags, IFF_UP))) {
			rt->rt_flags &= ~RTF_UP;
			rt->rt_priority |= RTP_DOWN;
		}
#endif

		if (info->rti_info[RTAX_LABEL] != NULL) {
			sa_rl = (struct sockaddr_rtlabel *)
			    info->rti_info[RTAX_LABEL];
			rt->rt_labelid = rtlabel_name2id(sa_rl->sr_label);
		}

#ifdef MPLS
		/* We have to allocate additional space for the MPLS info */
		if (info->rti_flags & RTF_MPLS &&
		    (info->rti_info[RTAX_SRC] != NULL ||
		    info->rti_info[RTAX_DST]->sa_family == AF_MPLS)) {
			struct rt_mpls *rt_mpls;

			sa_mpls = (struct sockaddr_mpls *)
			    info->rti_info[RTAX_SRC];

			rt->rt_llinfo = malloc(sizeof(struct rt_mpls),
			    M_TEMP, M_NOWAIT|M_ZERO);

			if (rt->rt_llinfo == NULL) {
				free(ndst, M_RTABLE, dlen);
				pool_put(&rtentry_pool, rt);
				return (ENOMEM);
			}

			rt_mpls = (struct rt_mpls *)rt->rt_llinfo;

			if (sa_mpls != NULL)
				rt_mpls->mpls_label = sa_mpls->smpls_label;

			rt_mpls->mpls_operation = info->rti_mpls;

			/* XXX: set experimental bits */

			rt->rt_flags |= RTF_MPLS;
		} else
			rt->rt_flags &= ~RTF_MPLS;
#endif

		ifa->ifa_refcnt++;
		rt->rt_ifa = ifa;
		rt->rt_ifidx = ifp->if_index;
		if (rt->rt_flags & RTF_CLONED) {
			/*
			 * Copy both metrics and a back pointer to the cloned
			 * route's parent.
			 */
			rt->rt_rmx = (*ret_nrt)->rt_rmx; /* copy metrics */
			rt->rt_priority = (*ret_nrt)->rt_priority;
			rt->rt_parent = *ret_nrt;	 /* Back ptr. to parent. */
			rtref(rt->rt_parent);
		}

		/*
		 * We must set rt->rt_gateway before adding ``rt'' to
		 * the routing table because the radix MPATH code uses
		 * it to (re)order routes.
1068 */ 1069 if ((error = rt_setgate(rt, info->rti_info[RTAX_GATEWAY], 1070 tableid))) { 1071 ifafree(ifa); 1072 rtfree(rt->rt_parent); 1073 rt_putgwroute(rt); 1074 free(rt->rt_gateway, M_RTABLE, 0); 1075 free(ndst, M_RTABLE, dlen); 1076 pool_put(&rtentry_pool, rt); 1077 return (error); 1078 } 1079 1080 error = rtable_insert(tableid, ndst, 1081 info->rti_info[RTAX_NETMASK], info->rti_info[RTAX_GATEWAY], 1082 rt->rt_priority, rt); 1083 if (error != 0 && (crt = rtalloc(ndst, 0, tableid)) != NULL) { 1084 /* overwrite cloned route */ 1085 if (ISSET(crt->rt_flags, RTF_CLONED)) { 1086 struct ifnet *cifp; 1087 1088 cifp = if_get(crt->rt_ifidx); 1089 KASSERT(cifp != NULL); 1090 rtdeletemsg(crt, cifp, tableid); 1091 if_put(cifp); 1092 1093 error = rtable_insert(tableid, ndst, 1094 info->rti_info[RTAX_NETMASK], 1095 info->rti_info[RTAX_GATEWAY], 1096 rt->rt_priority, rt); 1097 } 1098 rtfree(crt); 1099 } 1100 if (error != 0) { 1101 ifafree(ifa); 1102 rtfree(rt->rt_parent); 1103 rt_putgwroute(rt); 1104 free(rt->rt_gateway, M_RTABLE, 0); 1105 free(ndst, M_RTABLE, dlen); 1106 pool_put(&rtentry_pool, rt); 1107 return (EEXIST); 1108 } 1109 ifp->if_rtrequest(ifp, req, rt); 1110 1111 if_group_routechange(info->rti_info[RTAX_DST], 1112 info->rti_info[RTAX_NETMASK]); 1113 1114 if (ret_nrt != NULL) 1115 *ret_nrt = rt; 1116 else 1117 rtfree(rt); 1118 break; 1119 } 1120 1121 return (0); 1122 } 1123 1124 int 1125 rt_setgate(struct rtentry *rt, struct sockaddr *gate, u_int rtableid) 1126 { 1127 int glen = ROUNDUP(gate->sa_len); 1128 struct sockaddr *sa; 1129 1130 if (rt->rt_gateway == NULL || glen > ROUNDUP(rt->rt_gateway->sa_len)) { 1131 sa = malloc(glen, M_RTABLE, M_NOWAIT); 1132 if (sa == NULL) 1133 return (ENOBUFS); 1134 free(rt->rt_gateway, M_RTABLE, 0); 1135 rt->rt_gateway = sa; 1136 } 1137 memmove(rt->rt_gateway, gate, glen); 1138 1139 if (ISSET(rt->rt_flags, RTF_GATEWAY)) 1140 return (rt_setgwroute(rt, rtableid)); 1141 1142 return (0); 1143 } 1144 1145 /* 1146 * Return the route entry containing the next hop link-layer 1147 * address corresponding to ``rt''. 
1148 */ 1149 struct rtentry * 1150 rt_getll(struct rtentry *rt) 1151 { 1152 if (ISSET(rt->rt_flags, RTF_GATEWAY)) { 1153 KASSERT(rt->rt_gwroute != NULL); 1154 return (rt->rt_gwroute); 1155 } 1156 1157 return (rt); 1158 } 1159 1160 void 1161 rt_maskedcopy(struct sockaddr *src, struct sockaddr *dst, 1162 struct sockaddr *netmask) 1163 { 1164 u_char *cp1 = (u_char *)src; 1165 u_char *cp2 = (u_char *)dst; 1166 u_char *cp3 = (u_char *)netmask; 1167 u_char *cplim = cp2 + *cp3; 1168 u_char *cplim2 = cp2 + *cp1; 1169 1170 *cp2++ = *cp1++; *cp2++ = *cp1++; /* copies sa_len & sa_family */ 1171 cp3 += 2; 1172 if (cplim > cplim2) 1173 cplim = cplim2; 1174 while (cp2 < cplim) 1175 *cp2++ = *cp1++ & *cp3++; 1176 if (cp2 < cplim2) 1177 bzero((caddr_t)cp2, (unsigned)(cplim2 - cp2)); 1178 } 1179 1180 int 1181 rt_ifa_add(struct ifaddr *ifa, int flags, struct sockaddr *dst) 1182 { 1183 struct ifnet *ifp = ifa->ifa_ifp; 1184 struct rtentry *rt; 1185 struct sockaddr_rtlabel sa_rl; 1186 struct rt_addrinfo info; 1187 unsigned int rtableid = ifp->if_rdomain; 1188 uint8_t prio = ifp->if_priority + RTP_STATIC; 1189 int error; 1190 1191 memset(&info, 0, sizeof(info)); 1192 info.rti_ifa = ifa; 1193 info.rti_flags = flags | RTF_MPATH; 1194 info.rti_info[RTAX_DST] = dst; 1195 if (flags & RTF_LLINFO) 1196 info.rti_info[RTAX_GATEWAY] = sdltosa(ifp->if_sadl); 1197 else 1198 info.rti_info[RTAX_GATEWAY] = ifa->ifa_addr; 1199 info.rti_info[RTAX_LABEL] = rtlabel_id2sa(ifp->if_rtlabelid, &sa_rl); 1200 1201 #ifdef MPLS 1202 if ((flags & RTF_MPLS) == RTF_MPLS) { 1203 info.rti_mpls = MPLS_OP_POP; 1204 1205 /* MPLS routes only exist in rdomain 0 */ 1206 rtableid = 0; 1207 } 1208 #endif /* MPLS */ 1209 1210 if ((flags & RTF_HOST) == 0) 1211 info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask; 1212 1213 if (flags & (RTF_LOCAL|RTF_BROADCAST)) 1214 prio = RTP_LOCAL; 1215 1216 if (flags & RTF_CONNECTED) 1217 prio = RTP_CONNECTED; 1218 1219 error = rtrequest(RTM_ADD, &info, prio, &rt, rtableid); 1220 if (error == 0) { 1221 /* 1222 * A local route is created for every address configured 1223 * on an interface, so use this information to notify 1224 * userland that a new address has been added. 
1225 */ 1226 if (flags & RTF_LOCAL) 1227 rt_sendaddrmsg(rt, RTM_NEWADDR, ifa); 1228 rt_sendmsg(rt, RTM_ADD, rtableid); 1229 rtfree(rt); 1230 } 1231 return (error); 1232 } 1233 1234 int 1235 rt_ifa_del(struct ifaddr *ifa, int flags, struct sockaddr *dst) 1236 { 1237 struct ifnet *ifp = ifa->ifa_ifp; 1238 struct rtentry *rt; 1239 struct mbuf *m = NULL; 1240 struct sockaddr *deldst; 1241 struct rt_addrinfo info; 1242 struct sockaddr_rtlabel sa_rl; 1243 unsigned int rtableid = ifp->if_rdomain; 1244 uint8_t prio = ifp->if_priority + RTP_STATIC; 1245 int error; 1246 1247 #ifdef MPLS 1248 if ((flags & RTF_MPLS) == RTF_MPLS) 1249 /* MPLS routes only exist in rdomain 0 */ 1250 rtableid = 0; 1251 #endif /* MPLS */ 1252 1253 if ((flags & RTF_HOST) == 0 && ifa->ifa_netmask) { 1254 m = m_get(M_DONTWAIT, MT_SONAME); 1255 if (m == NULL) 1256 return (ENOBUFS); 1257 deldst = mtod(m, struct sockaddr *); 1258 rt_maskedcopy(dst, deldst, ifa->ifa_netmask); 1259 dst = deldst; 1260 } 1261 1262 memset(&info, 0, sizeof(info)); 1263 info.rti_ifa = ifa; 1264 info.rti_flags = flags; 1265 info.rti_info[RTAX_DST] = dst; 1266 if ((flags & RTF_LLINFO) == 0) 1267 info.rti_info[RTAX_GATEWAY] = ifa->ifa_addr; 1268 info.rti_info[RTAX_LABEL] = rtlabel_id2sa(ifp->if_rtlabelid, &sa_rl); 1269 1270 if ((flags & RTF_HOST) == 0) 1271 info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask; 1272 1273 if (flags & (RTF_LOCAL|RTF_BROADCAST)) 1274 prio = RTP_LOCAL; 1275 1276 if (flags & RTF_CONNECTED) 1277 prio = RTP_CONNECTED; 1278 1279 error = rtrequest_delete(&info, prio, ifp, &rt, rtableid); 1280 if (error == 0) { 1281 rt_sendmsg(rt, RTM_DELETE, rtableid); 1282 if (flags & RTF_LOCAL) 1283 rt_sendaddrmsg(rt, RTM_DELADDR, ifa); 1284 rtfree(rt); 1285 } 1286 if (m != NULL) 1287 m_free(m); 1288 1289 return (error); 1290 } 1291 1292 /* 1293 * Add ifa's address as a local rtentry. 1294 */ 1295 int 1296 rt_ifa_addlocal(struct ifaddr *ifa) 1297 { 1298 struct rtentry *rt; 1299 u_int flags = RTF_HOST|RTF_LOCAL; 1300 int error = 0; 1301 1302 /* 1303 * If the configured address correspond to the magical "any" 1304 * address do not add a local route entry because that might 1305 * corrupt the routing tree which uses this value for the 1306 * default routes. 1307 */ 1308 switch (ifa->ifa_addr->sa_family) { 1309 case AF_INET: 1310 if (satosin(ifa->ifa_addr)->sin_addr.s_addr == INADDR_ANY) 1311 return (0); 1312 break; 1313 #ifdef INET6 1314 case AF_INET6: 1315 if (IN6_ARE_ADDR_EQUAL(&satosin6(ifa->ifa_addr)->sin6_addr, 1316 &in6addr_any)) 1317 return (0); 1318 break; 1319 #endif 1320 default: 1321 break; 1322 } 1323 1324 if (!ISSET(ifa->ifa_ifp->if_flags, (IFF_LOOPBACK|IFF_POINTOPOINT))) 1325 flags |= RTF_LLINFO; 1326 1327 /* If there is no loopback entry, allocate one. */ 1328 rt = rtalloc(ifa->ifa_addr, 0, ifa->ifa_ifp->if_rdomain); 1329 if (rt == NULL || !ISSET(rt->rt_flags, flags)) 1330 error = rt_ifa_add(ifa, flags, ifa->ifa_addr); 1331 rtfree(rt); 1332 1333 return (error); 1334 } 1335 1336 /* 1337 * Remove local rtentry of ifa's addresss if it exists. 1338 */ 1339 int 1340 rt_ifa_dellocal(struct ifaddr *ifa) 1341 { 1342 struct rtentry *rt; 1343 u_int flags = RTF_HOST|RTF_LOCAL; 1344 int error = 0; 1345 1346 /* 1347 * We do not add local routes for such address, so do not bother 1348 * removing them. 
1349 */ 1350 switch (ifa->ifa_addr->sa_family) { 1351 case AF_INET: 1352 if (satosin(ifa->ifa_addr)->sin_addr.s_addr == INADDR_ANY) 1353 return (0); 1354 break; 1355 #ifdef INET6 1356 case AF_INET6: 1357 if (IN6_ARE_ADDR_EQUAL(&satosin6(ifa->ifa_addr)->sin6_addr, 1358 &in6addr_any)) 1359 return (0); 1360 break; 1361 #endif 1362 default: 1363 break; 1364 } 1365 1366 if (!ISSET(ifa->ifa_ifp->if_flags, (IFF_LOOPBACK|IFF_POINTOPOINT))) 1367 flags |= RTF_LLINFO; 1368 1369 /* 1370 * Before deleting, check if a corresponding local host 1371 * route surely exists. With this check, we can avoid to 1372 * delete an interface direct route whose destination is same 1373 * as the address being removed. This can happen when removing 1374 * a subnet-router anycast address on an interface attached 1375 * to a shared medium. 1376 */ 1377 rt = rtalloc(ifa->ifa_addr, 0, ifa->ifa_ifp->if_rdomain); 1378 if (rt != NULL && ISSET(rt->rt_flags, flags)) 1379 error = rt_ifa_del(ifa, flags, ifa->ifa_addr); 1380 rtfree(rt); 1381 1382 return (error); 1383 } 1384 1385 /* 1386 * Remove all addresses attached to ``ifa''. 1387 */ 1388 void 1389 rt_ifa_purge(struct ifaddr *ifa) 1390 { 1391 struct ifnet *ifp = ifa->ifa_ifp; 1392 unsigned int rtableid; 1393 int i; 1394 1395 KASSERT(ifp != NULL); 1396 1397 for (rtableid = 0; rtableid < rtmap_limit; rtableid++) { 1398 /* skip rtables that are not in the rdomain of the ifp */ 1399 if (rtable_l2(rtableid) != ifp->if_rdomain) 1400 continue; 1401 for (i = 1; i <= AF_MAX; i++) { 1402 rtable_walk(rtableid, i, rt_ifa_purge_walker, ifa); 1403 } 1404 } 1405 } 1406 1407 int 1408 rt_ifa_purge_walker(struct rtentry *rt, void *vifa, unsigned int rtableid) 1409 { 1410 struct ifaddr *ifa = vifa; 1411 struct ifnet *ifp = ifa->ifa_ifp; 1412 int error; 1413 1414 if (rt->rt_ifa != ifa) 1415 return (0); 1416 1417 if ((error = rtdeletemsg(rt, ifp, rtableid))) { 1418 return (error); 1419 } 1420 1421 return (EAGAIN); 1422 1423 } 1424 1425 /* 1426 * Route timer routines. These routes allow functions to be called 1427 * for various routes at any time. This is useful in supporting 1428 * path MTU discovery and redirect route deletion. 1429 * 1430 * This is similar to some BSDI internal functions, but it provides 1431 * for multiple queues for efficiency's sake... 1432 */ 1433 1434 LIST_HEAD(, rttimer_queue) rttimer_queue_head; 1435 static int rt_init_done = 0; 1436 1437 #define RTTIMER_CALLOUT(r) { \ 1438 if (r->rtt_func != NULL) { \ 1439 (*r->rtt_func)(r->rtt_rt, r); \ 1440 } else { \ 1441 struct rt_addrinfo info; \ 1442 bzero(&info, sizeof(info)); \ 1443 info.rti_info[RTAX_DST] = rt_key(r->rtt_rt); \ 1444 rtrequest(RTM_DELETE, &info, \ 1445 r->rtt_rt->rt_priority, NULL, r->rtt_tableid); \ 1446 } \ 1447 } 1448 1449 /* 1450 * Some subtle order problems with domain initialization mean that 1451 * we cannot count on this being run from rt_init before various 1452 * protocol initializations are done. Therefore, we make sure 1453 * that this is run when the first queue is added... 
1454 */ 1455 1456 void 1457 rt_timer_init(void) 1458 { 1459 static struct timeout rt_timer_timeout; 1460 1461 if (rt_init_done) 1462 panic("rt_timer_init: already initialized"); 1463 1464 pool_init(&rttimer_pool, sizeof(struct rttimer), 0, IPL_SOFTNET, 0, 1465 "rttmr", NULL); 1466 1467 LIST_INIT(&rttimer_queue_head); 1468 timeout_set(&rt_timer_timeout, rt_timer_timer, &rt_timer_timeout); 1469 timeout_add_sec(&rt_timer_timeout, 1); 1470 rt_init_done = 1; 1471 } 1472 1473 struct rttimer_queue * 1474 rt_timer_queue_create(u_int timeout) 1475 { 1476 struct rttimer_queue *rtq; 1477 1478 if (rt_init_done == 0) 1479 rt_timer_init(); 1480 1481 if ((rtq = malloc(sizeof(*rtq), M_RTABLE, M_NOWAIT|M_ZERO)) == NULL) 1482 return (NULL); 1483 1484 rtq->rtq_timeout = timeout; 1485 rtq->rtq_count = 0; 1486 TAILQ_INIT(&rtq->rtq_head); 1487 LIST_INSERT_HEAD(&rttimer_queue_head, rtq, rtq_link); 1488 1489 return (rtq); 1490 } 1491 1492 void 1493 rt_timer_queue_change(struct rttimer_queue *rtq, long timeout) 1494 { 1495 rtq->rtq_timeout = timeout; 1496 } 1497 1498 void 1499 rt_timer_queue_destroy(struct rttimer_queue *rtq) 1500 { 1501 struct rttimer *r; 1502 1503 while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL) { 1504 LIST_REMOVE(r, rtt_link); 1505 TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next); 1506 RTTIMER_CALLOUT(r); 1507 pool_put(&rttimer_pool, r); 1508 if (rtq->rtq_count > 0) 1509 rtq->rtq_count--; 1510 else 1511 printf("rt_timer_queue_destroy: rtq_count reached 0\n"); 1512 } 1513 1514 LIST_REMOVE(rtq, rtq_link); 1515 free(rtq, M_RTABLE, sizeof(*rtq)); 1516 } 1517 1518 unsigned long 1519 rt_timer_queue_count(struct rttimer_queue *rtq) 1520 { 1521 return (rtq->rtq_count); 1522 } 1523 1524 void 1525 rt_timer_remove_all(struct rtentry *rt) 1526 { 1527 struct rttimer *r; 1528 1529 while ((r = LIST_FIRST(&rt->rt_timer)) != NULL) { 1530 LIST_REMOVE(r, rtt_link); 1531 TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next); 1532 if (r->rtt_queue->rtq_count > 0) 1533 r->rtt_queue->rtq_count--; 1534 else 1535 printf("rt_timer_remove_all: rtq_count reached 0\n"); 1536 pool_put(&rttimer_pool, r); 1537 } 1538 } 1539 1540 int 1541 rt_timer_add(struct rtentry *rt, void (*func)(struct rtentry *, 1542 struct rttimer *), struct rttimer_queue *queue, u_int rtableid) 1543 { 1544 struct rttimer *r; 1545 long current_time; 1546 1547 current_time = time_uptime; 1548 rt->rt_rmx.rmx_expire = time_uptime + queue->rtq_timeout; 1549 1550 /* 1551 * If there's already a timer with this action, destroy it before 1552 * we add a new one. 1553 */ 1554 for (r = LIST_FIRST(&rt->rt_timer); r != NULL; 1555 r = LIST_NEXT(r, rtt_link)) { 1556 if (r->rtt_func == func) { 1557 LIST_REMOVE(r, rtt_link); 1558 TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next); 1559 if (r->rtt_queue->rtq_count > 0) 1560 r->rtt_queue->rtq_count--; 1561 else 1562 printf("rt_timer_add: rtq_count reached 0\n"); 1563 pool_put(&rttimer_pool, r); 1564 break; /* only one per list, so we can quit... 
		}
	}

	r = pool_get(&rttimer_pool, PR_NOWAIT | PR_ZERO);
	if (r == NULL)
		return (ENOBUFS);

	r->rtt_rt = rt;
	r->rtt_time = current_time;
	r->rtt_func = func;
	r->rtt_queue = queue;
	r->rtt_tableid = rtableid;
	LIST_INSERT_HEAD(&rt->rt_timer, r, rtt_link);
	TAILQ_INSERT_TAIL(&queue->rtq_head, r, rtt_next);
	r->rtt_queue->rtq_count++;

	return (0);
}

void
rt_timer_timer(void *arg)
{
	struct timeout		*to = (struct timeout *)arg;
	struct rttimer_queue	*rtq;
	struct rttimer		*r;
	long			 current_time;
	int			 s;

	current_time = time_uptime;

	s = splsoftnet();
	for (rtq = LIST_FIRST(&rttimer_queue_head); rtq != NULL;
	     rtq = LIST_NEXT(rtq, rtq_link)) {
		while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL &&
		    (r->rtt_time + rtq->rtq_timeout) < current_time) {
			LIST_REMOVE(r, rtt_link);
			TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next);
			RTTIMER_CALLOUT(r);
			pool_put(&rttimer_pool, r);
			if (rtq->rtq_count > 0)
				rtq->rtq_count--;
			else
				printf("rt_timer_timer: rtq_count reached 0\n");
		}
	}
	splx(s);

	timeout_add_sec(to, 1);
}

u_int16_t
rtlabel_name2id(char *name)
{
	struct rt_label		*label, *p = NULL;
	u_int16_t		 new_id = 1;

	if (!name[0])
		return (0);

	TAILQ_FOREACH(label, &rt_labels, rtl_entry)
		if (strcmp(name, label->rtl_name) == 0) {
			label->rtl_ref++;
			return (label->rtl_id);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find.  if there is none or the
	 * list is empty, append a new entry at the end.
	 */
	if (!TAILQ_EMPTY(&rt_labels))
		for (p = TAILQ_FIRST(&rt_labels); p != NULL &&
		    p->rtl_id == new_id; p = TAILQ_NEXT(p, rtl_entry))
			new_id = p->rtl_id + 1;

	if (new_id > LABELID_MAX)
		return (0);

	label = malloc(sizeof(*label), M_RTABLE, M_NOWAIT|M_ZERO);
	if (label == NULL)
		return (0);
	strlcpy(label->rtl_name, name, sizeof(label->rtl_name));
	label->rtl_id = new_id;
	label->rtl_ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, label, rtl_entry);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(&rt_labels, label, rtl_entry);

	return (label->rtl_id);
}

const char *
rtlabel_id2name(u_int16_t id)
{
	struct rt_label	*label;

	TAILQ_FOREACH(label, &rt_labels, rtl_entry)
		if (label->rtl_id == id)
			return (label->rtl_name);

	return (NULL);
}

struct sockaddr *
rtlabel_id2sa(u_int16_t labelid, struct sockaddr_rtlabel *sa_rl)
{
	const char	*label;

	if (labelid == 0 || (label = rtlabel_id2name(labelid)) == NULL)
		return (NULL);

	bzero(sa_rl, sizeof(*sa_rl));
	sa_rl->sr_len = sizeof(*sa_rl);
	sa_rl->sr_family = AF_UNSPEC;
	strlcpy(sa_rl->sr_label, label, sizeof(sa_rl->sr_label));

	return ((struct sockaddr *)sa_rl);
}

void
rtlabel_unref(u_int16_t id)
{
	struct rt_label	*p, *next;

	if (id == 0)
		return;

	for (p = TAILQ_FIRST(&rt_labels); p != NULL; p = next) {
		next = TAILQ_NEXT(p, rtl_entry);
		if (id == p->rtl_id) {
			if (--p->rtl_ref == 0) {
				TAILQ_REMOVE(&rt_labels, p, rtl_entry);
				free(p, M_RTABLE, sizeof(*p));
			}
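			/* Label IDs are unique, no need to look further. */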
			break;
		}
	}
}

#ifndef SMALL_KERNEL
void
rt_if_track(struct ifnet *ifp)
{
	int i;
	u_int tid;

	for (tid = 0; tid < rtmap_limit; tid++) {
		/* skip rtables that are not in the rdomain of the ifp */
		if (rtable_l2(tid) != ifp->if_rdomain)
			continue;
		for (i = 1; i <= AF_MAX; i++) {
			if (!rtable_mpath_capable(tid, i))
				continue;

			rtable_walk(tid, i, rt_if_linkstate_change, ifp);
		}
	}
}

int
rt_if_linkstate_change(struct rtentry *rt, void *arg, u_int id)
{
	struct ifnet *ifp = arg;
	struct sockaddr_in6 sa_mask;

	if (rt->rt_ifidx != ifp->if_index)
		return (0);

	/* Local routes are always usable. */
	if (rt->rt_flags & RTF_LOCAL) {
		rt->rt_flags |= RTF_UP;
		return (0);
	}

	if (LINK_STATE_IS_UP(ifp->if_link_state) && ifp->if_flags & IFF_UP) {
		if (!(rt->rt_flags & RTF_UP)) {
			/* bring route up */
			rt->rt_flags |= RTF_UP;
			rtable_mpath_reprio(id, rt_key(rt),
			    rt_plen2mask(rt, &sa_mask),
			    rt->rt_priority & RTP_MASK, rt);
		}
	} else {
		if (rt->rt_flags & RTF_UP) {
			/*
			 * Remove redirected and cloned routes (mainly ARP)
			 * from down interfaces so we have a chance to get
			 * new routes from a better source.
			 */
			if (ISSET(rt->rt_flags, RTF_CLONED|RTF_DYNAMIC) &&
			    !ISSET(rt->rt_flags, RTF_CACHED)) {
				int error;

				if ((error = rtdeletemsg(rt, ifp, id)))
					return (error);
				return (EAGAIN);
			}
			/* take route down */
			rt->rt_flags &= ~RTF_UP;
			rtable_mpath_reprio(id, rt_key(rt),
			    rt_plen2mask(rt, &sa_mask),
			    rt->rt_priority | RTP_DOWN, rt);
		}
	}
	if_group_routechange(rt_key(rt), rt_plen2mask(rt, &sa_mask));

	return (0);
}
#endif

struct sockaddr *
rt_plentosa(sa_family_t af, int plen, struct sockaddr_in6 *sa_mask)
{
	struct sockaddr_in	*sin = (struct sockaddr_in *)sa_mask;
#ifdef INET6
	struct sockaddr_in6	*sin6 = (struct sockaddr_in6 *)sa_mask;
#endif

	KASSERT(plen >= 0 || plen == -1);

	if (plen == -1)
		return (NULL);

	memset(sa_mask, 0, sizeof(*sa_mask));

	switch (af) {
	case AF_INET:
		sin->sin_family = AF_INET;
		sin->sin_len = sizeof(struct sockaddr_in);
		in_prefixlen2mask(&sin->sin_addr, plen);
		break;
#ifdef INET6
	case AF_INET6:
		sin6->sin6_family = AF_INET6;
		sin6->sin6_len = sizeof(struct sockaddr_in6);
		in6_prefixlen2mask(&sin6->sin6_addr, plen);
		break;
#endif /* INET6 */
	default:
		return (NULL);
	}

	return ((struct sockaddr *)sa_mask);
}

struct sockaddr *
rt_plen2mask(struct rtentry *rt, struct sockaddr_in6 *sa_mask)
{
#ifndef ART
	return (rt_mask(rt));
#else
	return (rt_plentosa(rt_key(rt)->sa_family, rt_plen(rt), sa_mask));
#endif /* ART */
}

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_output.h>

void
db_print_sa(struct sockaddr *sa)
{
	int len;
	u_char *p;

	if (sa == NULL) {
		db_printf("[NULL]");
		return;
	}

	p = (u_char *)sa;
	len = sa->sa_len;
	db_printf("[");
	while (len > 0) {
		db_printf("%d", *p);
		p++;
		len--;
		if (len)
			db_printf(",");
	}
	db_printf("]\n");
}

void
db_print_ifa(struct ifaddr *ifa)
{
	if (ifa == NULL)
		return;
	db_printf(" ifa_addr=");
	db_print_sa(ifa->ifa_addr);
	db_printf(" ifa_dsta=");
	db_print_sa(ifa->ifa_dstaddr);
	db_printf(" ifa_mask=");
	db_print_sa(ifa->ifa_netmask);
	db_printf(" flags=0x%x, refcnt=%d, metric=%d\n",
	    ifa->ifa_flags, ifa->ifa_refcnt, ifa->ifa_metric);
}

/*
 * Function to pass to rtable_walk().
 * Return non-zero error to abort walk.
 */
int
db_show_rtentry(struct rtentry *rt, void *w, unsigned int id)
{
	db_printf("rtentry=%p", rt);

	db_printf(" flags=0x%x refcnt=%d use=%llu expire=%lld rtableid=%u\n",
	    rt->rt_flags, rt->rt_refcnt, rt->rt_use, rt->rt_expire, id);

	db_printf(" key="); db_print_sa(rt_key(rt));
	db_printf(" plen=%d", rt_plen(rt));
	db_printf(" gw="); db_print_sa(rt->rt_gateway);
	db_printf(" ifidx=%u ", rt->rt_ifidx);
	db_printf(" ifa=%p\n", rt->rt_ifa);
	db_print_ifa(rt->rt_ifa);

	db_printf(" gwroute=%p llinfo=%p\n", rt->rt_gwroute, rt->rt_llinfo);
	return (0);
}

/*
 * Function to print all the route trees.
 * Use this from ddb: "call db_show_arptab"
 */
int
db_show_arptab(void)
{
	db_printf("Route tree for AF_INET\n");
	rtable_walk(0, AF_INET, db_show_rtentry, NULL);
	return (0);
}
#endif /* DDB */