/*	$NetBSD: route.c,v 1.107 2008/04/10 18:12:02 dyoung Exp $	*/

/*-
 * Copyright (c) 1998, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Kevin M. Lahey of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1980, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)route.c	8.3 (Berkeley) 1/9/95
 */

#include "opt_route.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: route.c,v 1.107 2008/04/10 18:12:02 dyoung Exp $");

#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/proc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/kernel.h>
#include <sys/ioctl.h>
#include <sys/pool.h>

#include <net/if.h>
#include <net/route.h>
#include <net/raw_cb.h>

#include <netinet/in.h>
#include <netinet/in_var.h>

#ifdef RTFLUSH_DEBUG
#define	rtcache_debug()	__predict_false(_rtcache_debug)
#else /* RTFLUSH_DEBUG */
#define	rtcache_debug()	0
#endif /* RTFLUSH_DEBUG */

struct route_cb route_cb;
struct rtstat rtstat;
struct radix_node_head *rt_tables[AF_MAX+1];

int rttrash;		/* routes not in table but not freed */

POOL_INIT(rtentry_pool, sizeof(struct rtentry), 0, 0, 0, "rtentpl", NULL,
    IPL_SOFTNET);
POOL_INIT(rttimer_pool, sizeof(struct rttimer), 0, 0, 0, "rttmrpl", NULL,
    IPL_SOFTNET);

struct callout rt_timer_ch;	/* callout for rt_timer_timer() */

#ifdef RTFLUSH_DEBUG
static int _rtcache_debug = 0;
#endif /* RTFLUSH_DEBUG */

static int rtdeletemsg(struct rtentry *);
static int rtflushclone1(struct rtentry *, void *);
static void rtflushclone(sa_family_t family, struct rtentry *);

#ifdef RTFLUSH_DEBUG
SYSCTL_SETUP(sysctl_net_rtcache_setup, "sysctl net.rtcache.debug setup")
{
	const struct sysctlnode *rnode;

	/* XXX do not duplicate */
	if (sysctl_createv(clog, 0, NULL, &rnode, CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "net", NULL, NULL, 0, NULL, 0, CTL_NET, CTL_EOL) != 0)
		return;
	if (sysctl_createv(clog, 0, &rnode, &rnode, CTLFLAG_PERMANENT,
	    CTLTYPE_NODE,
	    "rtcache", SYSCTL_DESCR("Route cache related settings"),
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
		return;
	if (sysctl_createv(clog, 0, &rnode, &rnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Debug route caches"),
	    NULL, 0, &_rtcache_debug, 0, CTL_CREATE, CTL_EOL) != 0)
		return;
}
#endif /* RTFLUSH_DEBUG */
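
/*
 * Accessors for the ifaddr bound to a route.  rt_get_ifa() returns
 * rt->rt_ifa, but first gives the address's optional ifa_getifa hook a
 * chance to substitute a more appropriate source address for the
 * route's key; when it does, the route is rebound with rt_replace_ifa().
 * rt_set_ifa() and rt_replace_ifa() take and drop ifaddr references
 * with IFAREF()/IFAFREE() and record ifa_seqno so that a stale binding
 * can be detected later.
 */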
struct ifaddr *
rt_get_ifa(struct rtentry *rt)
{
	struct ifaddr *ifa;

	if ((ifa = rt->rt_ifa) == NULL)
		return ifa;
	else if (ifa->ifa_getifa == NULL)
		return ifa;
#if 0
	else if (ifa->ifa_seqno != NULL && *ifa->ifa_seqno == rt->rt_ifa_seqno)
		return ifa;
#endif
	else {
		ifa = (*ifa->ifa_getifa)(ifa, rt_getkey(rt));
		rt_replace_ifa(rt, ifa);
		return ifa;
	}
}

static void
rt_set_ifa1(struct rtentry *rt, struct ifaddr *ifa)
{
	rt->rt_ifa = ifa;
	if (ifa->ifa_seqno != NULL)
		rt->rt_ifa_seqno = *ifa->ifa_seqno;
}

void
rt_replace_ifa(struct rtentry *rt, struct ifaddr *ifa)
{
	IFAREF(ifa);
	IFAFREE(rt->rt_ifa);
	rt_set_ifa1(rt, ifa);
}

static void
rt_set_ifa(struct rtentry *rt, struct ifaddr *ifa)
{
	IFAREF(ifa);
	rt_set_ifa1(rt, ifa);
}

void
rtable_init(void **table)
{
	struct domain *dom;
	DOMAIN_FOREACH(dom)
		if (dom->dom_rtattach)
			dom->dom_rtattach(&table[dom->dom_family],
			    dom->dom_rtoffset);
}

void
route_init(void)
{

	rt_init();
	rn_init();	/* initialize all zeroes, all ones, mask table */
	rtable_init((void **)rt_tables);
}

void
rtflushall(int family)
{
	struct domain *dom;

	if (rtcache_debug())
		printf("%s: enter\n", __func__);

	if ((dom = pffinddomain(family)) == NULL)
		return;

	rtcache_invalidate(&dom->dom_rtcache);
}

void
rtcache(struct route *ro)
{
	struct domain *dom;

	KASSERT(ro->_ro_rt != NULL);
	KASSERT(ro->ro_invalid == false);
	KASSERT(rtcache_getdst(ro) != NULL);

	if ((dom = pffinddomain(rtcache_getdst(ro)->sa_family)) == NULL)
		return;

	LIST_INSERT_HEAD(&dom->dom_rtcache, ro, ro_rtcache_next);
}

/*
 * Packet routing routines.
 */
struct rtentry *
rtalloc1(const struct sockaddr *dst, int report)
{
	struct radix_node_head *rnh = rt_tables[dst->sa_family];
	struct rtentry *rt;
	struct radix_node *rn;
	struct rtentry *newrt = NULL;
	struct rt_addrinfo info;
	int s = splsoftnet(), err = 0, msgtype = RTM_MISS;

	if (rnh && (rn = rnh->rnh_matchaddr(dst, rnh)) &&
	    ((rn->rn_flags & RNF_ROOT) == 0)) {
		newrt = rt = (struct rtentry *)rn;
		if (report && (rt->rt_flags & RTF_CLONING)) {
			err = rtrequest(RTM_RESOLVE, dst, NULL, NULL, 0,
			    &newrt);
			if (err) {
				newrt = rt;
				rt->rt_refcnt++;
				goto miss;
			}
			KASSERT(newrt != NULL);
			if ((rt = newrt) && (rt->rt_flags & RTF_XRESOLVE)) {
				msgtype = RTM_RESOLVE;
				goto miss;
			}
			/* Inform listeners of the new route */
			memset(&info, 0, sizeof(info));
			info.rti_info[RTAX_DST] = rt_getkey(rt);
			info.rti_info[RTAX_NETMASK] = rt_mask(rt);
			info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
			if (rt->rt_ifp != NULL) {
				info.rti_info[RTAX_IFP] =
				    rt->rt_ifp->if_dl->ifa_addr;
				info.rti_info[RTAX_IFA] = rt->rt_ifa->ifa_addr;
			}
			rt_missmsg(RTM_ADD, &info, rt->rt_flags, 0);
		} else
			rt->rt_refcnt++;
	} else {
		rtstat.rts_unreach++;
	miss:	if (report) {
			memset((void *)&info, 0, sizeof(info));
			info.rti_info[RTAX_DST] = dst;
			rt_missmsg(msgtype, &info, 0, err);
		}
	}
	splx(s);
	return newrt;
}
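
/*
 * rtalloc1() returns a referenced rtentry: either the matching entry
 * with rt_refcnt incremented, or a clone created via RTM_RESOLVE when
 * the match is an RTF_CLONING route and `report' is set.  The caller
 * owns that reference and must drop it with rtfree() (or RTFREE()).
 * A minimal caller, sketched here for illustration only:
 *
 *	struct rtentry *rt;
 *
 *	if ((rt = rtalloc1(dst, 1)) == NULL)
 *		return EHOSTUNREACH;
 *	... use rt->rt_ifp, rt->rt_gateway ...
 *	rtfree(rt);
 */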

void
rtfree(struct rtentry *rt)
{
	struct ifaddr *ifa;

	if (rt == NULL)
		panic("rtfree");
	rt->rt_refcnt--;
	if (rt->rt_refcnt <= 0 && (rt->rt_flags & RTF_UP) == 0) {
		if (rt->rt_nodes->rn_flags & (RNF_ACTIVE | RNF_ROOT))
			panic("rtfree 2");
		rttrash--;
		if (rt->rt_refcnt < 0) {
			printf("rtfree: %p not freed (neg refs)\n", rt);
			return;
		}
		rt_timer_remove_all(rt, 0);
		ifa = rt->rt_ifa;
		rt->rt_ifa = NULL;
		IFAFREE(ifa);
		rt->rt_ifp = NULL;
		rt_destroy(rt);
		pool_put(&rtentry_pool, rt);
	}
}

void
ifafree(struct ifaddr *ifa)
{

#ifdef DIAGNOSTIC
	if (ifa == NULL)
		panic("ifafree: null ifa");
	if (ifa->ifa_refcnt != 0)
		panic("ifafree: ifa_refcnt != 0 (%d)", ifa->ifa_refcnt);
#endif
#ifdef IFAREF_DEBUG
	printf("ifafree: freeing ifaddr %p\n", ifa);
#endif
	free(ifa, M_IFADDR);
}

static inline int
equal(const struct sockaddr *sa1, const struct sockaddr *sa2)
{
	return sockaddr_cmp(sa1, sa2) == 0;
}

/*
 * Force a routing table entry to the specified
 * destination to go through the given gateway.
 * Normally called as a result of a routing redirect
 * message from the network layer.
 *
 * N.B.: must be called at splsoftnet
 */
void
rtredirect(const struct sockaddr *dst, const struct sockaddr *gateway,
	const struct sockaddr *netmask, int flags, const struct sockaddr *src,
	struct rtentry **rtp)
{
	struct rtentry *rt;
	int error = 0;
	u_quad_t *stat = NULL;
	struct rt_addrinfo info;
	struct ifaddr *ifa;

	/* verify the gateway is directly reachable */
	if ((ifa = ifa_ifwithnet(gateway)) == NULL) {
		error = ENETUNREACH;
		goto out;
	}
	rt = rtalloc1(dst, 0);
	/*
	 * If the redirect isn't from our current router for this dst,
	 * it's either old or wrong.  If it redirects us to ourselves,
	 * we have a routing loop, perhaps as a result of an interface
	 * going down recently.
	 */
	if (!(flags & RTF_DONE) && rt &&
	    (!equal(src, rt->rt_gateway) || rt->rt_ifa != ifa))
		error = EINVAL;
	else if (ifa_ifwithaddr(gateway))
		error = EHOSTUNREACH;
	if (error)
		goto done;
	/*
	 * Create a new entry if we just got back a wildcard entry
	 * or the lookup failed.  This is necessary for hosts
	 * which use routing redirects generated by smart gateways
	 * to dynamically build the routing tables.
	 */
	if (rt == NULL || (rt_mask(rt) && rt_mask(rt)->sa_len < 2))
		goto create;
	/*
	 * Don't listen to the redirect if it's
	 * for a route to an interface.
	 */
	if (rt->rt_flags & RTF_GATEWAY) {
		if (((rt->rt_flags & RTF_HOST) == 0) && (flags & RTF_HOST)) {
			/*
			 * Changing from route to net => route to host.
			 * Create new route, rather than smashing route to net.
			 */
		create:
			if (rt != NULL)
				rtfree(rt);
			flags |= RTF_GATEWAY | RTF_DYNAMIC;
			info.rti_info[RTAX_DST] = dst;
			info.rti_info[RTAX_GATEWAY] = gateway;
			info.rti_info[RTAX_NETMASK] = netmask;
			info.rti_ifa = ifa;
			info.rti_flags = flags;
			rt = NULL;
			error = rtrequest1(RTM_ADD, &info, &rt);
			if (rt != NULL)
				flags = rt->rt_flags;
			stat = &rtstat.rts_dynamic;
		} else {
			/*
			 * Smash the current notion of the gateway to
			 * this destination.  Should check about netmask!!!
			 */
			rt->rt_flags |= RTF_MODIFIED;
			flags |= RTF_MODIFIED;
			stat = &rtstat.rts_newgateway;
			rt_setgate(rt, gateway);
		}
	} else
		error = EHOSTUNREACH;
done:
	if (rt) {
		if (rtp != NULL && !error)
			*rtp = rt;
		else
			rtfree(rt);
	}
out:
	if (error)
		rtstat.rts_badredirect++;
	else if (stat != NULL)
		(*stat)++;
	memset(&info, 0, sizeof(info));
	info.rti_info[RTAX_DST] = dst;
	info.rti_info[RTAX_GATEWAY] = gateway;
	info.rti_info[RTAX_NETMASK] = netmask;
	info.rti_info[RTAX_AUTHOR] = src;
	rt_missmsg(RTM_REDIRECT, &info, flags, error);
}
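
/*
 * rtredirect() is normally driven from a protocol's redirect handler.
 * An illustrative sketch (the sockaddrs and flag choice below are
 * hypothetical, not lifted from a particular caller): on receipt of a
 * host redirect for `dst' naming `gateway' as the new first hop, with
 * `src' the router that sent the redirect:
 *
 *	struct rtentry *rt = NULL;
 *
 *	rtredirect(dst, gateway, NULL, RTF_GATEWAY | RTF_HOST, src, &rt);
 *	if (rt != NULL)
 *		rtfree(rt);
 *
 * On success the table now holds an RTF_DYNAMIC host route (or a
 * modified gateway), and an RTM_REDIRECT message is delivered to
 * routing-socket listeners either way.
 */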

/*
 * Delete a route and generate a message
 */
static int
rtdeletemsg(struct rtentry *rt)
{
	int error;
	struct rt_addrinfo info;

	/*
	 * Request the new route so that the entry is not actually
	 * deleted.  That will allow the information being reported to
	 * be accurate (and consistent with route_output()).
	 */
	memset(&info, 0, sizeof(info));
	info.rti_info[RTAX_DST] = rt_getkey(rt);
	info.rti_info[RTAX_NETMASK] = rt_mask(rt);
	info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
	info.rti_flags = rt->rt_flags;
	error = rtrequest1(RTM_DELETE, &info, &rt);

	rt_missmsg(RTM_DELETE, &info, info.rti_flags, error);

	/* Adjust the refcount */
	if (error == 0 && rt->rt_refcnt <= 0) {
		rt->rt_refcnt++;
		rtfree(rt);
	}
	return error;
}

static int
rtflushclone1(struct rtentry *rt, void *arg)
{
	struct rtentry *parent;

	parent = (struct rtentry *)arg;
	if ((rt->rt_flags & RTF_CLONED) != 0 && rt->rt_parent == parent)
		rtdeletemsg(rt);
	return 0;
}

static void
rtflushclone(sa_family_t family, struct rtentry *parent)
{

#ifdef DIAGNOSTIC
	if (!parent || (parent->rt_flags & RTF_CLONING) == 0)
		panic("rtflushclone: called with a non-cloning route");
#endif
	rt_walktree(family, rtflushclone1, (void *)parent);
}

/*
 * Routing table ioctl interface.
 */
int
rtioctl(u_long req, void *data, struct lwp *l)
{
	return EOPNOTSUPP;
}

struct ifaddr *
ifa_ifwithroute(int flags, const struct sockaddr *dst,
	const struct sockaddr *gateway)
{
	struct ifaddr *ifa;
	if ((flags & RTF_GATEWAY) == 0) {
		/*
		 * If we are adding a route to an interface,
		 * and the interface is a pt to pt link
		 * we should search for the destination
		 * as our clue to the interface.  Otherwise
		 * we can use the local address.
		 */
		ifa = NULL;
		if (flags & RTF_HOST)
			ifa = ifa_ifwithdstaddr(dst);
		if (ifa == NULL)
			ifa = ifa_ifwithaddr(gateway);
	} else {
		/*
		 * If we are adding a route to a remote net
		 * or host, the gateway may still be on the
		 * other end of a pt to pt link.
		 */
		ifa = ifa_ifwithdstaddr(gateway);
	}
	if (ifa == NULL)
		ifa = ifa_ifwithnet(gateway);
	if (ifa == NULL) {
		struct rtentry *rt = rtalloc1(dst, 0);
		if (rt == NULL)
			return NULL;
		rt->rt_refcnt--;
		if ((ifa = rt->rt_ifa) == NULL)
			return NULL;
	}
	if (ifa->ifa_addr->sa_family != dst->sa_family) {
		struct ifaddr *oifa = ifa;
		ifa = ifaof_ifpforaddr(dst, ifa->ifa_ifp);
		if (ifa == NULL)
			ifa = oifa;
	}
	return ifa;
}

#define ROUNDUP(a) (a>0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))
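
/*
 * ROUNDUP() pads a sockaddr length up to the next multiple of
 * sizeof(long), the alignment used between sockaddrs in routing
 * messages, and maps 0 to sizeof(long) so an empty sockaddr still
 * occupies one slot.  For example, with 8-byte longs ROUNDUP(5) and
 * ROUNDUP(8) are both 8, ROUNDUP(9) is 16, and ROUNDUP(0) is 8.
 */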

int
rtrequest(int req, const struct sockaddr *dst, const struct sockaddr *gateway,
	const struct sockaddr *netmask, int flags, struct rtentry **ret_nrt)
{
	struct rt_addrinfo info;

	memset(&info, 0, sizeof(info));
	info.rti_flags = flags;
	info.rti_info[RTAX_DST] = dst;
	info.rti_info[RTAX_GATEWAY] = gateway;
	info.rti_info[RTAX_NETMASK] = netmask;
	return rtrequest1(req, &info, ret_nrt);
}

int
rt_getifa(struct rt_addrinfo *info)
{
	struct ifaddr *ifa;
	const struct sockaddr *dst = info->rti_info[RTAX_DST];
	const struct sockaddr *gateway = info->rti_info[RTAX_GATEWAY];
	const struct sockaddr *ifaaddr = info->rti_info[RTAX_IFA];
	const struct sockaddr *ifpaddr = info->rti_info[RTAX_IFP];
	int flags = info->rti_flags;

	/*
	 * ifp may be specified by sockaddr_dl when protocol address
	 * is ambiguous
	 */
	if (info->rti_ifp == NULL && ifpaddr != NULL
	    && ifpaddr->sa_family == AF_LINK &&
	    (ifa = ifa_ifwithnet(ifpaddr)) != NULL)
		info->rti_ifp = ifa->ifa_ifp;
	if (info->rti_ifa == NULL && ifaaddr != NULL)
		info->rti_ifa = ifa_ifwithaddr(ifaaddr);
	if (info->rti_ifa == NULL) {
		const struct sockaddr *sa;

		sa = ifaaddr != NULL ? ifaaddr :
		    (gateway != NULL ? gateway : dst);
		if (sa != NULL && info->rti_ifp != NULL)
			info->rti_ifa = ifaof_ifpforaddr(sa, info->rti_ifp);
		else if (dst != NULL && gateway != NULL)
			info->rti_ifa = ifa_ifwithroute(flags, dst, gateway);
		else if (sa != NULL)
			info->rti_ifa = ifa_ifwithroute(flags, sa, sa);
	}
	if ((ifa = info->rti_ifa) == NULL)
		return ENETUNREACH;
	if (ifa->ifa_getifa != NULL)
		info->rti_ifa = ifa = (*ifa->ifa_getifa)(ifa, dst);
	if (info->rti_ifp == NULL)
		info->rti_ifp = ifa->ifa_ifp;
	return 0;
}
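
/*
 * rtrequest() is the convenience wrapper most kernel callers use: it
 * packs its arguments into a struct rt_addrinfo and hands them to
 * rtrequest1().  A sketch of adding a gateway route, illustrative
 * only (the sockaddrs are assumed to be filled in by the caller):
 *
 *	struct rtentry *rt = NULL;
 *	int error;
 *
 *	error = rtrequest(RTM_ADD, dst, gateway, netmask,
 *	    RTF_UP | RTF_GATEWAY, &rt);
 *	if (error == 0)
 *		rtfree(rt);
 *
 * Passing a non-NULL ret_nrt makes rtrequest1() return the new entry
 * with an extra reference, so the caller must rtfree() it when done.
 */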

int
rtrequest1(int req, struct rt_addrinfo *info, struct rtentry **ret_nrt)
{
	int s = splsoftnet();
	int error = 0;
	struct rtentry *rt, *crt;
	struct radix_node *rn;
	struct radix_node_head *rnh;
	struct ifaddr *ifa;
	struct sockaddr_storage maskeddst;
	const struct sockaddr *dst = info->rti_info[RTAX_DST];
	const struct sockaddr *gateway = info->rti_info[RTAX_GATEWAY];
	const struct sockaddr *netmask = info->rti_info[RTAX_NETMASK];
	int flags = info->rti_flags;
#define senderr(x) { error = x ; goto bad; }

	if ((rnh = rt_tables[dst->sa_family]) == NULL)
		senderr(ESRCH);
	if (flags & RTF_HOST)
		netmask = NULL;
	switch (req) {
	case RTM_DELETE:
		if (netmask) {
			rt_maskedcopy(dst, (struct sockaddr *)&maskeddst,
			    netmask);
			dst = (struct sockaddr *)&maskeddst;
		}
		if ((rn = rnh->rnh_lookup(dst, netmask, rnh)) == NULL)
			senderr(ESRCH);
		rt = (struct rtentry *)rn;
		if ((rt->rt_flags & RTF_CLONING) != 0) {
			/* clean up any cloned children */
			rtflushclone(dst->sa_family, rt);
		}
		if ((rn = rnh->rnh_deladdr(dst, netmask, rnh)) == NULL)
			senderr(ESRCH);
		if (rn->rn_flags & (RNF_ACTIVE | RNF_ROOT))
			panic("rtrequest delete");
		rt = (struct rtentry *)rn;
		if (rt->rt_gwroute) {
			RTFREE(rt->rt_gwroute);
			rt->rt_gwroute = NULL;
		}
		if (rt->rt_parent) {
			rt->rt_parent->rt_refcnt--;
			rt->rt_parent = NULL;
		}
		rt->rt_flags &= ~RTF_UP;
		if ((ifa = rt->rt_ifa) && ifa->ifa_rtrequest)
			ifa->ifa_rtrequest(RTM_DELETE, rt, info);
		rttrash++;
		if (ret_nrt)
			*ret_nrt = rt;
		else if (rt->rt_refcnt <= 0) {
			rt->rt_refcnt++;
			rtfree(rt);
		}
		break;

	case RTM_RESOLVE:
		if (ret_nrt == NULL || (rt = *ret_nrt) == NULL)
			senderr(EINVAL);
		if ((rt->rt_flags & RTF_CLONING) == 0)
			senderr(EINVAL);
		ifa = rt->rt_ifa;
		flags = rt->rt_flags & ~(RTF_CLONING | RTF_STATIC);
		flags |= RTF_CLONED;
		gateway = rt->rt_gateway;
		flags |= RTF_HOST;
		goto makeroute;

	case RTM_ADD:
		if (info->rti_ifa == NULL && (error = rt_getifa(info)))
			senderr(error);
		ifa = info->rti_ifa;
	makeroute:
		/* Already at splsoftnet() so pool_get/pool_put are safe */
		rt = pool_get(&rtentry_pool, PR_NOWAIT);
		if (rt == NULL)
			senderr(ENOBUFS);
		Bzero(rt, sizeof(*rt));
		rt->rt_flags = RTF_UP | flags;
		LIST_INIT(&rt->rt_timer);
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__, __LINE__,
		    (void *)rt->_rt_key);
		if (rt_setkey(rt, dst, M_NOWAIT) == NULL ||
		    rt_setgate(rt, gateway) != 0) {
			pool_put(&rtentry_pool, rt);
			senderr(ENOBUFS);
		}
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__, __LINE__,
		    (void *)rt->_rt_key);
		if (netmask) {
			rt_maskedcopy(dst, (struct sockaddr *)&maskeddst,
			    netmask);
			rt_setkey(rt, (struct sockaddr *)&maskeddst, M_NOWAIT);
			RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
			    __LINE__, (void *)rt->_rt_key);
		} else {
			rt_setkey(rt, dst, M_NOWAIT);
			RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
			    __LINE__, (void *)rt->_rt_key);
		}
		rt_set_ifa(rt, ifa);
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
		    __LINE__, (void *)rt->_rt_key);
		rt->rt_ifp = ifa->ifa_ifp;
		if (req == RTM_RESOLVE) {
			rt->rt_rmx = (*ret_nrt)->rt_rmx; /* copy metrics */
			rt->rt_parent = *ret_nrt;
			rt->rt_parent->rt_refcnt++;
		}
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
		    __LINE__, (void *)rt->_rt_key);
		rn = rnh->rnh_addaddr(rt_getkey(rt), netmask, rnh,
		    rt->rt_nodes);
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
		    __LINE__, (void *)rt->_rt_key);
		if (rn == NULL && (crt = rtalloc1(rt_getkey(rt), 0)) != NULL) {
			/* overwrite cloned route */
			if ((crt->rt_flags & RTF_CLONED) != 0) {
				rtdeletemsg(crt);
				rn = rnh->rnh_addaddr(rt_getkey(rt),
				    netmask, rnh, rt->rt_nodes);
			}
			RTFREE(crt);
			RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
			    __LINE__, (void *)rt->_rt_key);
		}
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
		    __LINE__, (void *)rt->_rt_key);
		if (rn == NULL) {
			IFAFREE(ifa);
			if ((rt->rt_flags & RTF_CLONED) != 0 && rt->rt_parent)
				rtfree(rt->rt_parent);
			if (rt->rt_gwroute)
				rtfree(rt->rt_gwroute);
			rt_destroy(rt);
			pool_put(&rtentry_pool, rt);
			senderr(EEXIST);
		}
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
		    __LINE__, (void *)rt->_rt_key);
		if (ifa->ifa_rtrequest)
			ifa->ifa_rtrequest(req, rt, info);
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
		    __LINE__, (void *)rt->_rt_key);
		if (ret_nrt) {
			*ret_nrt = rt;
			rt->rt_refcnt++;
		}
		if ((rt->rt_flags & RTF_CLONING) != 0) {
			/* clean up any cloned children */
			rtflushclone(dst->sa_family, rt);
		}
		rtflushall(dst->sa_family);
		break;
	case RTM_GET:
		if (netmask != NULL) {
			rt_maskedcopy(dst, (struct sockaddr *)&maskeddst,
			    netmask);
			dst = (struct sockaddr *)&maskeddst;
		}
		rn = rnh->rnh_lookup(dst, netmask, rnh);
		if (rn == NULL || (rn->rn_flags & RNF_ROOT) != 0)
			senderr(ESRCH);
		if (ret_nrt != NULL) {
			rt = (struct rtentry *)rn;
			*ret_nrt = rt;
			rt->rt_refcnt++;
		}
		break;
	}
bad:
	splx(s);
	return error;
}

int
rt_setgate(struct rtentry *rt, const struct sockaddr *gate)
{
	KASSERT(rt != rt->rt_gwroute);

	KASSERT(rt->_rt_key != NULL);
	RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
	    __LINE__, (void *)rt->_rt_key);

	if (rt->rt_gwroute) {
		RTFREE(rt->rt_gwroute);
		rt->rt_gwroute = NULL;
	}
	KASSERT(rt->_rt_key != NULL);
	RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
	    __LINE__, (void *)rt->_rt_key);
	if (rt->rt_gateway != NULL)
		sockaddr_free(rt->rt_gateway);
	KASSERT(rt->_rt_key != NULL);
	RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
	    __LINE__, (void *)rt->_rt_key);
	if ((rt->rt_gateway = sockaddr_dup(gate, M_NOWAIT)) == NULL)
		return ENOMEM;
	KASSERT(rt->_rt_key != NULL);
	RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
	    __LINE__, (void *)rt->_rt_key);

	if (rt->rt_flags & RTF_GATEWAY) {
		KASSERT(rt->_rt_key != NULL);
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
		    __LINE__, (void *)rt->_rt_key);
		rt->rt_gwroute = rtalloc1(gate, 1);
		/*
		 * If we switched gateways, grab the MTU from the new
		 * gateway route if the current MTU is greater than the
		 * MTU of the gateway.
		 * Note that, if the MTU of gateway is 0, we will reset the
		 * MTU of the route to run PMTUD again from scratch. XXX
		 */
		KASSERT(rt->_rt_key != NULL);
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
		    __LINE__, (void *)rt->_rt_key);
		if (rt->rt_gwroute
		    && !(rt->rt_rmx.rmx_locks & RTV_MTU)
		    && rt->rt_rmx.rmx_mtu
		    && rt->rt_rmx.rmx_mtu > rt->rt_gwroute->rt_rmx.rmx_mtu) {
			rt->rt_rmx.rmx_mtu = rt->rt_gwroute->rt_rmx.rmx_mtu;
		}
	}
	KASSERT(rt->_rt_key != NULL);
	RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
	    __LINE__, (void *)rt->_rt_key);
	return 0;
}

void
rt_maskedcopy(const struct sockaddr *src, struct sockaddr *dst,
	const struct sockaddr *netmask)
{
	const char *netmaskp = &netmask->sa_data[0],
	    *srcp = &src->sa_data[0];
	char *dstp = &dst->sa_data[0];
	const char *maskend = dstp + MIN(netmask->sa_len, src->sa_len);
	const char *srcend = dstp + src->sa_len;

	dst->sa_len = src->sa_len;
	dst->sa_family = src->sa_family;

	while (dstp < maskend)
		*dstp++ = *srcp++ & *netmaskp++;
	if (dstp < srcend)
		memset(dstp, 0, (size_t)(srcend - dstp));
}
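
/*
 * rt_maskedcopy() builds the masked key that the radix tree stores for
 * a network route: a byte-wise AND of the source address and the
 * netmask, zero-filling whatever the (possibly shorter) mask does not
 * cover.  For an IPv4 sockaddr this means, for example, that src
 * 192.168.5.7 with netmask 255.255.255.0 yields dst 192.168.5.0.
 */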

/*
 * Set up or tear down a routing table entry, normally
 * for an interface.
 */
int
rtinit(struct ifaddr *ifa, int cmd, int flags)
{
	struct rtentry *rt;
	struct sockaddr *dst, *odst;
	struct sockaddr_storage maskeddst;
	struct rtentry *nrt = NULL;
	int error;
	struct rt_addrinfo info;

	dst = flags & RTF_HOST ? ifa->ifa_dstaddr : ifa->ifa_addr;
	if (cmd == RTM_DELETE) {
		if ((flags & RTF_HOST) == 0 && ifa->ifa_netmask) {
			/* Delete subnet route for this interface */
			odst = dst;
			dst = (struct sockaddr *)&maskeddst;
			rt_maskedcopy(odst, dst, ifa->ifa_netmask);
		}
		if ((rt = rtalloc1(dst, 0)) != NULL) {
			rt->rt_refcnt--;
			if (rt->rt_ifa != ifa)
				return (flags & RTF_HOST) ? EHOSTUNREACH
				    : ENETUNREACH;
		}
	}
	memset(&info, 0, sizeof(info));
	info.rti_ifa = ifa;
	info.rti_flags = flags | ifa->ifa_flags;
	info.rti_info[RTAX_DST] = dst;
	info.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
	/*
	 * XXX here, it seems that we are assuming that ifa_netmask is NULL
	 * for RTF_HOST.  bsdi4 passes NULL explicitly (via intermediate
	 * variable) when RTF_HOST is 1.  still not sure if i can safely
	 * change it to meet bsdi4 behavior.
	 */
	info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;
	error = rtrequest1(cmd, &info, &nrt);
	if (cmd == RTM_DELETE && error == 0 && (rt = nrt)) {
		rt_newaddrmsg(cmd, ifa, error, nrt);
		if (rt->rt_refcnt <= 0) {
			rt->rt_refcnt++;
			rtfree(rt);
		}
	}
	if (cmd == RTM_ADD && error == 0 && (rt = nrt)) {
		rt->rt_refcnt--;
		if (rt->rt_ifa != ifa) {
			printf("rtinit: wrong ifa (%p) was (%p)\n", ifa,
			    rt->rt_ifa);
			if (rt->rt_ifa->ifa_rtrequest)
				rt->rt_ifa->ifa_rtrequest(RTM_DELETE, rt, NULL);
			rt_replace_ifa(rt, ifa);
			rt->rt_ifp = ifa->ifa_ifp;
			if (ifa->ifa_rtrequest)
				ifa->ifa_rtrequest(RTM_ADD, rt, NULL);
		}
		rt_newaddrmsg(cmd, ifa, error, nrt);
	}
	return error;
}
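
/*
 * rtinit() is what address-configuration code is expected to call:
 * RTM_ADD with the address's flags (RTF_HOST for a point-to-point
 * destination, otherwise a network route derived from ifa_netmask),
 * and RTM_DELETE to tear that route down again.  A sketch, illustrative
 * only, of what an "address is now up" path might do:
 *
 *	error = rtinit(ifa, RTM_ADD, RTF_UP | flags);
 *
 * with the corresponding teardown:
 *
 *	(void)rtinit(ifa, RTM_DELETE, flags);
 */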

/*
 * Route timer routines.  These routines allow functions to be called
 * for various routes at any time.  This is useful in supporting
 * path MTU discovery and redirect route deletion.
 *
 * This is similar to some BSDI internal functions, but it provides
 * for multiple queues for efficiency's sake...
 */

LIST_HEAD(, rttimer_queue) rttimer_queue_head;
static int rt_init_done = 0;

#define RTTIMER_CALLOUT(r)	do {					\
		if (r->rtt_func != NULL) {				\
			(*r->rtt_func)(r->rtt_rt, r);			\
		} else {						\
			rtrequest((int) RTM_DELETE,			\
				  rt_getkey(r->rtt_rt),			\
				  0, 0, 0, 0);				\
		}							\
	} while (/*CONSTCOND*/0)

/*
 * Some subtle order problems with domain initialization mean that
 * we cannot count on this being run from rt_init before various
 * protocol initializations are done.  Therefore, we make sure
 * that this is run when the first queue is added...
 */

void
rt_timer_init(void)
{
	assert(rt_init_done == 0);

	LIST_INIT(&rttimer_queue_head);
	callout_init(&rt_timer_ch, 0);
	callout_reset(&rt_timer_ch, hz, rt_timer_timer, NULL);
	rt_init_done = 1;
}

struct rttimer_queue *
rt_timer_queue_create(u_int timeout)
{
	struct rttimer_queue *rtq;

	if (rt_init_done == 0)
		rt_timer_init();

	R_Malloc(rtq, struct rttimer_queue *, sizeof *rtq);
	if (rtq == NULL)
		return NULL;
	Bzero(rtq, sizeof *rtq);

	rtq->rtq_timeout = timeout;
	rtq->rtq_count = 0;
	TAILQ_INIT(&rtq->rtq_head);
	LIST_INSERT_HEAD(&rttimer_queue_head, rtq, rtq_link);

	return rtq;
}

void
rt_timer_queue_change(struct rttimer_queue *rtq, long timeout)
{

	rtq->rtq_timeout = timeout;
}

void
rt_timer_queue_remove_all(struct rttimer_queue *rtq, int destroy)
{
	struct rttimer *r;

	while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL) {
		LIST_REMOVE(r, rtt_link);
		TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next);
		if (destroy)
			RTTIMER_CALLOUT(r);
		/* we are already at splsoftnet */
		pool_put(&rttimer_pool, r);
		if (rtq->rtq_count > 0)
			rtq->rtq_count--;
		else
			printf("rt_timer_queue_remove_all: "
			    "rtq_count reached 0\n");
	}
}

void
rt_timer_queue_destroy(struct rttimer_queue *rtq, int destroy)
{

	rt_timer_queue_remove_all(rtq, destroy);

	LIST_REMOVE(rtq, rtq_link);

	/*
	 * Caller is responsible for freeing the rttimer_queue structure.
	 */
}

unsigned long
rt_timer_count(struct rttimer_queue *rtq)
{
	return rtq->rtq_count;
}

void
rt_timer_remove_all(struct rtentry *rt, int destroy)
{
	struct rttimer *r;

	while ((r = LIST_FIRST(&rt->rt_timer)) != NULL) {
		LIST_REMOVE(r, rtt_link);
		TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next);
		if (destroy)
			RTTIMER_CALLOUT(r);
		if (r->rtt_queue->rtq_count > 0)
			r->rtt_queue->rtq_count--;
		else
			printf("rt_timer_remove_all: rtq_count reached 0\n");
		/* we are already at splsoftnet */
		pool_put(&rttimer_pool, r);
	}
}

int
rt_timer_add(struct rtentry *rt,
	void (*func)(struct rtentry *, struct rttimer *),
	struct rttimer_queue *queue)
{
	struct rttimer *r;
	int s;

	/*
	 * If there's already a timer with this action, destroy it before
	 * we add a new one.
	 */
	LIST_FOREACH(r, &rt->rt_timer, rtt_link) {
		if (r->rtt_func == func)
			break;
	}
	if (r != NULL) {
		LIST_REMOVE(r, rtt_link);
		TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next);
		if (r->rtt_queue->rtq_count > 0)
			r->rtt_queue->rtq_count--;
		else
			printf("rt_timer_add: rtq_count reached 0\n");
	} else {
		s = splsoftnet();
		r = pool_get(&rttimer_pool, PR_NOWAIT);
		splx(s);
		if (r == NULL)
			return ENOBUFS;
	}

	memset(r, 0, sizeof(*r));

	r->rtt_rt = rt;
	r->rtt_time = time_uptime;
	r->rtt_func = func;
	r->rtt_queue = queue;
	LIST_INSERT_HEAD(&rt->rt_timer, r, rtt_link);
	TAILQ_INSERT_TAIL(&queue->rtq_head, r, rtt_next);
	r->rtt_queue->rtq_count++;

	return 0;
}
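
/*
 * Typical use of the timer queues, sketched for illustration only (the
 * queue name, the 10-minute figure and the callback name are
 * assumptions, not taken from a particular protocol): a protocol
 * creates one queue at init time and then arms a timer per route,
 * e.g. when it learns a path MTU:
 *
 *	static struct rttimer_queue *example_q;
 *
 *	example_q = rt_timer_queue_create(10 * 60);
 *	...
 *	rt_timer_add(rt, example_expire, example_q);
 *
 * where example_expire(rt, r) undoes the per-route state when the
 * timeout fires.  If the callback is NULL, the expiring route is
 * simply deleted via rtrequest(RTM_DELETE, ...); see RTTIMER_CALLOUT()
 * above.
 */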

/* ARGSUSED */
void
rt_timer_timer(void *arg)
{
	struct rttimer_queue *rtq;
	struct rttimer *r;
	int s;

	s = splsoftnet();
	LIST_FOREACH(rtq, &rttimer_queue_head, rtq_link) {
		while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL &&
		    (r->rtt_time + rtq->rtq_timeout) < time_uptime) {
			LIST_REMOVE(r, rtt_link);
			TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next);
			RTTIMER_CALLOUT(r);
			pool_put(&rttimer_pool, r);
			if (rtq->rtq_count > 0)
				rtq->rtq_count--;
			else
				printf("rt_timer_timer: rtq_count reached 0\n");
		}
	}
	splx(s);

	callout_reset(&rt_timer_ch, hz, rt_timer_timer, NULL);
}

static struct rtentry *
_rtcache_init(struct route *ro, int flag)
{
	KASSERT(ro->_ro_rt == NULL);

	if (rtcache_getdst(ro) == NULL)
		return NULL;
	ro->ro_invalid = false;
	if ((ro->_ro_rt = rtalloc1(rtcache_getdst(ro), flag)) != NULL)
		rtcache(ro);

	return ro->_ro_rt;
}

struct rtentry *
rtcache_init(struct route *ro)
{
	return _rtcache_init(ro, 1);
}

struct rtentry *
rtcache_init_noclone(struct route *ro)
{
	return _rtcache_init(ro, 0);
}

struct rtentry *
rtcache_update(struct route *ro, int clone)
{
	rtcache_clear(ro);
	return _rtcache_init(ro, clone);
}

void
rtcache_copy(struct route *new_ro, const struct route *old_ro)
{
	struct rtentry *rt;

	KASSERT(new_ro != old_ro);

	if ((rt = rtcache_validate(old_ro)) != NULL)
		rt->rt_refcnt++;

	if (rtcache_getdst(old_ro) == NULL ||
	    rtcache_setdst(new_ro, rtcache_getdst(old_ro)) != 0)
		return;

	new_ro->ro_invalid = false;
	if ((new_ro->_ro_rt = rt) != NULL)
		rtcache(new_ro);
}

static struct dom_rtlist invalid_routes = LIST_HEAD_INITIALIZER(dom_rtlist);

void
rtcache_invalidate(struct dom_rtlist *rtlist)
{
	struct route *ro;

	while ((ro = LIST_FIRST(rtlist)) != NULL) {
		KASSERT(ro->_ro_rt != NULL);
		ro->ro_invalid = true;
		LIST_REMOVE(ro, ro_rtcache_next);
		LIST_INSERT_HEAD(&invalid_routes, ro, ro_rtcache_next);
	}
}

void
rtcache_clear(struct route *ro)
{
	if (ro->_ro_rt == NULL)
		return;

	KASSERT(rtcache_getdst(ro) != NULL);

	LIST_REMOVE(ro, ro_rtcache_next);

	RTFREE(ro->_ro_rt);
	ro->_ro_rt = NULL;
}

struct rtentry *
rtcache_lookup2(struct route *ro, const struct sockaddr *dst, int clone,
    int *hitp)
{
	const struct sockaddr *odst;
	struct rtentry *rt = NULL;

	odst = rtcache_getdst(ro);

	if (odst == NULL)
		;
	else if (sockaddr_cmp(odst, dst) != 0)
		rtcache_free(ro);
	else if ((rt = rtcache_validate(ro)) == NULL)
		rtcache_clear(ro);

	if (rt == NULL) {
		*hitp = 0;
		if (rtcache_setdst(ro, dst) == 0)
			rt = _rtcache_init(ro, clone);
	} else
		*hitp = 1;

	return rt;
}
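
/*
 * rtcache_lookup2() is the workhorse behind the higher-level
 * rtcache_lookup*() wrappers: given a struct route embedded in, say, a
 * protocol control block, it reuses the cached rtentry when the
 * destination still matches and the cache has not been invalidated,
 * and otherwise re-resolves and re-caches.  An illustrative caller
 * (the surrounding structure is assumed, not taken from a specific
 * protocol):
 *
 *	int hit;
 *	struct rtentry *rt;
 *
 *	rt = rtcache_lookup2(&inp_route, dst, 1, &hit);
 *	if (rt == NULL)
 *		return EHOSTUNREACH;
 *
 * The returned route stays owned by the cache; callers do not rtfree()
 * it, they just stop using it before the cache is freed or updated.
 */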

void
rtcache_free(struct route *ro)
{
	rtcache_clear(ro);
	if (ro->ro_sa != NULL) {
		sockaddr_free(ro->ro_sa);
		ro->ro_sa = NULL;
		KASSERT(ro->_ro_rt == NULL);
	}
}

int
rtcache_setdst(struct route *ro, const struct sockaddr *sa)
{
	KASSERT(sa != NULL);

	if (ro->ro_sa != NULL && ro->ro_sa->sa_family == sa->sa_family) {
		rtcache_clear(ro);
		if (sockaddr_copy(ro->ro_sa, ro->ro_sa->sa_len, sa) != NULL)
			return 0;
		sockaddr_free(ro->ro_sa);
	} else if (ro->ro_sa != NULL)
		rtcache_free(ro);	/* free ro_sa, wrong family */

	KASSERT(ro->_ro_rt == NULL);

	if ((ro->ro_sa = sockaddr_dup(sa, M_NOWAIT)) == NULL) {
		return ENOMEM;
	}
	return 0;
}

static int
rt_walktree_visitor(struct radix_node *rn, void *v)
{
	struct rtwalk *rw = (struct rtwalk *)v;

	return (*rw->rw_f)((struct rtentry *)rn, rw->rw_v);
}

int
rt_walktree(sa_family_t family, int (*f)(struct rtentry *, void *), void *v)
{
	struct radix_node_head *rnh = rt_tables[family];
	struct rtwalk rw;

	if (rnh == NULL)
		return 0;

	rw.rw_f = f;
	rw.rw_v = v;

	return rn_walktree(rnh, rt_walktree_visitor, &rw);
}