/*	$NetBSD: route.c,v 1.98 2007/10/10 22:14:38 dyoung Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Kevin M. Lahey of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1980, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)route.c	8.3 (Berkeley) 1/9/95
 */

#include "opt_route.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: route.c,v 1.98 2007/10/10 22:14:38 dyoung Exp $");

#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/proc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/kernel.h>
#include <sys/ioctl.h>
#include <sys/pool.h>

#include <net/if.h>
#include <net/route.h>
#include <net/raw_cb.h>

#include <netinet/in.h>
#include <netinet/in_var.h>

#ifdef RTFLUSH_DEBUG
#define	rtcache_debug()	__predict_false(_rtcache_debug)
#else /* RTFLUSH_DEBUG */
#define	rtcache_debug()	0
#endif /* RTFLUSH_DEBUG */

struct route_cb route_cb;
struct rtstat rtstat;
struct radix_node_head *rt_tables[AF_MAX+1];

int rttrash;			/* routes not in table but not freed */
struct sockaddr wildcard;	/* zero valued cookie for wildcard searches */

POOL_INIT(rtentry_pool, sizeof(struct rtentry), 0, 0, 0, "rtentpl", NULL,
    IPL_SOFTNET);
POOL_INIT(rttimer_pool, sizeof(struct rttimer), 0, 0, 0, "rttmrpl", NULL,
    IPL_SOFTNET);

struct callout rt_timer_ch;	/* callout for rt_timer_timer() */

#ifdef RTFLUSH_DEBUG
static int _rtcache_debug = 0;
#endif /* RTFLUSH_DEBUG */

static int rtdeletemsg(struct rtentry *);
static int rtflushclone1(struct rtentry *, void *);
static void rtflushclone(sa_family_t family, struct rtentry *);

#ifdef RTFLUSH_DEBUG
SYSCTL_SETUP(sysctl_net_rtcache_setup, "sysctl net.rtcache.debug setup")
{
	const struct sysctlnode *rnode;

	/* XXX do not duplicate */
	if (sysctl_createv(clog, 0, NULL, &rnode, CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "net", NULL, NULL, 0, NULL, 0, CTL_NET,
	    CTL_EOL) != 0)
		return;
	if (sysctl_createv(clog, 0, &rnode, &rnode, CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "rtcache",
	    SYSCTL_DESCR("Route cache related settings"),
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
		return;
	if (sysctl_createv(clog, 0, &rnode, &rnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Debug route caches"),
	    NULL, 0, &_rtcache_debug, 0, CTL_CREATE, CTL_EOL) != 0)
		return;
}
#endif /* RTFLUSH_DEBUG */

struct ifaddr *
rt_get_ifa(struct rtentry *rt)
{
	struct ifaddr *ifa;

	if ((ifa = rt->rt_ifa) == NULL)
		return ifa;
	else if (ifa->ifa_getifa == NULL)
		return ifa;
#if 0
	else if (ifa->ifa_seqno != NULL && *ifa->ifa_seqno == rt->rt_ifa_seqno)
		return ifa;
#endif
	else {
		ifa = (*ifa->ifa_getifa)(ifa, rt_getkey(rt));
		rt_replace_ifa(rt, ifa);
		return ifa;
	}
}

static void
rt_set_ifa1(struct rtentry *rt, struct ifaddr *ifa)
{
	rt->rt_ifa = ifa;
	if (ifa->ifa_seqno != NULL)
		rt->rt_ifa_seqno = *ifa->ifa_seqno;
}

void
rt_replace_ifa(struct rtentry *rt, struct ifaddr *ifa)
{
	IFAREF(ifa);
	IFAFREE(rt->rt_ifa);
	rt_set_ifa1(rt, ifa);
}

static void
rt_set_ifa(struct rtentry *rt, struct ifaddr *ifa)
{
	IFAREF(ifa);
	rt_set_ifa1(rt, ifa);
}

void
rtable_init(void **table)
{
	struct domain *dom;

	DOMAIN_FOREACH(dom)
		if (dom->dom_rtattach)
			dom->dom_rtattach(&table[dom->dom_family],
			    dom->dom_rtoffset);
}

void
route_init(void)
{

	rn_init();	/* initialize all zeroes, all ones, mask table */
	rtable_init((void **)rt_tables);
}

void
rtflushall(int family)
{
	int s;
	struct domain *dom;
	struct route *ro;

	if (rtcache_debug())
		printf("%s: enter\n", __func__);

	if ((dom = pffinddomain(family)) == NULL)
		return;

	s = splnet();
	while ((ro = LIST_FIRST(&dom->dom_rtcache)) != NULL) {
		KASSERT(ro->ro_rt != NULL);
		rtcache_clear(ro);
	}
	splx(s);
}

void
rtflush(struct route *ro)
{
	int s = splnet();

	KASSERT(ro->ro_rt != NULL);
	KASSERT(rtcache_getdst(ro) != NULL);

	RTFREE(ro->ro_rt);
	ro->ro_rt = NULL;

	LIST_REMOVE(ro, ro_rtcache_next);
	splx(s);
}

void
rtcache(struct route *ro)
{
	int s;
	struct domain *dom;

	KASSERT(ro->ro_rt != NULL);
	KASSERT(rtcache_getdst(ro) != NULL);

	if ((dom = pffinddomain(rtcache_getdst(ro)->sa_family)) == NULL)
		return;

	s = splnet();
	LIST_INSERT_HEAD(&dom->dom_rtcache, ro, ro_rtcache_next);
	splx(s);
}

/*
 * Packet routing routines.
 */
void
rtalloc(struct route *ro)
{
	if (ro->ro_rt != NULL) {
		if (ro->ro_rt->rt_ifp != NULL &&
		    (ro->ro_rt->rt_flags & RTF_UP) != 0)
			return;
		rtflush(ro);
	}
	if (rtcache_getdst(ro) == NULL ||
	    (ro->ro_rt = rtalloc1(rtcache_getdst(ro), 1)) == NULL)
		return;
	rtcache(ro);
}

struct rtentry *
rtalloc1(const struct sockaddr *dst, int report)
{
	struct radix_node_head *rnh = rt_tables[dst->sa_family];
	struct rtentry *rt;
	struct radix_node *rn;
	struct rtentry *newrt = NULL;
	struct rt_addrinfo info;
	int s = splsoftnet(), err = 0, msgtype = RTM_MISS;

	if (rnh && (rn = rnh->rnh_matchaddr(dst, rnh)) &&
	    ((rn->rn_flags & RNF_ROOT) == 0)) {
		newrt = rt = (struct rtentry *)rn;
		if (report && (rt->rt_flags & RTF_CLONING)) {
			err = rtrequest(RTM_RESOLVE, dst, NULL, NULL, 0,
			    &newrt);
			if (err) {
				newrt = rt;
				rt->rt_refcnt++;
				goto miss;
			}
			KASSERT(newrt != NULL);
			if ((rt = newrt) && (rt->rt_flags & RTF_XRESOLVE)) {
				msgtype = RTM_RESOLVE;
				goto miss;
			}
			/* Inform listeners of the new route */
			memset(&info, 0, sizeof(info));
			info.rti_info[RTAX_DST] = rt_getkey(rt);
			info.rti_info[RTAX_NETMASK] = rt_mask(rt);
			info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
			if (rt->rt_ifp != NULL) {
				info.rti_info[RTAX_IFP] =
				    TAILQ_FIRST(&rt->rt_ifp->if_addrlist)->ifa_addr;
				info.rti_info[RTAX_IFA] = rt->rt_ifa->ifa_addr;
			}
			rt_missmsg(RTM_ADD, &info, rt->rt_flags, 0);
		} else
			rt->rt_refcnt++;
	} else {
		rtstat.rts_unreach++;
	miss:	if (report) {
			memset((void *)&info, 0, sizeof(info));
			info.rti_info[RTAX_DST] = dst;
			rt_missmsg(msgtype, &info, 0, err);
		}
	}
	splx(s);
	return newrt;
}

void
rtfree(struct rtentry *rt)
{
	struct ifaddr *ifa;

	if (rt == NULL)
		panic("rtfree");
	rt->rt_refcnt--;
	if (rt->rt_refcnt <= 0 && (rt->rt_flags & RTF_UP) == 0) {
		if (rt->rt_nodes->rn_flags & (RNF_ACTIVE | RNF_ROOT))
			panic("rtfree 2");
		rttrash--;
		if (rt->rt_refcnt < 0) {
			printf("rtfree: %p not freed (neg refs)\n", rt);
			return;
		}
		rt_timer_remove_all(rt, 0);
		ifa = rt->rt_ifa;
		rt->rt_ifa = NULL;
		IFAFREE(ifa);
		rt->rt_ifp = NULL;
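		/*
		 * Release any storage still attached to the entry and
		 * return it to the pool.
		 */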
		rt_destroy(rt);
		pool_put(&rtentry_pool, rt);
	}
}

void
ifafree(struct ifaddr *ifa)
{

#ifdef DIAGNOSTIC
	if (ifa == NULL)
		panic("ifafree: null ifa");
	if (ifa->ifa_refcnt != 0)
		panic("ifafree: ifa_refcnt != 0 (%d)", ifa->ifa_refcnt);
#endif
#ifdef IFAREF_DEBUG
	printf("ifafree: freeing ifaddr %p\n", ifa);
#endif
	free(ifa, M_IFADDR);
}

static inline int
equal(const struct sockaddr *sa1, const struct sockaddr *sa2)
{
	return sockaddr_cmp(sa1, sa2) == 0;
}

/*
 * Force a routing table entry to the specified
 * destination to go through the given gateway.
 * Normally called as a result of a routing redirect
 * message from the network layer.
 *
 * N.B.: must be called at splsoftnet
 */
void
rtredirect(const struct sockaddr *dst, const struct sockaddr *gateway,
	const struct sockaddr *netmask, int flags, const struct sockaddr *src,
	struct rtentry **rtp)
{
	struct rtentry *rt;
	int error = 0;
	u_quad_t *stat = NULL;
	struct rt_addrinfo info;
	struct ifaddr *ifa;

	/* verify the gateway is directly reachable */
	if ((ifa = ifa_ifwithnet(gateway)) == NULL) {
		error = ENETUNREACH;
		goto out;
	}
	rt = rtalloc1(dst, 0);
	/*
	 * If the redirect isn't from our current router for this dst,
	 * it's either old or wrong.  If it redirects us to ourselves,
	 * we have a routing loop, perhaps as a result of an interface
	 * going down recently.
	 */
	if (!(flags & RTF_DONE) && rt &&
	    (!equal(src, rt->rt_gateway) || rt->rt_ifa != ifa))
		error = EINVAL;
	else if (ifa_ifwithaddr(gateway))
		error = EHOSTUNREACH;
	if (error)
		goto done;
	/*
	 * Create a new entry if we just got back a wildcard entry
	 * or the lookup failed.  This is necessary for hosts
	 * which use routing redirects generated by smart gateways
	 * to dynamically build the routing tables.
	 */
	if (rt == NULL || (rt_mask(rt) && rt_mask(rt)->sa_len < 2))
		goto create;
	/*
	 * Don't listen to the redirect if it's
	 * for a route to an interface.
	 */
	if (rt->rt_flags & RTF_GATEWAY) {
		if (((rt->rt_flags & RTF_HOST) == 0) && (flags & RTF_HOST)) {
			/*
			 * Changing from route to net => route to host.
			 * Create new route, rather than smashing route to net.
			 */
		create:
			if (rt != NULL)
				rtfree(rt);
			flags |= RTF_GATEWAY | RTF_DYNAMIC;
			info.rti_info[RTAX_DST] = dst;
			info.rti_info[RTAX_GATEWAY] = gateway;
			info.rti_info[RTAX_NETMASK] = netmask;
			info.rti_ifa = ifa;
			info.rti_flags = flags;
			rt = NULL;
			error = rtrequest1(RTM_ADD, &info, &rt);
			if (rt != NULL)
				flags = rt->rt_flags;
			stat = &rtstat.rts_dynamic;
		} else {
			/*
			 * Smash the current notion of the gateway to
			 * this destination.  Should check about netmask!!!
			 */
			rt->rt_flags |= RTF_MODIFIED;
			flags |= RTF_MODIFIED;
			stat = &rtstat.rts_newgateway;
			rt_setgate(rt, gateway);
		}
	} else
		error = EHOSTUNREACH;
done:
	if (rt) {
		if (rtp != NULL && !error)
			*rtp = rt;
		else
			rtfree(rt);
	}
out:
	if (error)
		rtstat.rts_badredirect++;
	else if (stat != NULL)
		(*stat)++;
	memset(&info, 0, sizeof(info));
	info.rti_info[RTAX_DST] = dst;
	info.rti_info[RTAX_GATEWAY] = gateway;
	info.rti_info[RTAX_NETMASK] = netmask;
	info.rti_info[RTAX_AUTHOR] = src;
	rt_missmsg(RTM_REDIRECT, &info, flags, error);
}

/*
 * Delete a route and generate a message
 */
static int
rtdeletemsg(struct rtentry *rt)
{
	int error;
	struct rt_addrinfo info;

	/*
	 * Request the new route so that the entry is not actually
	 * deleted.  That will allow the information being reported to
	 * be accurate (and consistent with route_output()).
	 */
	memset(&info, 0, sizeof(info));
	info.rti_info[RTAX_DST] = rt_getkey(rt);
	info.rti_info[RTAX_NETMASK] = rt_mask(rt);
	info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
	info.rti_flags = rt->rt_flags;
	error = rtrequest1(RTM_DELETE, &info, &rt);

	rt_missmsg(RTM_DELETE, &info, info.rti_flags, error);

	/* Adjust the refcount */
	if (error == 0 && rt->rt_refcnt <= 0) {
		rt->rt_refcnt++;
		rtfree(rt);
	}
	return error;
}

static int
rtflushclone1(struct rtentry *rt, void *arg)
{
	struct rtentry *parent;

	parent = (struct rtentry *)arg;
	if ((rt->rt_flags & RTF_CLONED) != 0 && rt->rt_parent == parent)
		rtdeletemsg(rt);
	return 0;
}

static void
rtflushclone(sa_family_t family, struct rtentry *parent)
{

#ifdef DIAGNOSTIC
	if (!parent || (parent->rt_flags & RTF_CLONING) == 0)
		panic("rtflushclone: called with a non-cloning route");
#endif
	rt_walktree(family, rtflushclone1, (void *)parent);
}

/*
 * Routing table ioctl interface.
 */
int
rtioctl(u_long req, void *data, struct lwp *l)
{
	return EOPNOTSUPP;
}

struct ifaddr *
ifa_ifwithroute(int flags, const struct sockaddr *dst,
	const struct sockaddr *gateway)
{
	struct ifaddr *ifa;

	if ((flags & RTF_GATEWAY) == 0) {
		/*
		 * If we are adding a route to an interface,
		 * and the interface is a pt to pt link
		 * we should search for the destination
		 * as our clue to the interface.  Otherwise
		 * we can use the local address.
		 */
		ifa = NULL;
		if (flags & RTF_HOST)
			ifa = ifa_ifwithdstaddr(dst);
		if (ifa == NULL)
			ifa = ifa_ifwithaddr(gateway);
	} else {
		/*
		 * If we are adding a route to a remote net
		 * or host, the gateway may still be on the
		 * other end of a pt to pt link.
		 */
		ifa = ifa_ifwithdstaddr(gateway);
	}
	if (ifa == NULL)
		ifa = ifa_ifwithnet(gateway);
	if (ifa == NULL) {
		struct rtentry *rt = rtalloc1(dst, 0);
		if (rt == NULL)
			return NULL;
		rt->rt_refcnt--;
		if ((ifa = rt->rt_ifa) == NULL)
			return NULL;
	}
	if (ifa->ifa_addr->sa_family != dst->sa_family) {
		struct ifaddr *oifa = ifa;
		ifa = ifaof_ifpforaddr(dst, ifa->ifa_ifp);
		if (ifa == NULL)
			ifa = oifa;
	}
	return ifa;
}

#define	ROUNDUP(a)	(a>0 ? \
	(1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))

int
rtrequest(int req, const struct sockaddr *dst, const struct sockaddr *gateway,
	const struct sockaddr *netmask, int flags, struct rtentry **ret_nrt)
{
	struct rt_addrinfo info;

	memset(&info, 0, sizeof(info));
	info.rti_flags = flags;
	info.rti_info[RTAX_DST] = dst;
	info.rti_info[RTAX_GATEWAY] = gateway;
	info.rti_info[RTAX_NETMASK] = netmask;
	return rtrequest1(req, &info, ret_nrt);
}

int
rt_getifa(struct rt_addrinfo *info)
{
	struct ifaddr *ifa;
	const struct sockaddr *dst = info->rti_info[RTAX_DST];
	const struct sockaddr *gateway = info->rti_info[RTAX_GATEWAY];
	const struct sockaddr *ifaaddr = info->rti_info[RTAX_IFA];
	const struct sockaddr *ifpaddr = info->rti_info[RTAX_IFP];
	int flags = info->rti_flags;

	/*
	 * ifp may be specified by sockaddr_dl when protocol address
	 * is ambiguous
	 */
	if (info->rti_ifp == NULL && ifpaddr != NULL
	    && ifpaddr->sa_family == AF_LINK &&
	    (ifa = ifa_ifwithnet((const struct sockaddr *)ifpaddr)) != NULL)
		info->rti_ifp = ifa->ifa_ifp;
	if (info->rti_ifa == NULL && ifaaddr != NULL)
		info->rti_ifa = ifa_ifwithaddr(ifaaddr);
	if (info->rti_ifa == NULL) {
		const struct sockaddr *sa;

		sa = ifaaddr != NULL ? ifaaddr :
		    (gateway != NULL ? gateway : dst);
		if (sa != NULL && info->rti_ifp != NULL)
			info->rti_ifa = ifaof_ifpforaddr(sa, info->rti_ifp);
		else if (dst != NULL && gateway != NULL)
			info->rti_ifa = ifa_ifwithroute(flags, dst, gateway);
		else if (sa != NULL)
			info->rti_ifa = ifa_ifwithroute(flags, sa, sa);
	}
	if ((ifa = info->rti_ifa) == NULL)
		return ENETUNREACH;
	if (ifa->ifa_getifa != NULL)
		info->rti_ifa = ifa = (*ifa->ifa_getifa)(ifa, dst);
	if (info->rti_ifp == NULL)
		info->rti_ifp = ifa->ifa_ifp;
	return 0;
}

int
rtrequest1(int req, struct rt_addrinfo *info, struct rtentry **ret_nrt)
{
	int s = splsoftnet();
	int error = 0;
	struct rtentry *rt, *crt;
	struct radix_node *rn;
	struct radix_node_head *rnh;
	struct ifaddr *ifa;
	struct sockaddr_storage maskeddst;
	const struct sockaddr *dst = info->rti_info[RTAX_DST];
	const struct sockaddr *gateway = info->rti_info[RTAX_GATEWAY];
	const struct sockaddr *netmask = info->rti_info[RTAX_NETMASK];
	int flags = info->rti_flags;
#define senderr(x) { error = x ; goto bad; }

	if ((rnh = rt_tables[dst->sa_family]) == NULL)
		senderr(ESRCH);
	if (flags & RTF_HOST)
		netmask = NULL;
	switch (req) {
	case RTM_DELETE:
		if (netmask) {
			rt_maskedcopy(dst, (struct sockaddr *)&maskeddst,
			    netmask);
			dst = (struct sockaddr *)&maskeddst;
		}
		if ((rn = rnh->rnh_lookup(dst, netmask, rnh)) == NULL)
			senderr(ESRCH);
		rt = (struct rtentry *)rn;
		if ((rt->rt_flags & RTF_CLONING) != 0) {
			/* clean up any cloned children */
			rtflushclone(dst->sa_family, rt);
		}
		if ((rn = rnh->rnh_deladdr(dst, netmask, rnh)) == NULL)
			senderr(ESRCH);
		if (rn->rn_flags & (RNF_ACTIVE | RNF_ROOT))
			panic("rtrequest delete");
		rt = (struct rtentry *)rn;
		if (rt->rt_gwroute) {
			RTFREE(rt->rt_gwroute);
			rt->rt_gwroute = NULL;
		}
		if (rt->rt_parent) {
			rt->rt_parent->rt_refcnt--;
			rt->rt_parent = NULL;
		}
		rt->rt_flags &= ~RTF_UP;
		if ((ifa = rt->rt_ifa) && ifa->ifa_rtrequest)
			ifa->ifa_rtrequest(RTM_DELETE, rt, info);
		rttrash++;
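		/*
		 * The entry is now off the tree but may still be
		 * referenced: either hand it to the caller or drop
		 * the last reference so that it can be freed.
		 */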
		if (ret_nrt)
			*ret_nrt = rt;
		else if (rt->rt_refcnt <= 0) {
			rt->rt_refcnt++;
			rtfree(rt);
		}
		break;

	case RTM_RESOLVE:
		if (ret_nrt == NULL || (rt = *ret_nrt) == NULL)
			senderr(EINVAL);
		if ((rt->rt_flags & RTF_CLONING) == 0)
			senderr(EINVAL);
		ifa = rt->rt_ifa;
		flags = rt->rt_flags & ~(RTF_CLONING | RTF_STATIC);
		flags |= RTF_CLONED;
		gateway = rt->rt_gateway;
		flags |= RTF_HOST;
		goto makeroute;

	case RTM_ADD:
		if (info->rti_ifa == NULL && (error = rt_getifa(info)))
			senderr(error);
		ifa = info->rti_ifa;
	makeroute:
		/* Already at splsoftnet() so pool_get/pool_put are safe */
		rt = pool_get(&rtentry_pool, PR_NOWAIT);
		if (rt == NULL)
			senderr(ENOBUFS);
		Bzero(rt, sizeof(*rt));
		rt->rt_flags = RTF_UP | flags;
		LIST_INIT(&rt->rt_timer);
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__, __LINE__,
		    (void *)rt->_rt_key);
		if (rt_setkey(rt, dst, M_NOWAIT) == NULL ||
		    rt_setgate(rt, gateway) != 0) {
			pool_put(&rtentry_pool, rt);
			senderr(ENOBUFS);
		}
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__, __LINE__,
		    (void *)rt->_rt_key);
		if (netmask) {
			rt_maskedcopy(dst, (struct sockaddr *)&maskeddst,
			    netmask);
			rt_setkey(rt, (struct sockaddr *)&maskeddst, M_NOWAIT);
			RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
			    __LINE__, (void *)rt->_rt_key);
		} else {
			rt_setkey(rt, dst, M_NOWAIT);
			RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
			    __LINE__, (void *)rt->_rt_key);
		}
		rt_set_ifa(rt, ifa);
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
		    __LINE__, (void *)rt->_rt_key);
		rt->rt_ifp = ifa->ifa_ifp;
		if (req == RTM_RESOLVE) {
			rt->rt_rmx = (*ret_nrt)->rt_rmx; /* copy metrics */
			rt->rt_parent = *ret_nrt;
			rt->rt_parent->rt_refcnt++;
		}
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
		    __LINE__, (void *)rt->_rt_key);
		rn = rnh->rnh_addaddr(rt_getkey(rt), netmask, rnh,
		    rt->rt_nodes);
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
		    __LINE__, (void *)rt->_rt_key);
		if (rn == NULL && (crt = rtalloc1(rt_getkey(rt), 0)) != NULL) {
			/* overwrite cloned route */
			if ((crt->rt_flags & RTF_CLONED) != 0) {
				rtdeletemsg(crt);
				rn = rnh->rnh_addaddr(rt_getkey(rt),
				    netmask, rnh, rt->rt_nodes);
			}
			RTFREE(crt);
			RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
			    __LINE__, (void *)rt->_rt_key);
		}
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
		    __LINE__, (void *)rt->_rt_key);
		if (rn == NULL) {
			IFAFREE(ifa);
			if ((rt->rt_flags & RTF_CLONED) != 0 && rt->rt_parent)
				rtfree(rt->rt_parent);
			if (rt->rt_gwroute)
				rtfree(rt->rt_gwroute);
			rt_destroy(rt);
			pool_put(&rtentry_pool, rt);
			senderr(EEXIST);
		}
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
		    __LINE__, (void *)rt->_rt_key);
		if (ifa->ifa_rtrequest)
			ifa->ifa_rtrequest(req, rt, info);
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
		    __LINE__, (void *)rt->_rt_key);
		if (ret_nrt) {
			*ret_nrt = rt;
			rt->rt_refcnt++;
		}
		if ((rt->rt_flags & RTF_CLONING) != 0) {
			/* clean up any cloned children */
			rtflushclone(dst->sa_family, rt);
		}
		rtflushall(dst->sa_family);
		break;
	case RTM_GET:
		if (netmask != NULL) {
			rt_maskedcopy(dst, (struct sockaddr *)&maskeddst,
			    netmask);
			dst = (struct sockaddr *)&maskeddst;
		}
		rn = rnh->rnh_lookup(dst, netmask, rnh);
		if (rn == NULL || (rn->rn_flags & RNF_ROOT) != 0)
			senderr(ESRCH);
		if (ret_nrt != NULL) {
			rt = (struct rtentry *)rn;
			*ret_nrt = rt;
			rt->rt_refcnt++;
		}
		break;
	}
bad:
	splx(s);
	return error;
}

int
rt_setgate(struct rtentry *rt, const struct sockaddr *gate)
{
	KASSERT(rt != rt->rt_gwroute);

	KASSERT(rt->_rt_key != NULL);
	RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
	    __LINE__, (void *)rt->_rt_key);

	if (rt->rt_gwroute) {
		RTFREE(rt->rt_gwroute);
		rt->rt_gwroute = NULL;
	}
	KASSERT(rt->_rt_key != NULL);
	RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
	    __LINE__, (void *)rt->_rt_key);
	if (rt->rt_gateway != NULL)
		sockaddr_free(rt->rt_gateway);
	KASSERT(rt->_rt_key != NULL);
	RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
	    __LINE__, (void *)rt->_rt_key);
	if ((rt->rt_gateway = sockaddr_dup(gate, M_NOWAIT)) == NULL)
		return ENOMEM;
	KASSERT(rt->_rt_key != NULL);
	RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
	    __LINE__, (void *)rt->_rt_key);

	if (rt->rt_flags & RTF_GATEWAY) {
		KASSERT(rt->_rt_key != NULL);
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
		    __LINE__, (void *)rt->_rt_key);
		rt->rt_gwroute = rtalloc1(gate, 1);
		/*
		 * If we switched gateways, grab the MTU from the new
		 * gateway route if the current MTU is greater than the
		 * MTU of the gateway.
		 * Note that, if the MTU of the gateway is 0, we will reset
		 * the MTU of the route to run PMTUD again from scratch. XXX
		 */
		KASSERT(rt->_rt_key != NULL);
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
		    __LINE__, (void *)rt->_rt_key);
		if (rt->rt_gwroute
		    && !(rt->rt_rmx.rmx_locks & RTV_MTU)
		    && rt->rt_rmx.rmx_mtu
		    && rt->rt_rmx.rmx_mtu > rt->rt_gwroute->rt_rmx.rmx_mtu) {
			rt->rt_rmx.rmx_mtu = rt->rt_gwroute->rt_rmx.rmx_mtu;
		}
	}
	KASSERT(rt->_rt_key != NULL);
	RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
	    __LINE__, (void *)rt->_rt_key);
	return 0;
}

void
rt_maskedcopy(const struct sockaddr *src, struct sockaddr *dst,
	const struct sockaddr *netmask)
{
	const char *netmaskp = &netmask->sa_data[0],
	    *srcp = &src->sa_data[0];
	char *dstp = &dst->sa_data[0];
	const char *maskend = dstp + MIN(netmask->sa_len, src->sa_len);
	const char *srcend = dstp + src->sa_len;

	dst->sa_len = src->sa_len;
	dst->sa_family = src->sa_family;

	while (dstp < maskend)
		*dstp++ = *srcp++ & *netmaskp++;
	if (dstp < srcend)
		memset(dstp, 0, (size_t)(srcend - dstp));
}

/*
 * Set up or tear down a routing table entry, normally
 * for an interface.
 */
int
rtinit(struct ifaddr *ifa, int cmd, int flags)
{
	struct rtentry *rt;
	struct sockaddr *dst, *odst;
	struct sockaddr_storage maskeddst;
	struct rtentry *nrt = NULL;
	int error;
	struct rt_addrinfo info;

	dst = flags & RTF_HOST ? ifa->ifa_dstaddr : ifa->ifa_addr;
	if (cmd == RTM_DELETE) {
		if ((flags & RTF_HOST) == 0 && ifa->ifa_netmask) {
			/* Delete subnet route for this interface */
			odst = dst;
			dst = (struct sockaddr *)&maskeddst;
			rt_maskedcopy(odst, dst, ifa->ifa_netmask);
		}
		if ((rt = rtalloc1(dst, 0)) != NULL) {
			rt->rt_refcnt--;
			if (rt->rt_ifa != ifa)
				return (flags & RTF_HOST) ?
				    EHOSTUNREACH : ENETUNREACH;
		}
	}
	memset(&info, 0, sizeof(info));
	info.rti_ifa = ifa;
	info.rti_flags = flags | ifa->ifa_flags;
	info.rti_info[RTAX_DST] = dst;
	info.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
	/*
	 * XXX here, it seems that we are assuming that ifa_netmask is NULL
	 * for RTF_HOST.  bsdi4 passes NULL explicitly (via intermediate
	 * variable) when RTF_HOST is 1.  still not sure if i can safely
	 * change it to meet bsdi4 behavior.
	 */
	info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;
	error = rtrequest1(cmd, &info, &nrt);
	if (cmd == RTM_DELETE && error == 0 && (rt = nrt)) {
		rt_newaddrmsg(cmd, ifa, error, nrt);
		if (rt->rt_refcnt <= 0) {
			rt->rt_refcnt++;
			rtfree(rt);
		}
	}
	if (cmd == RTM_ADD && error == 0 && (rt = nrt)) {
		rt->rt_refcnt--;
		if (rt->rt_ifa != ifa) {
			printf("rtinit: wrong ifa (%p) was (%p)\n", ifa,
			    rt->rt_ifa);
			if (rt->rt_ifa->ifa_rtrequest)
				rt->rt_ifa->ifa_rtrequest(RTM_DELETE, rt, NULL);
			rt_replace_ifa(rt, ifa);
			rt->rt_ifp = ifa->ifa_ifp;
			if (ifa->ifa_rtrequest)
				ifa->ifa_rtrequest(RTM_ADD, rt, NULL);
		}
		rt_newaddrmsg(cmd, ifa, error, nrt);
	}
	return error;
}

/*
 * Route timer routines.  These routines allow functions to be called
 * for various routes at any time.  This is useful in supporting
 * path MTU discovery and redirect route deletion.
 *
 * This is similar to some BSDI internal functions, but it provides
 * for multiple queues for efficiency's sake...
 */

LIST_HEAD(, rttimer_queue) rttimer_queue_head;
static int rt_init_done = 0;

#define RTTIMER_CALLOUT(r)	do {				\
	if (r->rtt_func != NULL) {				\
		(*r->rtt_func)(r->rtt_rt, r);			\
	} else {						\
		rtrequest((int) RTM_DELETE,			\
		    rt_getkey(r->rtt_rt),			\
		    0, 0, 0, 0);				\
	}							\
} while (/*CONSTCOND*/0)

/*
 * Some subtle order problems with domain initialization mean that
 * we cannot count on this being run from rt_init before various
 * protocol initializations are done.  Therefore, we make sure
 * that this is run when the first queue is added...
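 *
 * A minimal usage sketch (the callback name and the ten-minute timeout
 * below are hypothetical, not part of this file): a protocol creates a
 * queue once and then attaches timers to individual routes,
 *
 *	static struct rttimer_queue *myproto_rtq;
 *
 *	myproto_rtq = rt_timer_queue_create(10 * 60);
 *	...
 *	rt_timer_add(rt, myproto_rt_expire, myproto_rtq);
 *
 * where myproto_rt_expire(struct rtentry *, struct rttimer *) is run when
 * the timer fires; a NULL callback deletes the route instead (see
 * RTTIMER_CALLOUT above).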
 */

void
rt_timer_init(void)
{
	assert(rt_init_done == 0);

	LIST_INIT(&rttimer_queue_head);
	callout_init(&rt_timer_ch, 0);
	callout_reset(&rt_timer_ch, hz, rt_timer_timer, NULL);
	rt_init_done = 1;
}

struct rttimer_queue *
rt_timer_queue_create(u_int timeout)
{
	struct rttimer_queue *rtq;

	if (rt_init_done == 0)
		rt_timer_init();

	R_Malloc(rtq, struct rttimer_queue *, sizeof *rtq);
	if (rtq == NULL)
		return NULL;
	Bzero(rtq, sizeof *rtq);

	rtq->rtq_timeout = timeout;
	rtq->rtq_count = 0;
	TAILQ_INIT(&rtq->rtq_head);
	LIST_INSERT_HEAD(&rttimer_queue_head, rtq, rtq_link);

	return rtq;
}

void
rt_timer_queue_change(struct rttimer_queue *rtq, long timeout)
{

	rtq->rtq_timeout = timeout;
}

void
rt_timer_queue_remove_all(struct rttimer_queue *rtq, int destroy)
{
	struct rttimer *r;

	while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL) {
		LIST_REMOVE(r, rtt_link);
		TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next);
		if (destroy)
			RTTIMER_CALLOUT(r);
		/* we are already at splsoftnet */
		pool_put(&rttimer_pool, r);
		if (rtq->rtq_count > 0)
			rtq->rtq_count--;
		else
			printf("rt_timer_queue_remove_all: "
			    "rtq_count reached 0\n");
	}
}

void
rt_timer_queue_destroy(struct rttimer_queue *rtq, int destroy)
{

	rt_timer_queue_remove_all(rtq, destroy);

	LIST_REMOVE(rtq, rtq_link);

	/*
	 * Caller is responsible for freeing the rttimer_queue structure.
	 */
}

unsigned long
rt_timer_count(struct rttimer_queue *rtq)
{
	return rtq->rtq_count;
}

void
rt_timer_remove_all(struct rtentry *rt, int destroy)
{
	struct rttimer *r;

	while ((r = LIST_FIRST(&rt->rt_timer)) != NULL) {
		LIST_REMOVE(r, rtt_link);
		TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next);
		if (destroy)
			RTTIMER_CALLOUT(r);
		if (r->rtt_queue->rtq_count > 0)
			r->rtt_queue->rtq_count--;
		else
			printf("rt_timer_remove_all: rtq_count reached 0\n");
		/* we are already at splsoftnet */
		pool_put(&rttimer_pool, r);
	}
}

int
rt_timer_add(struct rtentry *rt,
	void (*func)(struct rtentry *, struct rttimer *),
	struct rttimer_queue *queue)
{
	struct rttimer *r;
	int s;

	/*
	 * If there's already a timer with this action, destroy it before
	 * we add a new one.
	 */
	LIST_FOREACH(r, &rt->rt_timer, rtt_link) {
		if (r->rtt_func == func)
			break;
	}
	if (r != NULL) {
		LIST_REMOVE(r, rtt_link);
		TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next);
		if (r->rtt_queue->rtq_count > 0)
			r->rtt_queue->rtq_count--;
		else
			printf("rt_timer_add: rtq_count reached 0\n");
	} else {
		s = splsoftnet();
		r = pool_get(&rttimer_pool, PR_NOWAIT);
		splx(s);
		if (r == NULL)
			return ENOBUFS;
	}

	memset(r, 0, sizeof(*r));

	r->rtt_rt = rt;
	r->rtt_time = time_uptime;
	r->rtt_func = func;
	r->rtt_queue = queue;
	LIST_INSERT_HEAD(&rt->rt_timer, r, rtt_link);
	TAILQ_INSERT_TAIL(&queue->rtq_head, r, rtt_next);
	r->rtt_queue->rtq_count++;

	return 0;
}

/* ARGSUSED */
void
rt_timer_timer(void *arg)
{
	struct rttimer_queue *rtq;
	struct rttimer *r;
	int s;

	s = splsoftnet();
	LIST_FOREACH(rtq, &rttimer_queue_head, rtq_link) {
		while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL &&
		    (r->rtt_time + rtq->rtq_timeout) < time_uptime) {
			LIST_REMOVE(r, rtt_link);
			TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next);
			RTTIMER_CALLOUT(r);
			pool_put(&rttimer_pool, r);
			if (rtq->rtq_count > 0)
				rtq->rtq_count--;
			else
				printf("rt_timer_timer: rtq_count reached 0\n");
		}
	}
	splx(s);

	callout_reset(&rt_timer_ch, hz, rt_timer_timer, NULL);
}

#ifdef RTCACHE_DEBUG
#ifndef RTCACHE_DEBUG_SIZE
#define RTCACHE_DEBUG_SIZE (1024 * 1024)
#endif
static const char *cache_caller[RTCACHE_DEBUG_SIZE];
static struct route *cache_entry[RTCACHE_DEBUG_SIZE];
size_t cache_cur;
#endif

#ifdef RTCACHE_DEBUG
static void
_rtcache_init_debug(const char *caller, struct route *ro, int flag)
#else
static void
_rtcache_init(struct route *ro, int flag)
#endif
{
#ifdef RTCACHE_DEBUG
	size_t i;

	for (i = 0; i < cache_cur; ++i) {
		if (cache_entry[i] == ro)
			panic("Reinit of route %p, initialised from %s", ro,
			    cache_caller[i]);
	}
#endif

	if (rtcache_getdst(ro) == NULL)
		return;
	ro->ro_rt = rtalloc1(rtcache_getdst(ro), flag);
	if (ro->ro_rt != NULL) {
#ifdef RTCACHE_DEBUG
		if (cache_cur == RTCACHE_DEBUG_SIZE)
			panic("Route cache debug overflow");
		cache_caller[cache_cur] = caller;
		cache_entry[cache_cur] = ro;
		++cache_cur;
#endif
		rtcache(ro);
	}
}

#ifdef RTCACHE_DEBUG
void
rtcache_init_debug(const char *caller, struct route *ro)
{
	_rtcache_init_debug(caller, ro, 1);
}

void
rtcache_init_noclone_debug(const char *caller, struct route *ro)
{
	_rtcache_init_debug(caller, ro, 0);
}

void
rtcache_update(struct route *ro, int clone)
{
	rtcache_clear(ro);
	_rtcache_init_debug(__func__, ro, clone);
}
#else
void
rtcache_init(struct route *ro)
{
	_rtcache_init(ro, 1);
}

void
rtcache_init_noclone(struct route *ro)
{
	_rtcache_init(ro, 0);
}

void
rtcache_update(struct route *ro, int clone)
{
	rtcache_clear(ro);
	_rtcache_init(ro, clone);
}
#endif

#ifdef RTCACHE_DEBUG
void
rtcache_copy_debug(const char *caller, struct route *new_ro,
	const struct route *old_ro)
#else
void
rtcache_copy(struct route *new_ro,
	const struct route *old_ro)
#endif
{
	/* XXX i doubt this DTRT any longer --dyoung */
#ifdef RTCACHE_DEBUG
	size_t i;

	for (i = 0; i < cache_cur; ++i) {
		if (cache_entry[i] == new_ro)
			panic("Copy to initialised route %p (before %s)",
			    new_ro, cache_caller[i]);
	}
#endif

	if (rtcache_getdst(old_ro) == NULL ||
	    rtcache_setdst(new_ro, rtcache_getdst(old_ro)) != 0)
		return;
	new_ro->ro_rt = old_ro->ro_rt;
	if (new_ro->ro_rt != NULL) {
#ifdef RTCACHE_DEBUG
		if (cache_cur == RTCACHE_DEBUG_SIZE)
			panic("Route cache debug overflow");
		cache_caller[cache_cur] = caller;
		cache_entry[cache_cur] = new_ro;
		++cache_cur;
#endif
		rtcache(new_ro);
		++new_ro->ro_rt->rt_refcnt;
	}
}

void
rtcache_clear(struct route *ro)
{
#ifdef RTCACHE_DEBUG
	size_t j, i = cache_cur;

	for (i = j = 0; i < cache_cur; ++i, ++j) {
		if (cache_entry[i] == ro) {
			if (ro->ro_rt == NULL)
				panic("Route cache manipulated (allocated by %s)",
				    cache_caller[i]);
			--j;
		} else {
			cache_caller[j] = cache_caller[i];
			cache_entry[j] = cache_entry[i];
		}
	}
	if (ro->ro_rt != NULL) {
		if (i != j + 1)
			panic("Wrong entries after rtcache_free: %zu (expected %zu)",
			    j, i - 1);
		--cache_cur;
	}
#endif

	if (ro->ro_rt != NULL)
		rtflush(ro);
	ro->ro_rt = NULL;
}

struct rtentry *
rtcache_lookup2(struct route *ro, const struct sockaddr *dst, int clone,
	int *hitp)
{
	const struct sockaddr *odst;

	odst = rtcache_getdst(ro);

	if (odst == NULL)
		;
	else if (sockaddr_cmp(odst, dst) != 0)
		rtcache_free(ro);
	else if (rtcache_down(ro))
		rtcache_clear(ro);

	if (ro->ro_rt == NULL) {
		*hitp = 0;
		rtcache_setdst(ro, dst);
		_rtcache_init(ro, clone);
	} else
		*hitp = 1;

	return ro->ro_rt;
}

void
rtcache_free(struct route *ro)
{
	rtcache_clear(ro);
	if (ro->ro_sa != NULL) {
		sockaddr_free(ro->ro_sa);
		ro->ro_sa = NULL;
	}
}

int
rtcache_setdst(struct route *ro, const struct sockaddr *sa)
{
	KASSERT(sa != NULL);

	if (ro->ro_sa != NULL && ro->ro_sa->sa_family == sa->sa_family) {
		rtcache_clear(ro);
		if (sockaddr_copy(ro->ro_sa, ro->ro_sa->sa_len, sa) != NULL)
			return 0;
		sockaddr_free(ro->ro_sa);
	} else if (ro->ro_sa != NULL)
		rtcache_free(ro);	/* free ro_sa, wrong family */

	if ((ro->ro_sa = sockaddr_dup(sa, M_NOWAIT)) == NULL)
		return ENOMEM;
	return 0;
}

static int
rt_walktree_visitor(struct radix_node *rn, void *v)
{
	struct rtwalk *rw = (struct rtwalk *)v;

	return (*rw->rw_f)((struct rtentry *)rn, rw->rw_v);
}

int
rt_walktree(sa_family_t family, int (*f)(struct rtentry *, void *), void *v)
{
	struct radix_node_head *rnh = rt_tables[family];
	struct rtwalk rw;

	if (rnh == NULL)
		return 0;

	rw.rw_f = f;
	rw.rw_v = v;

	return rn_walktree(rnh, rt_walktree_visitor, &rw);
}
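
/*
 * Usage note: a minimal, uncompiled sketch of how a caller might drive
 * the route cache above.  Only rtcache_lookup2() and struct route come
 * from this file and <net/route.h>; `ro', `dst' and the error handling
 * are hypothetical caller-side details.
 *
 *	struct rtentry *rt;
 *	int hit;
 *
 *	rt = rtcache_lookup2(ro, dst, 1, &hit);	// cloning allowed
 *	if (rt == NULL)
 *		return EHOSTUNREACH;		// no route to dst
 *	// "hit" is 1 when the cached entry was reused, 0 when a fresh
 *	// lookup had to be performed and cached in *ro.
 */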