/*
 * Copyright (c) 2004, 2005 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1980, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)route.c	8.3 (Berkeley) 1/9/95
 * $FreeBSD: src/sys/net/route.c,v 1.59.2.10 2003/01/17 08:04:00 ru Exp $
 */

#include "opt_inet.h"
#include "opt_mpls.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/domain.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/globaldata.h>
#include <sys/thread.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/netisr.h>

#include <netinet/in.h>
#include <net/ip_mroute/ip_mroute.h>

#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <net/netmsg2.h>
#include <net/netisr2.h>

#ifdef MPLS
#include <netproto/mpls/mpls.h>
#endif

static struct rtstatistics rtstatistics_percpu[MAXCPU] __cachealign;
#define rtstat	rtstatistics_percpu[mycpuid]

struct radix_node_head *rt_tables[MAXCPU][AF_MAX+1];

static void	rt_maskedcopy (struct sockaddr *, struct sockaddr *,
			       struct sockaddr *);
static void	rtable_init(void);
static void	rtinit_rtrequest_callback(int, int, struct rt_addrinfo *,
					  struct rtentry *, void *);

static void	rtredirect_msghandler(netmsg_t msg);
static void	rtrequest1_msghandler(netmsg_t msg);
static void	rtsearch_msghandler(netmsg_t msg);
static void	rtmask_add_msghandler(netmsg_t msg);

static int	rt_setshims(struct rtentry *, struct sockaddr **);

SYSCTL_NODE(_net, OID_AUTO, route, CTLFLAG_RW, 0, "Routing");

#ifdef ROUTE_DEBUG
static int route_debug = 1;
SYSCTL_INT(_net_route, OID_AUTO, route_debug, CTLFLAG_RW,
	   &route_debug, 0, "");
#endif

u_long route_kmalloc_limit = 0;
TUNABLE_ULONG("net.route.kmalloc_limit", &route_kmalloc_limit);

/*
 * Initialize the route table(s) for protocol domains and
 * create a helper thread which will be responsible for updating
 * route table entries on each cpu.
 */
void
route_init(void)
{
	int cpu;

	if (route_kmalloc_limit)
		kmalloc_raise_limit(M_RTABLE, route_kmalloc_limit);

	for (cpu = 0; cpu < netisr_ncpus; ++cpu)
		bzero(&rtstatistics_percpu[cpu], sizeof(struct rtstatistics));
	rn_init();	/* initialize all zeroes, all ones, mask table */
	rtable_init();	/* call dom_rtattach() on each cpu */
}

static void
rtable_init_oncpu(netmsg_t msg)
{
	struct domain *dom;
	int cpu = mycpuid;

	ASSERT_NETISR_NCPUS(cpu);

	SLIST_FOREACH(dom, &domains, dom_next) {
		if (dom->dom_rtattach) {
			dom->dom_rtattach(
			    (void **)&rt_tables[cpu][dom->dom_family],
			    dom->dom_rtoffset);
		}
	}
	netisr_forwardmsg(&msg->base, cpu + 1);
}

static void
rtable_init(void)
{
	struct netmsg_base msg;

	netmsg_init(&msg, NULL, &curthread->td_msgport, 0, rtable_init_oncpu);
	netisr_domsg_global(&msg);
}
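
/*
 * Note: each netisr cpu owns its own copy of the routing tables
 * (rt_tables[cpu][family]).  Table modifications are replicated by
 * sending a netmsg through the netisr cpus, each cpu applying the
 * change and forwarding the message to the next; see
 * rtable_init_oncpu() above and rtrequest1_msghandler() below.
 */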

/*
 * Routing statistics.
 */
static int
sysctl_rtstatistics(SYSCTL_HANDLER_ARGS)
{
	int cpu, error = 0;

	for (cpu = 0; cpu < netisr_ncpus; ++cpu) {
		if ((error = SYSCTL_OUT(req, &rtstatistics_percpu[cpu],
					sizeof(struct rtstatistics))))
			break;
		if ((error = SYSCTL_IN(req, &rtstatistics_percpu[cpu],
				       sizeof(struct rtstatistics))))
			break;
	}

	return (error);
}
SYSCTL_PROC(_net_route, OID_AUTO, stats, (CTLTYPE_OPAQUE|CTLFLAG_RW),
	    0, 0, sysctl_rtstatistics, "S,rtstatistics", "Routing statistics");

/*
 * Packet routing routines.
 */

/*
 * Look up and fill in the "ro_rt" rtentry field in a route structure given
 * an address in the "ro_dst" field.  Always send a report on a miss and
 * always clone routes.
 */
void
rtalloc(struct route *ro)
{
	rtalloc_ign(ro, 0UL);
}

/*
 * Look up and fill in the "ro_rt" rtentry field in a route structure given
 * an address in the "ro_dst" field.  Always send a report on a miss and
 * optionally clone routes when RTF_CLONING or RTF_PRCLONING are not being
 * ignored.
 */
void
rtalloc_ign(struct route *ro, u_long ignoreflags)
{
	if (ro->ro_rt != NULL) {
		if (ro->ro_rt->rt_ifp != NULL && ro->ro_rt->rt_flags & RTF_UP)
			return;
		rtfree(ro->ro_rt);
		ro->ro_rt = NULL;
	}
	ro->ro_rt = _rtlookup(&ro->ro_dst, ignoreflags);
}
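
/*
 * Illustrative sketch (not taken from the original sources): a caller
 * that caches a route typically sets up ro_dst once and revalidates the
 * cache before each use, roughly:
 *
 *	rtalloc_ign(ro, RTF_PRCLONING);
 *	if (ro->ro_rt == NULL)
 *		return (EHOSTUNREACH);
 */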

/*
 * Look up the route that matches the given "dst" address.
 *
 * Route lookup can have the side-effect of creating and returning
 * a cloned route instead when "dst" matches a cloning route and the
 * RTF_CLONING and RTF_PRCLONING flags are not being ignored.
 *
 * Any route returned has its reference count incremented.
 */
struct rtentry *
_rtlookup(struct sockaddr *dst, u_long ignore)
{
	struct radix_node_head *rnh = rt_tables[mycpuid][dst->sa_family];
	struct rtentry *rt;

	ASSERT_NETISR_NCPUS(mycpuid);

	if (rnh == NULL)
		goto unreach;

	/*
	 * Look up route in the radix tree.
	 */
	rt = (struct rtentry *) rnh->rnh_matchaddr(dst, rnh);
	if (rt == NULL)
		goto unreach;

	/*
	 * Handle cloning routes.
	 */
	if ((rt->rt_flags & ~ignore & (RTF_CLONING | RTF_PRCLONING)) != 0) {
		struct rtentry *clonedroute;
		int error;

		clonedroute = rt;	/* copy in/copy out parameter */
		error = rtrequest(RTM_RESOLVE, dst, NULL, NULL, 0,
				  &clonedroute);	/* clone the route */
		if (error != 0) {	/* cloning failed */
			rt_dstmsg(RTM_MISS, dst, error);
			rt->rt_refcnt++;
			return (rt);	/* return the uncloned route */
		}
		if (clonedroute->rt_flags & RTF_XRESOLVE)
			rt_dstmsg(RTM_RESOLVE, dst, 0);
		return (clonedroute);	/* return cloned route */
	}

	/*
	 * Increment the reference count of the matched route and return.
	 */
	rt->rt_refcnt++;
	return (rt);

unreach:
	rtstat.rts_unreach++;
	rt_dstmsg(RTM_MISS, dst, 0);
	return (NULL);
}

void
rtfree(struct rtentry *rt)
{

	ASSERT_NETISR_NCPUS(rt->rt_cpuid);
	KASSERT(rt->rt_refcnt > 0, ("rtfree: rt_refcnt %ld", rt->rt_refcnt));

	--rt->rt_refcnt;
	if (rt->rt_refcnt == 0) {
		struct radix_node_head *rnh =
		    rt_tables[mycpuid][rt_key(rt)->sa_family];

		if (rnh->rnh_close)
			rnh->rnh_close((struct radix_node *)rt, rnh);
		if (!(rt->rt_flags & RTF_UP)) {
			/* deallocate route */
			if (rt->rt_ifa != NULL)
				IFAFREE(rt->rt_ifa);
			if (rt->rt_parent != NULL)
				RTFREE(rt->rt_parent);	/* recursive call! */
			R_Free(rt_key(rt));
			R_Free(rt);
		}
	}
}

static void
rtfree_async_dispatch(netmsg_t msg)
{
	struct rtentry *rt = msg->lmsg.u.ms_resultp;

	rtfree(rt);
	netisr_replymsg(&msg->base, 0);
}

void
rtfree_async(struct rtentry *rt)
{
	struct netmsg_base *msg;

	if (IN_NETISR_NCPUS(rt->rt_cpuid)) {
		rtfree(rt);
		return;
	}

	KASSERT(rt->rt_refcnt > 0,
	    ("rtfree_async: rt_refcnt %ld", rt->rt_refcnt));

	msg = kmalloc(sizeof(*msg), M_LWKTMSG, M_INTWAIT);
	netmsg_init(msg, NULL, &netisr_afree_rport, 0, rtfree_async_dispatch);
	msg->lmsg.u.ms_resultp = rt;

	netisr_sendmsg(msg, rt->rt_cpuid);
}
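
/*
 * Apply a routing redirect on the current cpu's table: validate the
 * redirect against the existing route for "dst" and either create a
 * dynamic host route through the new gateway or modify the existing
 * route's gateway in place.  Run on each netisr cpu via rtredirect()
 * below.
 */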
int
rtredirect_oncpu(struct sockaddr *dst, struct sockaddr *gateway,
		 struct sockaddr *netmask, int flags, struct sockaddr *src)
{
	struct rtentry *rt = NULL;
	struct rt_addrinfo rtinfo;
	struct ifaddr *ifa;
	u_long *stat = NULL;
	int error;

	ASSERT_NETISR_NCPUS(mycpuid);

	/* verify the gateway is directly reachable */
	if ((ifa = ifa_ifwithnet(gateway)) == NULL) {
		error = ENETUNREACH;
		goto out;
	}

	/*
	 * If the redirect isn't from our current router for this destination,
	 * it's either old or wrong.
	 */
	if (!(flags & RTF_DONE) &&		/* XXX JH */
	    (rt = rtpurelookup(dst)) != NULL &&
	    (!sa_equal(src, rt->rt_gateway) || rt->rt_ifa != ifa)) {
		error = EINVAL;
		goto done;
	}

	/*
	 * If it redirects us to ourselves, we have a routing loop,
	 * perhaps as a result of an interface going down recently.
	 */
	if (ifa_ifwithaddr(gateway)) {
		error = EHOSTUNREACH;
		goto done;
	}

	/*
	 * Create a new entry if the lookup failed or if we got back
	 * a wildcard entry for the default route.  This is necessary
	 * for hosts which use routing redirects generated by smart
	 * gateways to dynamically build the routing tables.
	 */
	if (rt == NULL)
		goto create;
	if ((rt_mask(rt) != NULL && rt_mask(rt)->sa_len < 2)) {
		rtfree(rt);
		goto create;
	}

	/* Ignore redirects for directly connected hosts. */
	if (!(rt->rt_flags & RTF_GATEWAY)) {
		error = EHOSTUNREACH;
		goto done;
	}

	if (!(rt->rt_flags & RTF_HOST) && (flags & RTF_HOST)) {
		/*
		 * Changing from a network route to a host route.
		 * Create a new host route rather than smashing the
		 * network route.
		 */
create:
		flags |= RTF_GATEWAY | RTF_DYNAMIC;
		bzero(&rtinfo, sizeof(struct rt_addrinfo));
		rtinfo.rti_info[RTAX_DST] = dst;
		rtinfo.rti_info[RTAX_GATEWAY] = gateway;
		rtinfo.rti_info[RTAX_NETMASK] = netmask;
		rtinfo.rti_flags = flags;
		rtinfo.rti_ifa = ifa;
		rt = NULL;	/* copy-in/copy-out parameter */
		error = rtrequest1(RTM_ADD, &rtinfo, &rt);
		if (rt != NULL)
			flags = rt->rt_flags;
		stat = &rtstat.rts_dynamic;
	} else {
		/*
		 * Smash the current notion of the gateway to this destination.
		 * Should check about netmask!!!
		 */
		rt->rt_flags |= RTF_MODIFIED;
		flags |= RTF_MODIFIED;

		/* We only need to report rtmsg on CPU0 */
		rt_setgate(rt, rt_key(rt), gateway);
		if (mycpuid == 0)
			rt_rtmsg(RTM_CHANGE, rt, rt->rt_ifp, 0);
		error = 0;
		stat = &rtstat.rts_newgateway;
	}

done:
	if (rt != NULL)
		rtfree(rt);
out:
	if (error != 0)
		rtstat.rts_badredirect++;
	else if (stat != NULL)
		(*stat)++;

	return error;
}

struct netmsg_rtredirect {
	struct netmsg_base base;
	struct sockaddr *dst;
	struct sockaddr *gateway;
	struct sockaddr *netmask;
	int flags;
	struct sockaddr *src;
};

/*
 * Force a routing table entry to the specified
 * destination to go through the given gateway.
 * Normally called as a result of a routing redirect
 * message from the network layer.
 */
void
rtredirect(struct sockaddr *dst, struct sockaddr *gateway,
	   struct sockaddr *netmask, int flags, struct sockaddr *src)
{
	struct rt_addrinfo rtinfo;
	int error;
	struct netmsg_rtredirect msg;

	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
		    0, rtredirect_msghandler);
	msg.dst = dst;
	msg.gateway = gateway;
	msg.netmask = netmask;
	msg.flags = flags;
	msg.src = src;
	error = netisr_domsg_global(&msg.base);

	bzero(&rtinfo, sizeof(struct rt_addrinfo));
	rtinfo.rti_info[RTAX_DST] = dst;
	rtinfo.rti_info[RTAX_GATEWAY] = gateway;
	rtinfo.rti_info[RTAX_NETMASK] = netmask;
	rtinfo.rti_info[RTAX_AUTHOR] = src;
	rt_missmsg(RTM_REDIRECT, &rtinfo, flags, error);
}

static void
rtredirect_msghandler(netmsg_t msg)
{
	struct netmsg_rtredirect *rmsg = (void *)msg;

	rtredirect_oncpu(rmsg->dst, rmsg->gateway, rmsg->netmask,
			 rmsg->flags, rmsg->src);
	netisr_forwardmsg(&msg->base, mycpuid + 1);
}

/*
 * Routing table ioctl interface.
 */
int
rtioctl(u_long req, caddr_t data, struct ucred *cred)
{
#ifdef INET
	/* Multicast goop, grrr... */
	return mrt_ioctl ? mrt_ioctl(req, data) : EOPNOTSUPP;
#else
	return ENXIO;
#endif
}
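
/*
 * Pick the interface address to associate with a route, given the
 * route's flags, destination and gateway.  Falls back to a routing
 * table lookup of the gateway when no directly attached interface
 * matches, and prefers an address of the destination's address family.
 */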
struct ifaddr *
ifa_ifwithroute(int flags, struct sockaddr *dst, struct sockaddr *gateway)
{
	struct ifaddr *ifa;

	if (!(flags & RTF_GATEWAY)) {
		/*
		 * If we are adding a route to an interface,
		 * and the interface is a point-to-point link,
		 * we should search for the destination
		 * as our clue to the interface.  Otherwise
		 * we can use the local address.
		 */
		ifa = NULL;
		if (flags & RTF_HOST) {
			ifa = ifa_ifwithdstaddr(dst);
		}
		if (ifa == NULL)
			ifa = ifa_ifwithaddr(gateway);
	} else {
		/*
		 * If we are adding a route to a remote net
		 * or host, the gateway may still be on the
		 * other end of a pt to pt link.
		 */
		ifa = ifa_ifwithdstaddr(gateway);
	}
	if (ifa == NULL)
		ifa = ifa_ifwithnet(gateway);
	if (ifa == NULL) {
		struct rtentry *rt;

		rt = rtpurelookup(gateway);
		if (rt == NULL)
			return (NULL);
		rt->rt_refcnt--;
		if ((ifa = rt->rt_ifa) == NULL)
			return (NULL);
	}
	if (ifa->ifa_addr->sa_family != dst->sa_family) {
		struct ifaddr *oldifa = ifa;

		ifa = ifaof_ifpforaddr(dst, ifa->ifa_ifp);
		if (ifa == NULL)
			ifa = oldifa;
	}
	return (ifa);
}

static int rt_fixdelete (struct radix_node *, void *);
static int rt_fixchange (struct radix_node *, void *);

struct rtfc_arg {
	struct rtentry *rt0;
	struct radix_node_head *rnh;
};

/*
 * Set rtinfo->rti_ifa and rtinfo->rti_ifp.
 *
 * Assume that the caller did basic checks to ensure:
 * - RTAX_DST exists
 * - RTAX_GATEWAY exists if RTF_GATEWAY is set
 */
int
rt_getifa(struct rt_addrinfo *rtinfo)
{
	struct sockaddr *gateway = rtinfo->rti_info[RTAX_GATEWAY];
	struct sockaddr *dst = rtinfo->rti_info[RTAX_DST];
	struct sockaddr *ifaaddr = rtinfo->rti_info[RTAX_IFA];
	int flags = rtinfo->rti_flags;

	/*
	 * ifp may be specified by sockaddr_dl
	 * when protocol address is ambiguous.
	 */
	if (rtinfo->rti_ifp == NULL) {
		struct sockaddr *ifpaddr;

		/*
		 * If we have interface specified by RTAX_IFP address,
		 * try to use it.
		 */
		ifpaddr = rtinfo->rti_info[RTAX_IFP];
		if (ifpaddr != NULL && ifpaddr->sa_family == AF_LINK) {
			struct ifaddr *ifa;

			ifa = ifa_ifwithnet(ifpaddr);
			if (ifa != NULL)
				rtinfo->rti_ifp = ifa->ifa_ifp;
		}
	}

	/*
	 * If we have source address specified, try to find it.
	 */
	if (rtinfo->rti_ifa == NULL && ifaaddr != NULL)
		rtinfo->rti_ifa = ifa_ifwithaddr(ifaaddr);
	if (rtinfo->rti_ifa == NULL) {
		struct sockaddr *sa;

		/*
		 * Most common use case for the userland-supplied routes.
		 *
		 * The IFA is determined by:
		 * + If ifp is set, try the following in order:
		 *   1. IFA address
		 *   2. Gateway address
		 *      Note: For interface routes a link-level gateway
		 *            address is specified to indicate the interface
		 *            index without specifying RTF_GATEWAY.  Ignore
		 *            the gateway in this case.
		 *      Note: The gateway may have a different AF than the
		 *            dst.  Also ignore the gateway in this case.
		 *   3. Final destination
		 *   4. Try to get at least a link-level IFA.
		 *      Note: This allows adding a directly-reachable
		 *            interface prefix to an interface without any
		 *            IP address.
		 * + Else:
		 *   Try to lookup gateway or dst in the routing table to get
		 *   the IFA.
		 */
		if (ifaaddr != NULL)
			sa = ifaaddr;
		else if ((flags & RTF_GATEWAY) != 0 &&
		    gateway->sa_family == dst->sa_family)
			sa = gateway;
		else
			sa = dst;
		KKASSERT(sa != NULL);

		if (rtinfo->rti_ifp != NULL) {
			rtinfo->rti_ifa = ifaof_ifpforaddr(sa, rtinfo->rti_ifp);
			if (rtinfo->rti_ifa == NULL &&
			    gateway != NULL && gateway != sa)
				rtinfo->rti_ifa =
				    ifaof_ifpforaddr(gateway, rtinfo->rti_ifp);
		} else if (dst != NULL && gateway != NULL) {
			rtinfo->rti_ifa = ifa_ifwithroute(flags, dst, gateway);
		} else {
			rtinfo->rti_ifa = ifa_ifwithroute(flags, sa, sa);
		}
	}
	if (rtinfo->rti_ifa == NULL)
		return (ENETUNREACH);

	if (rtinfo->rti_ifp == NULL)
		rtinfo->rti_ifp = rtinfo->rti_ifa->ifa_ifp;
	return (0);
}
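
/*
 * Illustrative sketch (not taken from the original sources): a typical
 * caller adds a gateway route to the current cpu's table with
 * rtrequest() below, or replicates the change onto every netisr cpu
 * with rtrequest_global(), roughly:
 *
 *	error = rtrequest(RTM_ADD, dst, gateway, netmask,
 *			  RTF_GATEWAY, NULL);
 *
 *	error = rtrequest_global(RTM_ADD, dst, gateway, netmask,
 *				 RTF_GATEWAY);
 */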

/*
 * Do appropriate manipulations of a routing tree given
 * all the bits of info needed
 */
int
rtrequest(
	int req,
	struct sockaddr *dst,
	struct sockaddr *gateway,
	struct sockaddr *netmask,
	int flags,
	struct rtentry **ret_nrt)
{
	struct rt_addrinfo rtinfo;

	bzero(&rtinfo, sizeof(struct rt_addrinfo));
	rtinfo.rti_info[RTAX_DST] = dst;
	rtinfo.rti_info[RTAX_GATEWAY] = gateway;
	rtinfo.rti_info[RTAX_NETMASK] = netmask;
	rtinfo.rti_flags = flags;
	return rtrequest1(req, &rtinfo, ret_nrt);
}

int
rtrequest_global(
	int req,
	struct sockaddr *dst,
	struct sockaddr *gateway,
	struct sockaddr *netmask,
	int flags)
{
	struct rt_addrinfo rtinfo;

	bzero(&rtinfo, sizeof(struct rt_addrinfo));
	rtinfo.rti_info[RTAX_DST] = dst;
	rtinfo.rti_info[RTAX_GATEWAY] = gateway;
	rtinfo.rti_info[RTAX_NETMASK] = netmask;
	rtinfo.rti_flags = flags;
	return rtrequest1_global(req, &rtinfo, NULL, NULL, RTREQ_PRIO_NORM);
}

struct netmsg_rtq {
	struct netmsg_base base;
	int req;
	struct rt_addrinfo *rtinfo;
	rtrequest1_callback_func_t callback;
	void *arg;
};

int
rtrequest1_global(int req, struct rt_addrinfo *rtinfo,
    rtrequest1_callback_func_t callback, void *arg, boolean_t req_prio)
{
	struct netmsg_rtq msg;
	int flags = 0;

	if (req_prio)
		flags = MSGF_PRIORITY;
	netmsg_init(&msg.base, NULL, &curthread->td_msgport, flags,
	    rtrequest1_msghandler);
	msg.base.lmsg.ms_error = -1;
	msg.req = req;
	msg.rtinfo = rtinfo;
	msg.callback = callback;
	msg.arg = arg;
	return (netisr_domsg_global(&msg.base));
}

/*
 * Handle a route table request on the current cpu.  Since the route tables
 * are supposed to be identical on each cpu, an error occurring later in the
 * message chain is considered system-fatal.
 */
static void
rtrequest1_msghandler(netmsg_t msg)
{
	struct netmsg_rtq *rmsg = (void *)msg;
	struct rt_addrinfo rtinfo;
	struct rtentry *rt = NULL;
	int error;

	/*
	 * Copy the rtinfo.  We need to make sure that the original
	 * rtinfo, which is setup by the caller, in the netmsg will
	 * _not_ be changed; else the next CPU on the netmsg forwarding
	 * path will see a different rtinfo than what this CPU has seen.
	 */
	rtinfo = *rmsg->rtinfo;

	error = rtrequest1(rmsg->req, &rtinfo, &rt);
	if (rt)
		--rt->rt_refcnt;
	if (rmsg->callback)
		rmsg->callback(rmsg->req, error, &rtinfo, rt, rmsg->arg);

	/*
	 * RTM_DELETE's are propagated even if an error occurs, since a
	 * cloned route might be undergoing deletion and cloned routes
	 * are not necessarily replicated.  An overall error is returned
	 * only if no cpus have the route in question.
	 */
	if (rmsg->base.lmsg.ms_error < 0 || error == 0)
		rmsg->base.lmsg.ms_error = error;

	if (error && rmsg->req != RTM_DELETE) {
		if (mycpuid != 0) {
			panic("rtrequest1_msghandler: rtrequest table req %d, "
			    "failed on cpu%d, error %d\n",
			    rmsg->req, mycpuid, error);
		}
		netisr_replymsg(&rmsg->base, error);
	} else {
		netisr_forwardmsg_error(&rmsg->base, mycpuid + 1,
		    rmsg->base.lmsg.ms_error);
	}
}

int
rtrequest1(int req, struct rt_addrinfo *rtinfo, struct rtentry **ret_nrt)
{
	struct sockaddr *dst = rtinfo->rti_info[RTAX_DST];
	struct rtentry *rt;
	struct radix_node *rn;
	struct radix_node_head *rnh;
	struct ifaddr *ifa;
	struct sockaddr *ndst;
	int error = 0;

	ASSERT_NETISR_NCPUS(mycpuid);

#define gotoerr(x) { error = x ; goto bad; }

#ifdef ROUTE_DEBUG
	if (route_debug)
		rt_addrinfo_print(req, rtinfo);
#endif

	crit_enter();
	/*
	 * Find the correct routing tree to use for this Address Family
	 */
	if ((rnh = rt_tables[mycpuid][dst->sa_family]) == NULL)
		gotoerr(EAFNOSUPPORT);

	/*
	 * If we are adding a host route then we don't want to put
	 * a netmask in the tree, nor do we want to clone it.
	 */
	if (rtinfo->rti_flags & RTF_HOST) {
		rtinfo->rti_info[RTAX_NETMASK] = NULL;
		rtinfo->rti_flags &= ~(RTF_CLONING | RTF_PRCLONING);
	}

	switch (req) {
	case RTM_DELETE:
		/* Remove the item from the tree. */
		rn = rnh->rnh_deladdr(rtinfo->rti_info[RTAX_DST],
				      rtinfo->rti_info[RTAX_NETMASK], rnh);
		if (rn == NULL)
			gotoerr(ESRCH);
		KASSERT(!(rn->rn_flags & (RNF_ACTIVE | RNF_ROOT)),
			("rnh_deladdr returned flags 0x%x", rn->rn_flags));
		rt = (struct rtentry *)rn;

		/* ref to prevent a deletion race */
		++rt->rt_refcnt;

		/* Free any routes cloned from this one. */
		if ((rt->rt_flags & (RTF_CLONING | RTF_PRCLONING)) &&
		    rt_mask(rt) != NULL) {
			rnh->rnh_walktree_from(rnh, rt_key(rt), rt_mask(rt),
					       rt_fixdelete, rt);
		}

		if (rt->rt_gwroute != NULL) {
			RTFREE(rt->rt_gwroute);
			rt->rt_gwroute = NULL;
		}

		/*
		 * NB: RTF_UP must be set during the search above,
		 * because we might delete the last ref, causing
		 * rt to get freed prematurely.
		 */
		rt->rt_flags &= ~RTF_UP;

#ifdef ROUTE_DEBUG
		if (route_debug)
			rt_print(rtinfo, rt);
#endif

		/* Give the protocol a chance to keep things in sync. */
		if ((ifa = rt->rt_ifa) && ifa->ifa_rtrequest)
			ifa->ifa_rtrequest(RTM_DELETE, rt);

		/*
		 * If the caller wants it, then it can have it,
		 * but it's up to it to free the rtentry as we won't be
		 * doing it.
		 */
		KASSERT(rt->rt_refcnt >= 0,
			("rtrequest1(DELETE): refcnt %ld", rt->rt_refcnt));
		if (ret_nrt != NULL) {
			/* leave ref intact for return */
			*ret_nrt = rt;
		} else {
			/* deref / attempt to destroy */
			rtfree(rt);
		}
		break;
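
	/*
	 * RTM_RESOLVE: clone the (PR)CLONING route passed in via *ret_nrt
	 * into a more specific route for this destination.  Used by
	 * _rtlookup() when a lookup hits a cloning route.
	 */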
	case RTM_RESOLVE:
		if (ret_nrt == NULL || (rt = *ret_nrt) == NULL)
			gotoerr(EINVAL);

		if (!(rt->rt_ifp->if_flags & IFF_UP))
			gotoerr(ENETDOWN);

		KASSERT(rt->rt_cpuid == mycpuid,
		    ("rt resolve rt_cpuid %d, mycpuid %d",
		     rt->rt_cpuid, mycpuid));

		ifa = rt->rt_ifa;
		rtinfo->rti_flags =
		    rt->rt_flags & ~(RTF_CLONING | RTF_PRCLONING | RTF_STATIC);
		rtinfo->rti_flags |= RTF_WASCLONED;
		rtinfo->rti_info[RTAX_GATEWAY] = rt->rt_gateway;
		if ((rtinfo->rti_info[RTAX_NETMASK] = rt->rt_genmask) == NULL)
			rtinfo->rti_flags |= RTF_HOST;
		rtinfo->rti_info[RTAX_MPLS1] = rt->rt_shim[0];
		rtinfo->rti_info[RTAX_MPLS2] = rt->rt_shim[1];
		rtinfo->rti_info[RTAX_MPLS3] = rt->rt_shim[2];
		goto makeroute;

	case RTM_ADD:
		KASSERT(!(rtinfo->rti_flags & RTF_GATEWAY) ||
			rtinfo->rti_info[RTAX_GATEWAY] != NULL,
			("rtrequest: GATEWAY but no gateway"));

		if (rtinfo->rti_ifa == NULL && (error = rt_getifa(rtinfo)))
			gotoerr(error);
		ifa = rtinfo->rti_ifa;
makeroute:
		R_Malloc(rt, struct rtentry *, sizeof(struct rtentry));
		if (rt == NULL) {
			if (req == RTM_ADD) {
				kprintf("rtrequest1: alloc rtentry failed on "
				    "cpu%d\n", mycpuid);
			}
			gotoerr(ENOBUFS);
		}
		bzero(rt, sizeof(struct rtentry));
		rt->rt_flags = RTF_UP | rtinfo->rti_flags;
		rt->rt_cpuid = mycpuid;

		error = rt_setgate(rt, dst, rtinfo->rti_info[RTAX_GATEWAY]);
		if (error != 0) {
			R_Free(rt);
			gotoerr(error);
		}

		ndst = rt_key(rt);
		if (rtinfo->rti_info[RTAX_NETMASK] != NULL)
			rt_maskedcopy(dst, ndst,
				      rtinfo->rti_info[RTAX_NETMASK]);
		else
			bcopy(dst, ndst, dst->sa_len);

		if (rtinfo->rti_info[RTAX_MPLS1] != NULL)
			rt_setshims(rt, rtinfo->rti_info);

		/*
		 * Note that we now have a reference to the ifa.
		 * This moved from below so that rnh->rnh_addaddr() can
		 * examine the ifa and ifa->ifa_ifp if it so desires.
		 */
		IFAREF(ifa);
		rt->rt_ifa = ifa;
		rt->rt_ifp = ifa->ifa_ifp;
		/* XXX mtu manipulation will be done in rnh_addaddr -- itojun */

		rn = rnh->rnh_addaddr(ndst, rtinfo->rti_info[RTAX_NETMASK],
				      rnh, rt->rt_nodes);
		if (rn == NULL) {
			struct rtentry *oldrt;

			/*
			 * We already have one of these in the tree.
			 * We do a special hack: if the old route was
			 * cloned, then we blow it away and try
			 * re-inserting the new one.
			 */
			oldrt = rtpurelookup(ndst);
			if (oldrt != NULL) {
				--oldrt->rt_refcnt;
				if (oldrt->rt_flags & RTF_WASCLONED) {
					rtrequest(RTM_DELETE, rt_key(oldrt),
						  oldrt->rt_gateway,
						  rt_mask(oldrt),
						  oldrt->rt_flags, NULL);
					rn = rnh->rnh_addaddr(ndst,
					    rtinfo->rti_info[RTAX_NETMASK],
					    rnh, rt->rt_nodes);
				}
			}
		}
		/* NOTE: rt_ifa may have been changed */
		ifa = rt->rt_ifa;

		/*
		 * If it still failed to go into the tree,
		 * then un-make it (this should be a function).
		 */
		if (rn == NULL) {
			if (rt->rt_gwroute != NULL)
				rtfree(rt->rt_gwroute);
			IFAFREE(ifa);
			R_Free(rt_key(rt));
			R_Free(rt);
			gotoerr(EEXIST);
		}

		/*
		 * If we got here from RESOLVE, then we are cloning
		 * so clone the rest, and note that we
		 * are a clone (and increment the parent's references)
		 */
		if (req == RTM_RESOLVE) {
			rt->rt_rmx = (*ret_nrt)->rt_rmx; /* copy metrics */
			rt->rt_rmx.rmx_pksent = 0; /* reset packet counter */
			if ((*ret_nrt)->rt_flags &
			    (RTF_CLONING | RTF_PRCLONING)) {
				rt->rt_parent = *ret_nrt;
				(*ret_nrt)->rt_refcnt++;
			}
		}

		/*
		 * if this protocol has something to add to this then
		 * allow it to do that as well.
		 */
		if (ifa->ifa_rtrequest != NULL)
			ifa->ifa_rtrequest(req, rt);

		/*
		 * We repeat the same procedure from rt_setgate() here because
		 * it doesn't fire when we call it there because the node
		 * hasn't been added to the tree yet.
		 */
		if (req == RTM_ADD && !(rt->rt_flags & RTF_HOST) &&
		    rt_mask(rt) != NULL) {
			struct rtfc_arg arg = { rt, rnh };

			rnh->rnh_walktree_from(rnh, rt_key(rt), rt_mask(rt),
					       rt_fixchange, &arg);
		}

#ifdef ROUTE_DEBUG
		if (route_debug)
			rt_print(rtinfo, rt);
#endif
		/*
		 * Return the resulting rtentry,
		 * increasing the number of references by one.
		 */
		if (ret_nrt != NULL) {
			rt->rt_refcnt++;
			*ret_nrt = rt;
		}
		break;
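
	/*
	 * RTM_GET: look up the entry without cloning and, if the caller
	 * asked for it, return it with an extra reference.
	 */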
	case RTM_GET:
		/* Get the item from the tree. */
		rn = rnh->rnh_lookup(rtinfo->rti_info[RTAX_DST],
				     rtinfo->rti_info[RTAX_NETMASK], rnh);
		if (rn == NULL)
			gotoerr(ESRCH);
		if (ret_nrt != NULL) {
			rt = (struct rtentry *)rn;
			rt->rt_refcnt++;
			*ret_nrt = rt;
		}
		break;
	default:
		error = EOPNOTSUPP;
	}
bad:
#ifdef ROUTE_DEBUG
	if (route_debug) {
		if (error)
			kprintf("rti %p failed error %d\n", rtinfo, error);
		else
			kprintf("rti %p succeeded\n", rtinfo);
	}
#endif
	crit_exit();
	return (error);
}

/*
 * Called from rtrequest(RTM_DELETE, ...) to fix up the route's ``family''
 * (i.e., the routes related to it by the operation of cloning).  This
 * routine is iterated over all potential former-child-routes by way of
 * rnh->rnh_walktree_from() above, and those that actually are children of
 * the late parent (passed in as VP here) are themselves deleted.
 */
static int
rt_fixdelete(struct radix_node *rn, void *vp)
{
	struct rtentry *rt = (struct rtentry *)rn;
	struct rtentry *rt0 = vp;

	if (rt->rt_parent == rt0 &&
	    !(rt->rt_flags & (RTF_PINNED | RTF_CLONING | RTF_PRCLONING))) {
		return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
				 rt->rt_flags, NULL);
	}
	return 0;
}

/*
 * This routine is called from rt_setgate() to do the analogous thing for
 * adds and changes.  There is the added complication in this case of a
 * middle insert; i.e., insertion of a new network route between an older
 * network route and (cloned) host routes.  For this reason, a simple check
 * of rt->rt_parent is insufficient; each candidate route must be tested
 * against the (mask, value) of the new route (passed as before in vp)
 * to see if the new route matches it.
 *
 * XXX - it may be possible to do fixdelete() for changes and reserve this
 * routine just for adds.  I'm not sure why I thought it was necessary to do
 * changes this way.
 */
#ifdef DEBUG
static int rtfcdebug = 0;
#endif

static int
rt_fixchange(struct radix_node *rn, void *vp)
{
	struct rtentry *rt = (struct rtentry *)rn;
	struct rtfc_arg *ap = vp;
	struct rtentry *rt0 = ap->rt0;
	struct radix_node_head *rnh = ap->rnh;
	u_char *xk1, *xm1, *xk2, *xmp;
	int i, len, mlen;

#ifdef DEBUG
	if (rtfcdebug)
		kprintf("rt_fixchange: rt %p, rt0 %p\n", rt, rt0);
#endif

	if (rt->rt_parent == NULL ||
	    (rt->rt_flags & (RTF_PINNED | RTF_CLONING | RTF_PRCLONING))) {
#ifdef DEBUG
		if (rtfcdebug) kprintf("no parent, pinned or cloning\n");
#endif
		return 0;
	}

	if (rt->rt_parent == rt0) {
#ifdef DEBUG
		if (rtfcdebug) kprintf("parent match\n");
#endif
		return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
				 rt->rt_flags, NULL);
	}

	/*
	 * There probably is a function somewhere which does this...
	 * if not, there should be.
	 */
	len = imin(rt_key(rt0)->sa_len, rt_key(rt)->sa_len);

	xk1 = (u_char *)rt_key(rt0);
	xm1 = (u_char *)rt_mask(rt0);
	xk2 = (u_char *)rt_key(rt);

	/* avoid applying a less specific route */
	xmp = (u_char *)rt_mask(rt->rt_parent);
	mlen = rt_key(rt->rt_parent)->sa_len;
	if (mlen > rt_key(rt0)->sa_len) {
#ifdef DEBUG
		if (rtfcdebug)
			kprintf("rt_fixchange: inserting a less "
				"specific route\n");
#endif
		return 0;
	}
	for (i = rnh->rnh_treetop->rn_offset; i < mlen; i++) {
		if ((xmp[i] & ~(xmp[i] ^ xm1[i])) != xmp[i]) {
#ifdef DEBUG
			if (rtfcdebug)
				kprintf("rt_fixchange: inserting a less "
					"specific route\n");
#endif
			return 0;
		}
	}

	for (i = rnh->rnh_treetop->rn_offset; i < len; i++) {
		if ((xk2[i] & xm1[i]) != xk1[i]) {
#ifdef DEBUG
			if (rtfcdebug) kprintf("no match\n");
#endif
			return 0;
		}
	}

	/*
	 * OK, this node is a clone, and matches the node currently being
	 * changed/added under the node's mask.  So, get rid of it.
	 */
#ifdef DEBUG
	if (rtfcdebug) kprintf("deleting\n");
#endif
	return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
			 rt->rt_flags, NULL);
}
int
rt_setgate(struct rtentry *rt0, struct sockaddr *dst, struct sockaddr *gate)
{
	char *space, *oldspace;
	int dlen = RT_ROUNDUP(dst->sa_len), glen = RT_ROUNDUP(gate->sa_len);
	struct rtentry *rt = rt0;
	struct radix_node_head *rnh = rt_tables[mycpuid][dst->sa_family];

	ASSERT_NETISR_NCPUS(mycpuid);

	/*
	 * A host route with the destination equal to the gateway
	 * will interfere with keeping LLINFO in the routing
	 * table, so disallow it.
	 */
	if (((rt0->rt_flags & (RTF_HOST | RTF_GATEWAY | RTF_LLINFO)) ==
			      (RTF_HOST | RTF_GATEWAY)) &&
	    dst->sa_len == gate->sa_len &&
	    sa_equal(dst, gate)) {
		/*
		 * The route might already exist if this is an RTM_CHANGE
		 * or a routing redirect, so try to delete it.
		 */
		if (rt_key(rt0) != NULL)
			rtrequest(RTM_DELETE, rt_key(rt0), rt0->rt_gateway,
				  rt_mask(rt0), rt0->rt_flags, NULL);
		return EADDRNOTAVAIL;
	}

	/*
	 * Both dst and gateway are stored in the same malloc'ed chunk
	 * (If I ever get my hands on....)
	 * if we need to malloc a new chunk, then keep the old one around
	 * till we don't need it any more.
	 */
	if (rt->rt_gateway == NULL ||
	    glen > RT_ROUNDUP(rt->rt_gateway->sa_len)) {
		oldspace = (char *)rt_key(rt);
		R_Malloc(space, char *, dlen + glen);
		if (space == NULL)
			return ENOBUFS;
		rt->rt_nodes->rn_key = space;
	} else {
		space = (char *)rt_key(rt);	/* Just use the old space. */
		oldspace = NULL;
	}

	/* Set the gateway value. */
	rt->rt_gateway = (struct sockaddr *)(space + dlen);
	bcopy(gate, rt->rt_gateway, glen);

	if (oldspace != NULL) {
		/*
		 * If we allocated a new chunk, preserve the original dst.
		 * This way, rt_setgate() really just sets the gate
		 * and leaves the dst field alone.
		 */
		bcopy(dst, space, dlen);
		R_Free(oldspace);
	}

	/*
	 * If there is already a gwroute, it's now almost definitely wrong
	 * so drop it.
	 */
	if (rt->rt_gwroute != NULL) {
		RTFREE(rt->rt_gwroute);
		rt->rt_gwroute = NULL;
	}
	if (rt->rt_flags & RTF_GATEWAY) {
		/*
		 * Cloning loop avoidance: In the presence of
		 * protocol-cloning and bad configuration, it is
		 * possible to get stuck in bottomless mutual recursion
		 * (rtrequest rt_setgate rtlookup).  We avoid this
		 * by not allowing protocol-cloning to operate for
		 * gateways (which is probably the correct choice
		 * anyway), and avoid the resulting reference loops
		 * by disallowing any route to run through itself as
		 * a gateway.  This is obviously mandatory when we
		 * get rt->rt_output().
		 *
		 * This breaks TTCP for hosts outside the gateway!  XXX JH
		 */
		rt->rt_gwroute = _rtlookup(gate, RTF_PRCLONING);
		if (rt->rt_gwroute == rt) {
			rt->rt_gwroute = NULL;
			--rt->rt_refcnt;
			return EDQUOT;	/* failure */
		}
	}

	/*
	 * This isn't going to do anything useful for host routes, so
	 * don't bother.  Also make sure we have a reasonable mask
	 * (we don't yet have one during adds).
	 */
	if (!(rt->rt_flags & RTF_HOST) && rt_mask(rt) != NULL) {
		struct rtfc_arg arg = { rt, rnh };

		rnh->rnh_walktree_from(rnh, rt_key(rt), rt_mask(rt),
				       rt_fixchange, &arg);
	}

	return 0;
}
static void
rt_maskedcopy(
	struct sockaddr *src,
	struct sockaddr *dst,
	struct sockaddr *netmask)
{
	u_char *cp1 = (u_char *)src;
	u_char *cp2 = (u_char *)dst;
	u_char *cp3 = (u_char *)netmask;
	u_char *cplim = cp2 + *cp3;
	u_char *cplim2 = cp2 + *cp1;

	*cp2++ = *cp1++; *cp2++ = *cp1++; /* copies sa_len & sa_family */
	cp3 += 2;
	if (cplim > cplim2)
		cplim = cplim2;
	while (cp2 < cplim)
		*cp2++ = *cp1++ & *cp3++;
	if (cp2 < cplim2)
		bzero(cp2, cplim2 - cp2);
}

int
rt_llroute(struct sockaddr *dst, struct rtentry *rt0, struct rtentry **drt)
{
	struct rtentry *up_rt, *rt;

	ASSERT_NETISR_NCPUS(mycpuid);

	if (!(rt0->rt_flags & RTF_UP)) {
		up_rt = rtlookup(dst);
		if (up_rt == NULL)
			return (EHOSTUNREACH);
		up_rt->rt_refcnt--;
	} else
		up_rt = rt0;
	if (up_rt->rt_flags & RTF_GATEWAY) {
		if (up_rt->rt_gwroute == NULL) {
			up_rt->rt_gwroute = rtlookup(up_rt->rt_gateway);
			if (up_rt->rt_gwroute == NULL)
				return (EHOSTUNREACH);
		} else if (!(up_rt->rt_gwroute->rt_flags & RTF_UP)) {
			rtfree(up_rt->rt_gwroute);
			up_rt->rt_gwroute = rtlookup(up_rt->rt_gateway);
			if (up_rt->rt_gwroute == NULL)
				return (EHOSTUNREACH);
		}
		rt = up_rt->rt_gwroute;
	} else
		rt = up_rt;
	if (rt->rt_flags & RTF_REJECT &&
	    (rt->rt_rmx.rmx_expire == 0 ||	   /* rt doesn't expire */
	     time_uptime < rt->rt_rmx.rmx_expire)) /* rt not expired */
		return (rt->rt_flags & RTF_HOST ? EHOSTDOWN : EHOSTUNREACH);
	*drt = rt;
	return 0;
}

struct rt_purgecloned_arg {
	struct ifnet *ifp;
	int family;
};

static int
rt_purgecloned_callback(struct radix_node *rn, void *xap)
{
	struct rtentry *rt = (struct rtentry *)rn;
	struct rt_purgecloned_arg *arg = xap;

	if (rt->rt_ifp == arg->ifp && rt->rt_flags & RTF_WASCLONED)
		rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt), 0, NULL);
	return 0;
}

void
rt_purgecloned(struct ifnet *ifp, int af)
{
	struct radix_node_head *rnh;
	struct rt_purgecloned_arg arg = {
		.ifp = ifp,
		.family = af,
	};

	ASSERT_NETISR0;

	if ((rnh = rt_tables[mycpuid][af]) != NULL)
		rnh->rnh_walktree(rnh, rt_purgecloned_callback, &arg);
}
static int
rt_setshims(struct rtentry *rt, struct sockaddr **rt_shim)
{
	int i;

	for (i = 0; i < 3; i++) {
		struct sockaddr *shim = rt_shim[RTAX_MPLS1 + i];
		int shimlen;

		if (shim == NULL)
			break;

		shimlen = RT_ROUNDUP(shim->sa_len);
		R_Malloc(rt->rt_shim[i], struct sockaddr *, shimlen);
		bcopy(shim, rt->rt_shim[i], shimlen);
	}

	return 0;
}

#ifdef ROUTE_DEBUG

/*
 * Print out a route table entry
 */
void
rt_print(struct rt_addrinfo *rtinfo, struct rtentry *rt)
{
	kprintf("rti %p cpu %d route %p flags %08lx: ",
		rtinfo, mycpuid, rt, rt->rt_flags);
	sockaddr_print(rt_key(rt));
	kprintf(" mask ");
	sockaddr_print(rt_mask(rt));
	kprintf(" gw ");
	sockaddr_print(rt->rt_gateway);
	kprintf(" ifp \"%s\"", rt->rt_ifp ? rt->rt_ifp->if_xname : "?");
	kprintf(" ifa %p\n", rt->rt_ifa);
}

void
rt_addrinfo_print(int cmd, struct rt_addrinfo *rti)
{
	int didit = 0;
	int i;

#ifdef ROUTE_DEBUG
	if (cmd == RTM_DELETE && route_debug > 1)
		print_backtrace(-1);
#endif

	switch(cmd) {
	case RTM_ADD:
		kprintf("ADD ");
		break;
	case RTM_RESOLVE:
		kprintf("RES ");
		break;
	case RTM_DELETE:
		kprintf("DEL ");
		break;
	default:
		kprintf("C%02d ", cmd);
		break;
	}
	kprintf("rti %p cpu %d flags %08x ", rti, mycpuid, rti->rti_flags);
	for (i = 0; i < RTAX_MAX; ++i) {
		if (rti->rti_info[i] == NULL)
			continue;
		if (didit)
			kprintf(", ");
		switch (i) {
		case RTAX_DST:
			kprintf("(DST ");
			break;
		case RTAX_GATEWAY:
			kprintf("(GWY ");
			break;
		case RTAX_NETMASK:
			kprintf("(MSK ");
			break;
		case RTAX_GENMASK:
			kprintf("(GEN ");
			break;
		case RTAX_IFP:
			kprintf("(IFP ");
			break;
		case RTAX_IFA:
			kprintf("(IFA ");
			break;
		case RTAX_AUTHOR:
			kprintf("(AUT ");
			break;
		case RTAX_BRD:
			kprintf("(BRD ");
			break;
		default:
			kprintf("(?%02d ", i);
			break;
		}
		sockaddr_print(rti->rti_info[i]);
		kprintf(")");
		didit = 1;
	}
	kprintf(" ifp \"%s\"", rti->rti_ifp ? rti->rti_ifp->if_xname : "?");
	kprintf(" ifa %p\n", rti->rti_ifa);
}

void
sockaddr_print(const struct sockaddr *sa)
{
	const struct sockaddr_in *sa4;
	const struct sockaddr_in6 *sa6;
	int len;
	int i;

	if (sa == NULL) {
		kprintf("NULL");
		return;
	}

	switch (sa->sa_family) {
	case AF_INET:
		sa4 = (const struct sockaddr_in *)sa;
		kprintf("INET %d %d.%d.%d.%d",
			ntohs(sa4->sin_port),
			(ntohl(sa4->sin_addr.s_addr) >> 24) & 255,
			(ntohl(sa4->sin_addr.s_addr) >> 16) & 255,
			(ntohl(sa4->sin_addr.s_addr) >> 8) & 255,
			(ntohl(sa4->sin_addr.s_addr) >> 0) & 255);
		break;
	case AF_INET6:
		sa6 = (const struct sockaddr_in6 *)sa;
		kprintf("INET6 %d %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x",
			ntohs(sa6->sin6_port),
			ntohs(sa6->sin6_addr.s6_addr16[0]),
			ntohs(sa6->sin6_addr.s6_addr16[1]),
			ntohs(sa6->sin6_addr.s6_addr16[2]),
			ntohs(sa6->sin6_addr.s6_addr16[3]),
			ntohs(sa6->sin6_addr.s6_addr16[4]),
			ntohs(sa6->sin6_addr.s6_addr16[5]),
			ntohs(sa6->sin6_addr.s6_addr16[6]),
			ntohs(sa6->sin6_addr.s6_addr16[7]));
		break;
	default:
		kprintf("AF%d ", sa->sa_family);
		len = sa->sa_len - offsetof(struct sockaddr, sa_data[0]);
		while (len > 0 && sa->sa_data[len-1] == 0)
			--len;
		for (i = 0; i < len; ++i) {
			if (i)
				kprintf(".");
			kprintf("%d", (unsigned char)sa->sa_data[i]);
		}
		break;
	}
}

#endif

/*
 * Set up a routing table entry, normally for an interface.
 */
int
rtinit(struct ifaddr *ifa, int cmd, int flags)
{
	struct sockaddr *dst, *deldst, *netmask;
	struct mbuf *m = NULL;
	struct radix_node_head *rnh;
	struct radix_node *rn;
	struct rt_addrinfo rtinfo;
	int error;

	ASSERT_NETISR0;

	if (flags & RTF_HOST) {
		dst = ifa->ifa_dstaddr;
		netmask = NULL;
	} else {
		dst = ifa->ifa_addr;
		netmask = ifa->ifa_netmask;
	}
	/*
	 * If it's a delete, check that if it exists, it's on the correct
	 * interface or we might scrub a route to another ifa which would
	 * be confusing at best and possibly worse.
	 */
	if (cmd == RTM_DELETE) {
		/*
		 * It's a delete, so it should already exist..
		 * If it's a net, mask off the host bits
		 * (Assuming we have a mask)
		 */
		if (netmask != NULL) {
			m = m_get(M_NOWAIT, MT_SONAME);
			if (m == NULL)
				return (ENOBUFS);
			mbuftrackid(m, 34);
			deldst = mtod(m, struct sockaddr *);
			rt_maskedcopy(dst, deldst, netmask);
			dst = deldst;
		}
		/*
		 * Look up an rtentry that is in the routing tree and
		 * contains the correct info.
		 */
		if ((rnh = rt_tables[mycpuid][dst->sa_family]) == NULL ||
		    (rn = rnh->rnh_lookup(dst, netmask, rnh)) == NULL ||
		    ((struct rtentry *)rn)->rt_ifa != ifa ||
		    !sa_equal((const struct sockaddr *)rn->rn_key, dst)) {
			if (m != NULL)
				m_free(m);
			return (flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
		}
		/* XXX */
#if 0
		else {
			/*
			 * One would think that as we are deleting, and we know
			 * it doesn't exist, we could just return at this point
			 * with an "ELSE" clause, but apparently not..
			 */
			return (flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
		}
#endif
	}
	/*
	 * Do the actual request
	 */
	bzero(&rtinfo, sizeof(struct rt_addrinfo));
	rtinfo.rti_info[RTAX_DST] = dst;
	rtinfo.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
	rtinfo.rti_info[RTAX_NETMASK] = netmask;
	rtinfo.rti_flags = flags | ifa->ifa_flags;
	rtinfo.rti_ifa = ifa;
	error = rtrequest1_global(cmd, &rtinfo, rtinit_rtrequest_callback, ifa,
				  RTREQ_PRIO_HIGH);
	if (m != NULL)
		m_free(m);
	return (error);
}

static void
rtinit_rtrequest_callback(int cmd, int error,
			  struct rt_addrinfo *rtinfo, struct rtentry *rt,
			  void *arg)
{
	struct ifaddr *ifa = arg;

	if (error == 0 && rt) {
		if (mycpuid == 0)
			rt_newaddrmsg(cmd, ifa, error, rt);
		if (cmd == RTM_DELETE) {
			if (rt->rt_refcnt == 0) {
				++rt->rt_refcnt;
				rtfree(rt);
			}
		}
	}
}

struct netmsg_rts {
	struct netmsg_base base;
	int req;
	struct rt_addrinfo *rtinfo;
	rtsearch_callback_func_t callback;
	void *arg;
	boolean_t exact_match;
	int found_cnt;
};
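
/*
 * Search every netisr cpu's routing table for the route described by
 * rtinfo and invoke the callback on each cpu's matching entry.  When
 * exact_match is set, a host lookup only matches a host route and a
 * network lookup only matches a network route.
 */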
int
rtsearch_global(int req, struct rt_addrinfo *rtinfo,
    rtsearch_callback_func_t callback, void *arg, boolean_t exact_match,
    boolean_t req_prio)
{
	struct netmsg_rts msg;
	int flags = 0;

	if (req_prio)
		flags = MSGF_PRIORITY;
	netmsg_init(&msg.base, NULL, &curthread->td_msgport, flags,
	    rtsearch_msghandler);
	msg.req = req;
	msg.rtinfo = rtinfo;
	msg.callback = callback;
	msg.arg = arg;
	msg.exact_match = exact_match;
	msg.found_cnt = 0;
	return (netisr_domsg_global(&msg.base));
}

static void
rtsearch_msghandler(netmsg_t msg)
{
	struct netmsg_rts *rmsg = (void *)msg;
	struct rt_addrinfo rtinfo;
	struct radix_node_head *rnh;
	struct rtentry *rt;
	int error;

	ASSERT_NETISR_NCPUS(mycpuid);

	/*
	 * Copy the rtinfo.  We need to make sure that the original
	 * rtinfo, which is setup by the caller, in the netmsg will
	 * _not_ be changed; else the next CPU on the netmsg forwarding
	 * path will see a different rtinfo than what this CPU has seen.
	 */
	rtinfo = *rmsg->rtinfo;

	/*
	 * Find the correct routing tree to use for this Address Family
	 */
	if ((rnh = rt_tables[mycpuid][rtinfo.rti_dst->sa_family]) == NULL) {
		if (mycpuid != 0)
			panic("partially initialized routing tables");
		netisr_replymsg(&rmsg->base, EAFNOSUPPORT);
		return;
	}

	/*
	 * Correct rtinfo for the host route searching.
	 */
	if (rtinfo.rti_flags & RTF_HOST) {
		rtinfo.rti_netmask = NULL;
		rtinfo.rti_flags &= ~(RTF_CLONING | RTF_PRCLONING);
	}

	rt = (struct rtentry *)
	    rnh->rnh_lookup(rtinfo.rti_dst, rtinfo.rti_netmask, rnh);

	/*
	 * If we are asked to do the "exact match", we need to make sure
	 * that host route searching got a host route while a network
	 * route searching got a network route.
	 */
	if (rt != NULL && rmsg->exact_match &&
	    ((rt->rt_flags ^ rtinfo.rti_flags) & RTF_HOST))
		rt = NULL;

	if (rt == NULL) {
		/*
		 * No matching routes have been found, don't count this
		 * as a critical error (here, we set 'error' to 0), just
		 * keep moving on, since at least prcloned routes are not
		 * duplicated onto each CPU.
		 */
		error = 0;
	} else {
		rmsg->found_cnt++;

		rt->rt_refcnt++;
		error = rmsg->callback(rmsg->req, &rtinfo, rt, rmsg->arg,
				       rmsg->found_cnt);
		rt->rt_refcnt--;

		if (error == EJUSTRETURN) {
			netisr_replymsg(&rmsg->base, 0);
			return;
		}
	}

	if (error) {
		KKASSERT(rmsg->found_cnt > 0);

		/*
		 * Under the following cases, an unrecoverable error has
		 * not occurred:
		 * o Request is RTM_GET
		 * o The first time that we find the route, but the
		 *   modification fails.
		 */
		if (rmsg->req != RTM_GET && rmsg->found_cnt > 1) {
			panic("rtsearch_msghandler: unrecoverable error "
			      "cpu %d", mycpuid);
		}
		netisr_replymsg(&rmsg->base, error);
	} else {
		if (rmsg->found_cnt == 0) {
			/* The requested route has not been seen ... */
			error = ESRCH;
		}
		netisr_forwardmsg_error(&rmsg->base, mycpuid + 1, error);
	}
}

int
rtmask_add_global(struct sockaddr *mask, boolean_t req_prio)
{
	struct netmsg_base msg;
	int flags = 0;

	if (req_prio)
		flags = MSGF_PRIORITY;
	netmsg_init(&msg, NULL, &curthread->td_msgport, flags,
	    rtmask_add_msghandler);
	msg.lmsg.u.ms_resultp = mask;

	return (netisr_domsg_global(&msg));
}

struct sockaddr *
_rtmask_lookup(struct sockaddr *mask, boolean_t search)
{
	struct radix_node *n;

#define	clen(s)	(*(const u_char *)(s))
	n = rn_addmask(mask, search, true, rn_cpumaskhead(mycpuid));
	if (n != NULL &&
	    mask->sa_len >= clen(n->rn_key) &&
	    bcmp((const u_char *)mask + 1,
		 n->rn_key + 1, clen(n->rn_key) - 1) == 0) {
		return __DECONST(struct sockaddr *, n->rn_key);
	} else {
		return NULL;
	}
#undef clen
}

static void
rtmask_add_msghandler(netmsg_t msg)
{
	struct sockaddr *mask = msg->lmsg.u.ms_resultp;

	ASSERT_NETISR_NCPUS(mycpuid);

	if (rtmask_lookup(mask) == NULL) {
		netisr_replymsg(&msg->base, ENOBUFS);
		return;
	}
	netisr_forwardmsg(&msg->base, mycpuid + 1);
}

/* This must be before ip6_init2(), which is now SI_ORDER_MIDDLE */
SYSINIT(route, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, route_init, 0);
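
/*
 * rtchange() support: re-home routes that reference old_ifa so that
 * they use new_ifa instead, on every netisr cpu.
 */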
struct rtchange_arg {
	struct ifaddr *old_ifa;
	struct ifaddr *new_ifa;
	struct rtentry *rt;
	int changed;
};

static void
rtchange_ifa(struct rtentry *rt, struct rtchange_arg *ap)
{
	if (rt->rt_ifa->ifa_rtrequest != NULL)
		rt->rt_ifa->ifa_rtrequest(RTM_DELETE, rt);
	IFAFREE(rt->rt_ifa);

	IFAREF(ap->new_ifa);
	rt->rt_ifa = ap->new_ifa;
	rt->rt_ifp = ap->new_ifa->ifa_ifp;
	if (rt->rt_ifa->ifa_rtrequest != NULL)
		rt->rt_ifa->ifa_rtrequest(RTM_ADD, rt);

	ap->changed = 1;
}
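
/*
 * Radix tree walker callback for rtchange(): re-home ordinary routes in
 * place; for (PR)CLONING routes abort the walk and let the dispatcher
 * re-home the route outside of the walk before restarting it.
 */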
static int
rtchange_callback(struct radix_node *rn, void *xap)
{
	struct rtchange_arg *ap = xap;
	struct rtentry *rt = (struct rtentry *)rn;

	if (rt->rt_ifa == ap->old_ifa) {
		if (rt->rt_flags & (RTF_CLONING | RTF_PRCLONING)) {
			/*
			 * We could saw the branch off when we are
			 * still sitting on it, if the ifa_rtrequest
			 * DEL/ADD are called directly from here.
			 */
			ap->rt = rt;
			return EJUSTRETURN;
		}
		rtchange_ifa(rt, ap);
	}
	return 0;
}

struct netmsg_rtchange {
	struct netmsg_base base;
	struct ifaddr *old_ifa;
	struct ifaddr *new_ifa;
	int changed;
};

static void
rtchange_dispatch(netmsg_t msg)
{
	struct netmsg_rtchange *rmsg = (void *)msg;
	struct radix_node_head *rnh;
	struct rtchange_arg arg;
	int cpu;

	cpu = mycpuid;
	ASSERT_NETISR_NCPUS(cpu);

	memset(&arg, 0, sizeof(arg));
	arg.old_ifa = rmsg->old_ifa;
	arg.new_ifa = rmsg->new_ifa;

	rnh = rt_tables[cpu][AF_INET];
	for (;;) {
		int error;

		KKASSERT(arg.rt == NULL);
		error = rnh->rnh_walktree(rnh, rtchange_callback, &arg);
		if (arg.rt != NULL) {
			struct rtentry *rt;

			rt = arg.rt;
			arg.rt = NULL;
			rtchange_ifa(rt, &arg);
		} else {
			break;
		}
	}
	if (arg.changed)
		rmsg->changed = 1;

	netisr_forwardmsg(&rmsg->base, cpu + 1);
}

int
rtchange(struct ifaddr *old_ifa, struct ifaddr *new_ifa)
{
	struct netmsg_rtchange msg;

	/*
	 * XXX individual requests are not independently chained,
	 * which means that the per-cpu route tables will not be
	 * consistent in the middle of the operation.  If routes
	 * related to the interface are manipulated while we are
	 * doing this the inconsistency could trigger a panic.
	 */
	netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
	    rtchange_dispatch);
	msg.old_ifa = old_ifa;
	msg.new_ifa = new_ifa;
	msg.changed = 0;
	netisr_domsg_global(&msg.base);

	if (msg.changed) {
		old_ifa->ifa_flags &= ~IFA_ROUTE;
		new_ifa->ifa_flags |= IFA_ROUTE;
		return 0;
	} else {
		return ENOENT;
	}
}