/*
 * Copyright (c) 2004, 2005 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1980, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)route.c	8.3 (Berkeley) 1/9/95
 * $FreeBSD: src/sys/net/route.c,v 1.59.2.10 2003/01/17 08:04:00 ru Exp $
 * $DragonFly: src/sys/net/route.c,v 1.41 2008/11/09 10:50:15 sephe Exp $
 */

#include "opt_inet.h"
#include "opt_mpls.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/domain.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/globaldata.h>
#include <sys/thread.h>

#include <net/if.h>
#include <net/route.h>
#include <net/netisr.h>

#include <netinet/in.h>
#include <net/ip_mroute/ip_mroute.h>

#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <sys/mplock2.h>
#include <net/netmsg2.h>

#ifdef MPLS
#include <netproto/mpls/mpls.h>
#endif

static struct rtstatistics rtstatistics_percpu[MAXCPU];
#ifdef SMP
#define rtstat	rtstatistics_percpu[mycpuid]
#else
#define rtstat	rtstatistics_percpu[0]
#endif

struct radix_node_head *rt_tables[MAXCPU][AF_MAX+1];
struct lwkt_port *rt_ports[MAXCPU];

static void	rt_maskedcopy (struct sockaddr *, struct sockaddr *,
			       struct sockaddr *);
static void	rtable_init(void);
static void	rtable_service_loop(void *dummy);
static void	rtinit_rtrequest_callback(int, int, struct rt_addrinfo *,
					  struct rtentry *, void *);

#ifdef SMP
static void	rtredirect_msghandler(struct netmsg *netmsg);
static void	rtrequest1_msghandler(struct netmsg *netmsg);
#endif
static void	rtsearch_msghandler(struct netmsg *netmsg);

static void	rtmask_add_msghandler(struct netmsg *netmsg);

static int	rt_setshims(struct rtentry *, struct sockaddr **);

SYSCTL_NODE(_net, OID_AUTO, route, CTLFLAG_RW, 0, "Routing");

#ifdef ROUTE_DEBUG
static int route_debug = 1;
SYSCTL_INT(_net_route, OID_AUTO, route_debug, CTLFLAG_RW,
	   &route_debug, 0, "");
#endif

int route_assert_owner_access = 0;
SYSCTL_INT(_net_route, OID_AUTO, assert_owner_access, CTLFLAG_RW,
	   &route_assert_owner_access, 0, "");

/*
 * Initialize the route table(s) for protocol domains and
 * create a helper thread which will be responsible for updating
 * route table entries on each cpu.
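 *
 * Each helper thread's message port is recorded in rt_ports[], which is
 * how route table operations are dispatched to (and chained through) the
 * per-cpu service threads; see rtable_service_loop() below.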
 */
void
route_init(void)
{
	int cpu;
	thread_t rtd;

	for (cpu = 0; cpu < ncpus; ++cpu)
		bzero(&rtstatistics_percpu[cpu], sizeof(struct rtstatistics));
	rn_init();      /* initialize all zeroes, all ones, mask table */
	rtable_init();	/* call dom_rtattach() on each cpu */

	for (cpu = 0; cpu < ncpus; cpu++) {
		lwkt_create(rtable_service_loop, NULL, &rtd, NULL,
			    0, cpu, "rtable_cpu %d", cpu);
		rt_ports[cpu] = &rtd->td_msgport;
	}
}

static void
rtable_init_oncpu(struct netmsg *nmsg)
{
	struct domain *dom;
	int cpu = mycpuid;

	SLIST_FOREACH(dom, &domains, dom_next) {
		if (dom->dom_rtattach) {
			dom->dom_rtattach(
				(void **)&rt_tables[cpu][dom->dom_family],
				dom->dom_rtoffset);
		}
	}
	ifnet_forwardmsg(&nmsg->nm_lmsg, cpu + 1);
}

static void
rtable_init(void)
{
	struct netmsg nmsg;

	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
		    0, rtable_init_oncpu);
	ifnet_domsg(&nmsg.nm_lmsg, 0);
}

/*
 * Our per-cpu table management protocol thread.  All route table operations
 * are sequentially chained through all cpus starting at cpu #0 in order to
 * maintain duplicate route tables on each cpu.  Having a separate route
 * table management thread allows the protocol and interrupt threads to
 * issue route table changes.
 */
static void
rtable_service_loop(void *dummy __unused)
{
	struct netmsg *netmsg;
	thread_t td = curthread;

	get_mplock();	/* XXX is this mpsafe yet? */

	while ((netmsg = lwkt_waitport(&td->td_msgport, 0)) != NULL) {
		netmsg->nm_dispatch(netmsg);
	}
}

/*
 * Routing statistics.
 */
#ifdef SMP
static int
sysctl_rtstatistics(SYSCTL_HANDLER_ARGS)
{
	int cpu, error = 0;

	for (cpu = 0; cpu < ncpus; ++cpu) {
		if ((error = SYSCTL_OUT(req, &rtstatistics_percpu[cpu],
					sizeof(struct rtstatistics))))
			break;
		if ((error = SYSCTL_IN(req, &rtstatistics_percpu[cpu],
				       sizeof(struct rtstatistics))))
			break;
	}

	return (error);
}
SYSCTL_PROC(_net_route, OID_AUTO, stats, (CTLTYPE_OPAQUE|CTLFLAG_RW),
	0, 0, sysctl_rtstatistics, "S,rtstatistics", "Routing statistics");
#else
SYSCTL_STRUCT(_net_route, OID_AUTO, stats, CTLFLAG_RW, &rtstat, rtstatistics,
	"Routing statistics");
#endif

/*
 * Packet routing routines.
 */

/*
 * Look up and fill in the "ro_rt" rtentry field in a route structure given
 * an address in the "ro_dst" field.  Always send a report on a miss and
 * always clone routes.
 */
void
rtalloc(struct route *ro)
{
	rtalloc_ign(ro, 0UL);
}

/*
 * Look up and fill in the "ro_rt" rtentry field in a route structure given
 * an address in the "ro_dst" field.  Always send a report on a miss and
 * optionally clone routes when RTF_CLONING or RTF_PRCLONING are not being
 * ignored.
 */
void
rtalloc_ign(struct route *ro, u_long ignoreflags)
{
	if (ro->ro_rt != NULL) {
		if (ro->ro_rt->rt_ifp != NULL && ro->ro_rt->rt_flags & RTF_UP)
			return;
		rtfree(ro->ro_rt);
		ro->ro_rt = NULL;
	}
	ro->ro_rt = _rtlookup(&ro->ro_dst, RTL_REPORTMSG, ignoreflags);
}

/*
 * Look up the route that matches the given "dst" address.
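 *
 * The returned rtentry, if any, holds a reference which the caller is
 * responsible for dropping with rtfree().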
 *
 * Route lookup can have the side-effect of creating and returning
 * a cloned route instead when "dst" matches a cloning route and the
 * RTF_CLONING and RTF_PRCLONING flags are not being ignored.
 *
 * Any route returned has its reference count incremented.
 */
struct rtentry *
_rtlookup(struct sockaddr *dst, boolean_t generate_report, u_long ignore)
{
	struct radix_node_head *rnh = rt_tables[mycpuid][dst->sa_family];
	struct rtentry *rt;

	if (rnh == NULL)
		goto unreach;

	/*
	 * Look up route in the radix tree.
	 */
	rt = (struct rtentry *) rnh->rnh_matchaddr((char *)dst, rnh);
	if (rt == NULL)
		goto unreach;

	/*
	 * Handle cloning routes.
	 */
	if ((rt->rt_flags & ~ignore & (RTF_CLONING | RTF_PRCLONING)) != 0) {
		struct rtentry *clonedroute;
		int error;

		clonedroute = rt;	/* copy in/copy out parameter */
		error = rtrequest(RTM_RESOLVE, dst, NULL, NULL, 0,
				  &clonedroute);	/* clone the route */
		if (error != 0) {	/* cloning failed */
			if (generate_report)
				rt_dstmsg(RTM_MISS, dst, error);
			rt->rt_refcnt++;
			return (rt);	/* return the uncloned route */
		}
		if (generate_report) {
			if (clonedroute->rt_flags & RTF_XRESOLVE)
				rt_dstmsg(RTM_RESOLVE, dst, 0);
			else
				rt_rtmsg(RTM_ADD, clonedroute,
					 clonedroute->rt_ifp, 0);
		}
		return (clonedroute);	/* return cloned route */
	}

	/*
	 * Increment the reference count of the matched route and return.
	 */
	rt->rt_refcnt++;
	return (rt);

unreach:
	rtstat.rts_unreach++;
	if (generate_report)
		rt_dstmsg(RTM_MISS, dst, 0);
	return (NULL);
}

void
rtfree(struct rtentry *rt)
{
	if (rt->rt_cpuid == mycpuid)
		rtfree_oncpu(rt);
	else
		rtfree_remote(rt, 1);
}

void
rtfree_oncpu(struct rtentry *rt)
{
	KKASSERT(rt->rt_cpuid == mycpuid);
	KASSERT(rt->rt_refcnt > 0, ("rtfree: rt_refcnt %ld", rt->rt_refcnt));

	--rt->rt_refcnt;
	if (rt->rt_refcnt == 0) {
		struct radix_node_head *rnh =
		    rt_tables[mycpuid][rt_key(rt)->sa_family];

		if (rnh->rnh_close)
			rnh->rnh_close((struct radix_node *)rt, rnh);
		if (!(rt->rt_flags & RTF_UP)) {
			/* deallocate route */
			if (rt->rt_ifa != NULL)
				IFAFREE(rt->rt_ifa);
			if (rt->rt_parent != NULL)
				RTFREE(rt->rt_parent);	/* recursive call! */
			Free(rt_key(rt));
			Free(rt);
		}
	}
}

static void
rtfree_remote_dispatch(struct netmsg *nmsg)
{
	struct lwkt_msg *lmsg = &nmsg->nm_lmsg;
	struct rtentry *rt = lmsg->u.ms_resultp;

	rtfree_oncpu(rt);
	lwkt_replymsg(lmsg, 0);
}

void
rtfree_remote(struct rtentry *rt, int allow_panic)
{
	struct netmsg nmsg;
	struct lwkt_msg *lmsg;

	KKASSERT(rt->rt_cpuid != mycpuid);

	if (route_assert_owner_access && allow_panic) {
		panic("rt remote free rt_cpuid %d, mycpuid %d\n",
		      rt->rt_cpuid, mycpuid);
	} else {
		kprintf("rt remote free rt_cpuid %d, mycpuid %d\n",
			rt->rt_cpuid, mycpuid);
		print_backtrace(-1);
	}

	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
		    0, rtfree_remote_dispatch);
	lmsg = &nmsg.nm_lmsg;
	lmsg->u.ms_resultp = rt;

	lwkt_domsg(rtable_portfn(rt->rt_cpuid), lmsg, 0);
}

static int
rtredirect_oncpu(struct sockaddr *dst, struct sockaddr *gateway,
		 struct sockaddr *netmask, int flags, struct sockaddr *src)
{
	struct rtentry *rt = NULL;
	struct rt_addrinfo rtinfo;
	struct ifaddr *ifa;
	u_long *stat = NULL;
	int error;

	/* verify the gateway is directly reachable */
	if ((ifa = ifa_ifwithnet(gateway)) == NULL) {
		error = ENETUNREACH;
		goto out;
	}

	/*
	 * If the redirect isn't from our current router for this destination,
	 * it's either old or wrong.
	 */
	if (!(flags & RTF_DONE) &&	/* XXX JH */
	    (rt = rtpurelookup(dst)) != NULL &&
	    (!sa_equal(src, rt->rt_gateway) || rt->rt_ifa != ifa)) {
		error = EINVAL;
		goto done;
	}

	/*
	 * If it redirects us to ourselves, we have a routing loop,
	 * perhaps as a result of an interface going down recently.
	 */
	if (ifa_ifwithaddr(gateway)) {
		error = EHOSTUNREACH;
		goto done;
	}

	/*
	 * Create a new entry if the lookup failed or if we got back
	 * a wildcard entry for the default route.  This is necessary
	 * for hosts which use routing redirects generated by smart
	 * gateways to dynamically build the routing tables.
	 */
	if (rt == NULL)
		goto create;
	if ((rt_mask(rt) != NULL && rt_mask(rt)->sa_len < 2)) {
		rtfree(rt);
		goto create;
	}

	/* Ignore redirects for directly connected hosts. */
	if (!(rt->rt_flags & RTF_GATEWAY)) {
		error = EHOSTUNREACH;
		goto done;
	}

	if (!(rt->rt_flags & RTF_HOST) && (flags & RTF_HOST)) {
		/*
		 * Changing from a network route to a host route.
		 * Create a new host route rather than smashing the
		 * network route.
		 */
create:
		flags |= RTF_GATEWAY | RTF_DYNAMIC;
		bzero(&rtinfo, sizeof(struct rt_addrinfo));
		rtinfo.rti_info[RTAX_DST] = dst;
		rtinfo.rti_info[RTAX_GATEWAY] = gateway;
		rtinfo.rti_info[RTAX_NETMASK] = netmask;
		rtinfo.rti_flags = flags;
		rtinfo.rti_ifa = ifa;
		rt = NULL;	/* copy-in/copy-out parameter */
		error = rtrequest1(RTM_ADD, &rtinfo, &rt);
		if (rt != NULL)
			flags = rt->rt_flags;
		stat = &rtstat.rts_dynamic;
	} else {
		/*
		 * Smash the current notion of the gateway to this destination.
		 * Should check about netmask!!!
		 */
		rt->rt_flags |= RTF_MODIFIED;
		flags |= RTF_MODIFIED;

		/* We only need to report rtmsg on CPU0 */
		rt_setgate(rt, rt_key(rt), gateway,
			   mycpuid == 0 ? RTL_REPORTMSG : RTL_DONTREPORT);
		error = 0;
		stat = &rtstat.rts_newgateway;
	}

done:
	if (rt != NULL)
		rtfree(rt);
out:
	if (error != 0)
		rtstat.rts_badredirect++;
	else if (stat != NULL)
		(*stat)++;

	return error;
}

#ifdef SMP

struct netmsg_rtredirect {
	struct netmsg	netmsg;
	struct sockaddr	*dst;
	struct sockaddr	*gateway;
	struct sockaddr	*netmask;
	int		flags;
	struct sockaddr	*src;
};

#endif

/*
 * Force a routing table entry to the specified
 * destination to go through the given gateway.
 * Normally called as a result of a routing redirect
 * message from the network layer.
 *
 * N.B.: must be called at splnet
 */
void
rtredirect(struct sockaddr *dst, struct sockaddr *gateway,
	   struct sockaddr *netmask, int flags, struct sockaddr *src)
{
	struct rt_addrinfo rtinfo;
	int error;
#ifdef SMP
	struct netmsg_rtredirect msg;

	netmsg_init(&msg.netmsg, NULL, &curthread->td_msgport,
		    0, rtredirect_msghandler);
	msg.dst = dst;
	msg.gateway = gateway;
	msg.netmask = netmask;
	msg.flags = flags;
	msg.src = src;
	error = lwkt_domsg(rtable_portfn(0), &msg.netmsg.nm_lmsg, 0);
#else
	error = rtredirect_oncpu(dst, gateway, netmask, flags, src);
#endif
	bzero(&rtinfo, sizeof(struct rt_addrinfo));
	rtinfo.rti_info[RTAX_DST] = dst;
	rtinfo.rti_info[RTAX_GATEWAY] = gateway;
	rtinfo.rti_info[RTAX_NETMASK] = netmask;
	rtinfo.rti_info[RTAX_AUTHOR] = src;
	rt_missmsg(RTM_REDIRECT, &rtinfo, flags, error);
}

#ifdef SMP

static void
rtredirect_msghandler(struct netmsg *netmsg)
{
	struct netmsg_rtredirect *msg = (void *)netmsg;
	int nextcpu;

	rtredirect_oncpu(msg->dst, msg->gateway, msg->netmask,
			 msg->flags, msg->src);
	nextcpu = mycpuid + 1;
	if (nextcpu < ncpus)
		lwkt_forwardmsg(rtable_portfn(nextcpu), &netmsg->nm_lmsg);
	else
		lwkt_replymsg(&netmsg->nm_lmsg, 0);
}

#endif

/*
 * Routing table ioctl interface.
 */
int
rtioctl(u_long req, caddr_t data, struct ucred *cred)
{
#ifdef INET
	/* Multicast goop, grrr... */
	return mrt_ioctl ? mrt_ioctl(req, data) : EOPNOTSUPP;
#else
	return ENXIO;
#endif
}

struct ifaddr *
ifa_ifwithroute(int flags, struct sockaddr *dst, struct sockaddr *gateway)
{
	struct ifaddr *ifa;

	if (!(flags & RTF_GATEWAY)) {
		/*
		 * If we are adding a route to an interface,
		 * and the interface is a point-to-point link,
		 * we should search for the destination
		 * as our clue to the interface.  Otherwise
		 * we can use the local address.
		 */
		ifa = NULL;
		if (flags & RTF_HOST) {
			ifa = ifa_ifwithdstaddr(dst);
		}
		if (ifa == NULL)
			ifa = ifa_ifwithaddr(gateway);
	} else {
		/*
		 * If we are adding a route to a remote net
		 * or host, the gateway may still be on the
		 * other end of a pt to pt link.
		 */
		ifa = ifa_ifwithdstaddr(gateway);
	}
	if (ifa == NULL)
		ifa = ifa_ifwithnet(gateway);
	if (ifa == NULL) {
		struct rtentry *rt;

		rt = rtpurelookup(gateway);
		if (rt == NULL)
			return (NULL);
		rt->rt_refcnt--;
		if ((ifa = rt->rt_ifa) == NULL)
			return (NULL);
	}
	if (ifa->ifa_addr->sa_family != dst->sa_family) {
		struct ifaddr *oldifa = ifa;

		ifa = ifaof_ifpforaddr(dst, ifa->ifa_ifp);
		if (ifa == NULL)
			ifa = oldifa;
	}
	return (ifa);
}

static int rt_fixdelete (struct radix_node *, void *);
static int rt_fixchange (struct radix_node *, void *);

struct rtfc_arg {
	struct rtentry *rt0;
	struct radix_node_head *rnh;
};

/*
 * Set rtinfo->rti_ifa and rtinfo->rti_ifp.
 */
int
rt_getifa(struct rt_addrinfo *rtinfo)
{
	struct sockaddr *gateway = rtinfo->rti_info[RTAX_GATEWAY];
	struct sockaddr *dst = rtinfo->rti_info[RTAX_DST];
	struct sockaddr *ifaaddr = rtinfo->rti_info[RTAX_IFA];
	int flags = rtinfo->rti_flags;

	/*
	 * ifp may be specified by sockaddr_dl
	 * when protocol address is ambiguous.
	 */
	if (rtinfo->rti_ifp == NULL) {
		struct sockaddr *ifpaddr;

		ifpaddr = rtinfo->rti_info[RTAX_IFP];
		if (ifpaddr != NULL && ifpaddr->sa_family == AF_LINK) {
			struct ifaddr *ifa;

			ifa = ifa_ifwithnet(ifpaddr);
			if (ifa != NULL)
				rtinfo->rti_ifp = ifa->ifa_ifp;
		}
	}

	if (rtinfo->rti_ifa == NULL && ifaaddr != NULL)
		rtinfo->rti_ifa = ifa_ifwithaddr(ifaaddr);
	if (rtinfo->rti_ifa == NULL) {
		struct sockaddr *sa;

		sa = ifaaddr != NULL ? ifaaddr :
		    (gateway != NULL ? gateway : dst);
		if (sa != NULL && rtinfo->rti_ifp != NULL)
			rtinfo->rti_ifa = ifaof_ifpforaddr(sa, rtinfo->rti_ifp);
		else if (dst != NULL && gateway != NULL)
			rtinfo->rti_ifa = ifa_ifwithroute(flags, dst, gateway);
		else if (sa != NULL)
			rtinfo->rti_ifa = ifa_ifwithroute(flags, sa, sa);
	}
	if (rtinfo->rti_ifa == NULL)
		return (ENETUNREACH);

	if (rtinfo->rti_ifp == NULL)
		rtinfo->rti_ifp = rtinfo->rti_ifa->ifa_ifp;
	return (0);
}

/*
 * Do appropriate manipulations of a routing tree given
 * all the bits of info needed
 */
int
rtrequest(
	int req,
	struct sockaddr *dst,
	struct sockaddr *gateway,
	struct sockaddr *netmask,
	int flags,
	struct rtentry **ret_nrt)
{
	struct rt_addrinfo rtinfo;

	bzero(&rtinfo, sizeof(struct rt_addrinfo));
	rtinfo.rti_info[RTAX_DST] = dst;
	rtinfo.rti_info[RTAX_GATEWAY] = gateway;
	rtinfo.rti_info[RTAX_NETMASK] = netmask;
	rtinfo.rti_flags = flags;
	return rtrequest1(req, &rtinfo, ret_nrt);
}

int
rtrequest_global(
	int req,
	struct sockaddr *dst,
	struct sockaddr *gateway,
	struct sockaddr *netmask,
	int flags)
{
	struct rt_addrinfo rtinfo;

	bzero(&rtinfo, sizeof(struct rt_addrinfo));
	rtinfo.rti_info[RTAX_DST] = dst;
	rtinfo.rti_info[RTAX_GATEWAY] = gateway;
	rtinfo.rti_info[RTAX_NETMASK] = netmask;
	rtinfo.rti_flags = flags;
	return rtrequest1_global(req, &rtinfo, NULL, NULL);
}

#ifdef SMP

struct netmsg_rtq {
	struct netmsg		netmsg;
	int			req;
	struct rt_addrinfo	*rtinfo;
	rtrequest1_callback_func_t callback;
	void			*arg;
};

#endif

int
rtrequest1_global(int req, struct rt_addrinfo *rtinfo,
		  rtrequest1_callback_func_t callback, void *arg)
{
	int error;
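
	/*
	 * On SMP the request is dispatched to cpu0's route table thread
	 * and then chained through the remaining cpus by
	 * rtrequest1_msghandler(), so that every per-cpu table sees the
	 * same change.  On UP the request is simply run directly.
	 */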
#ifdef SMP
	struct netmsg_rtq msg;

	netmsg_init(&msg.netmsg, NULL, &curthread->td_msgport,
		    0, rtrequest1_msghandler);
	msg.netmsg.nm_lmsg.ms_error = -1;
	msg.req = req;
	msg.rtinfo = rtinfo;
	msg.callback = callback;
	msg.arg = arg;
	error = lwkt_domsg(rtable_portfn(0), &msg.netmsg.nm_lmsg, 0);
#else
	struct rtentry *rt = NULL;

	error = rtrequest1(req, rtinfo, &rt);
	if (rt)
		--rt->rt_refcnt;
	if (callback)
		callback(req, error, rtinfo, rt, arg);
#endif
	return (error);
}

/*
 * Handle a route table request on the current cpu.  Since the route tables
 * are supposed to be identical on each cpu, an error occurring later in the
 * message chain is considered system-fatal.
 */
#ifdef SMP

static void
rtrequest1_msghandler(struct netmsg *netmsg)
{
	struct netmsg_rtq *msg = (void *)netmsg;
	struct rt_addrinfo rtinfo;
	struct rtentry *rt = NULL;
	int nextcpu;
	int error;

	/*
	 * Copy the rtinfo.  We need to make sure that the original
	 * rtinfo, which is setup by the caller, in the netmsg will
	 * _not_ be changed; else the next CPU on the netmsg forwarding
	 * path will see a different rtinfo than what this CPU has seen.
	 */
	rtinfo = *msg->rtinfo;

	error = rtrequest1(msg->req, &rtinfo, &rt);
	if (rt)
		--rt->rt_refcnt;
	if (msg->callback)
		msg->callback(msg->req, error, &rtinfo, rt, msg->arg);

	/*
	 * RTM_DELETEs are propagated even if an error occurs, since a
	 * cloned route might be undergoing deletion and cloned routes
	 * are not necessarily replicated.  An overall error is returned
	 * only if no cpus have the route in question.
	 */
	if (msg->netmsg.nm_lmsg.ms_error < 0 || error == 0)
		msg->netmsg.nm_lmsg.ms_error = error;

	nextcpu = mycpuid + 1;
	if (error && msg->req != RTM_DELETE) {
		if (mycpuid != 0) {
			panic("rtrequest1_msghandler: rtrequest table "
			      "error was not on cpu #0");
		}
		lwkt_replymsg(&msg->netmsg.nm_lmsg, error);
	} else if (nextcpu < ncpus) {
		lwkt_forwardmsg(rtable_portfn(nextcpu), &msg->netmsg.nm_lmsg);
	} else {
		lwkt_replymsg(&msg->netmsg.nm_lmsg,
			      msg->netmsg.nm_lmsg.ms_error);
	}
}

#endif

int
rtrequest1(int req, struct rt_addrinfo *rtinfo, struct rtentry **ret_nrt)
{
	struct sockaddr *dst = rtinfo->rti_info[RTAX_DST];
	struct rtentry *rt;
	struct radix_node *rn;
	struct radix_node_head *rnh;
	struct ifaddr *ifa;
	struct sockaddr *ndst;
	boolean_t reportmsg;
	int error = 0;

#define gotoerr(x) { error = x ; goto bad; }

#ifdef ROUTE_DEBUG
	if (route_debug)
		rt_addrinfo_print(req, rtinfo);
#endif

	crit_enter();
	/*
	 * Find the correct routing tree to use for this Address Family
	 */
	if ((rnh = rt_tables[mycpuid][dst->sa_family]) == NULL)
		gotoerr(EAFNOSUPPORT);

	/*
	 * If we are adding a host route then we don't want to put
	 * a netmask in the tree, nor do we want to clone it.
	 */
	if (rtinfo->rti_flags & RTF_HOST) {
		rtinfo->rti_info[RTAX_NETMASK] = NULL;
		rtinfo->rti_flags &= ~(RTF_CLONING | RTF_PRCLONING);
	}

	switch (req) {
	case RTM_DELETE:
		/* Remove the item from the tree. */
		rn = rnh->rnh_deladdr((char *)rtinfo->rti_info[RTAX_DST],
				      (char *)rtinfo->rti_info[RTAX_NETMASK],
				      rnh);
		if (rn == NULL)
			gotoerr(ESRCH);
		KASSERT(!(rn->rn_flags & (RNF_ACTIVE | RNF_ROOT)),
			("rnh_deladdr returned flags 0x%x", rn->rn_flags));
		rt = (struct rtentry *)rn;

		/* ref to prevent a deletion race */
		++rt->rt_refcnt;

		/* Free any routes cloned from this one. */
		if ((rt->rt_flags & (RTF_CLONING | RTF_PRCLONING)) &&
		    rt_mask(rt) != NULL) {
			rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),
					       (char *)rt_mask(rt),
					       rt_fixdelete, rt);
		}

		if (rt->rt_gwroute != NULL) {
			RTFREE(rt->rt_gwroute);
			rt->rt_gwroute = NULL;
		}

		/*
		 * NB: RTF_UP must be set during the search above,
		 * because we might delete the last ref, causing
		 * rt to get freed prematurely.
		 */
		rt->rt_flags &= ~RTF_UP;

#ifdef ROUTE_DEBUG
		if (route_debug)
			rt_print(rtinfo, rt);
#endif

		/* Give the protocol a chance to keep things in sync. */
		if ((ifa = rt->rt_ifa) && ifa->ifa_rtrequest)
			ifa->ifa_rtrequest(RTM_DELETE, rt, rtinfo);

		/*
		 * If the caller wants it, then it can have it,
		 * but it's up to it to free the rtentry as we won't be
		 * doing it.
		 */
		KASSERT(rt->rt_refcnt >= 0,
			("rtrequest1(DELETE): refcnt %ld", rt->rt_refcnt));
		if (ret_nrt != NULL) {
			/* leave ref intact for return */
			*ret_nrt = rt;
		} else {
			/* deref / attempt to destroy */
			rtfree(rt);
		}
		break;

	case RTM_RESOLVE:
		if (ret_nrt == NULL || (rt = *ret_nrt) == NULL)
			gotoerr(EINVAL);
		ifa = rt->rt_ifa;
		rtinfo->rti_flags =
		    rt->rt_flags & ~(RTF_CLONING | RTF_PRCLONING | RTF_STATIC);
		rtinfo->rti_flags |= RTF_WASCLONED;
		rtinfo->rti_info[RTAX_GATEWAY] = rt->rt_gateway;
		if ((rtinfo->rti_info[RTAX_NETMASK] = rt->rt_genmask) == NULL)
			rtinfo->rti_flags |= RTF_HOST;
		rtinfo->rti_info[RTAX_MPLS1] = rt->rt_shim[0];
		rtinfo->rti_info[RTAX_MPLS2] = rt->rt_shim[1];
		rtinfo->rti_info[RTAX_MPLS3] = rt->rt_shim[2];
		goto makeroute;

	case RTM_ADD:
		KASSERT(!(rtinfo->rti_flags & RTF_GATEWAY) ||
			rtinfo->rti_info[RTAX_GATEWAY] != NULL,
			("rtrequest: GATEWAY but no gateway"));

		if (rtinfo->rti_ifa == NULL && (error = rt_getifa(rtinfo)))
			gotoerr(error);
		ifa = rtinfo->rti_ifa;
makeroute:
		R_Malloc(rt, struct rtentry *, sizeof(struct rtentry));
		if (rt == NULL)
			gotoerr(ENOBUFS);
		bzero(rt, sizeof(struct rtentry));
		rt->rt_flags = RTF_UP | rtinfo->rti_flags;
		rt->rt_cpuid = mycpuid;

		if (mycpuid != 0 && req == RTM_ADD) {
			/* For RTM_ADD, we have already sent rtmsg on CPU0. */
			reportmsg = RTL_DONTREPORT;
		} else {
			/*
			 * For RTM_ADD, we only send rtmsg on CPU0.
			 * For RTM_RESOLVE, we always send rtmsg. XXX
			 */
			reportmsg = RTL_REPORTMSG;
		}
		error = rt_setgate(rt, dst, rtinfo->rti_info[RTAX_GATEWAY],
				   reportmsg);
		if (error != 0) {
			Free(rt);
			gotoerr(error);
		}

		ndst = rt_key(rt);
		if (rtinfo->rti_info[RTAX_NETMASK] != NULL)
			rt_maskedcopy(dst, ndst,
				      rtinfo->rti_info[RTAX_NETMASK]);
		else
			bcopy(dst, ndst, dst->sa_len);

		if (rtinfo->rti_info[RTAX_MPLS1] != NULL)
			rt_setshims(rt, rtinfo->rti_info);

		/*
		 * Note that we now have a reference to the ifa.
		 * This moved from below so that rnh->rnh_addaddr() can
		 * examine the ifa and ifa->ifa_ifp if it so desires.
		 */
		IFAREF(ifa);
		rt->rt_ifa = ifa;
		rt->rt_ifp = ifa->ifa_ifp;
		/* XXX mtu manipulation will be done in rnh_addaddr -- itojun */

		rn = rnh->rnh_addaddr((char *)ndst,
				      (char *)rtinfo->rti_info[RTAX_NETMASK],
				      rnh, rt->rt_nodes);
		if (rn == NULL) {
			struct rtentry *oldrt;

			/*
			 * We already have one of these in the tree.
			 * We do a special hack: if the old route was
			 * cloned, then we blow it away and try
			 * re-inserting the new one.
			 */
			oldrt = rtpurelookup(ndst);
			if (oldrt != NULL) {
				--oldrt->rt_refcnt;
				if (oldrt->rt_flags & RTF_WASCLONED) {
					rtrequest(RTM_DELETE, rt_key(oldrt),
						  oldrt->rt_gateway,
						  rt_mask(oldrt),
						  oldrt->rt_flags, NULL);
					rn = rnh->rnh_addaddr((char *)ndst,
						  (char *)
						  rtinfo->rti_info[RTAX_NETMASK],
						  rnh, rt->rt_nodes);
				}
			}
		}

		/*
		 * If it still failed to go into the tree,
		 * then un-make it (this should be a function).
		 */
		if (rn == NULL) {
			if (rt->rt_gwroute != NULL)
				rtfree(rt->rt_gwroute);
			IFAFREE(ifa);
			Free(rt_key(rt));
			Free(rt);
			gotoerr(EEXIST);
		}

		/*
		 * If we got here from RESOLVE, then we are cloning
		 * so clone the rest, and note that we
		 * are a clone (and increment the parent's references)
		 */
		if (req == RTM_RESOLVE) {
			rt->rt_rmx = (*ret_nrt)->rt_rmx; /* copy metrics */
			rt->rt_rmx.rmx_pksent = 0; /* reset packet counter */
			if ((*ret_nrt)->rt_flags &
			    (RTF_CLONING | RTF_PRCLONING)) {
				rt->rt_parent = *ret_nrt;
				(*ret_nrt)->rt_refcnt++;
			}
		}

		/*
		 * if this protocol has something to add to this then
		 * allow it to do that as well.
		 */
		if (ifa->ifa_rtrequest != NULL)
			ifa->ifa_rtrequest(req, rt, rtinfo);

		/*
		 * We repeat the same procedure from rt_setgate() here because
		 * it doesn't fire when we call it there because the node
		 * hasn't been added to the tree yet.
		 */
		if (req == RTM_ADD && !(rt->rt_flags & RTF_HOST) &&
		    rt_mask(rt) != NULL) {
			struct rtfc_arg arg = { rt, rnh };

			rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),
					       (char *)rt_mask(rt),
					       rt_fixchange, &arg);
		}

#ifdef ROUTE_DEBUG
		if (route_debug)
			rt_print(rtinfo, rt);
#endif
		/*
		 * Return the resulting rtentry,
		 * increasing the number of references by one.
		 */
		if (ret_nrt != NULL) {
			rt->rt_refcnt++;
			*ret_nrt = rt;
		}
		break;
	default:
		error = EOPNOTSUPP;
	}
bad:
#ifdef ROUTE_DEBUG
	if (route_debug) {
		if (error)
			kprintf("rti %p failed error %d\n", rtinfo, error);
		else
			kprintf("rti %p succeeded\n", rtinfo);
	}
#endif
	crit_exit();
	return (error);
}

/*
 * Called from rtrequest(RTM_DELETE, ...) to fix up the route's ``family''
 * (i.e., the routes related to it by the operation of cloning).  This
 * routine is iterated over all potential former-child-routes by way of
 * rnh->rnh_walktree_from() above, and those that actually are children of
 * the late parent (passed in as VP here) are themselves deleted.
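 * Child routes which are themselves cloning routes, or which are pinned,
 * are left alone.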
 */
static int
rt_fixdelete(struct radix_node *rn, void *vp)
{
	struct rtentry *rt = (struct rtentry *)rn;
	struct rtentry *rt0 = vp;

	if (rt->rt_parent == rt0 &&
	    !(rt->rt_flags & (RTF_PINNED | RTF_CLONING | RTF_PRCLONING))) {
		return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
				 rt->rt_flags, NULL);
	}
	return 0;
}

/*
 * This routine is called from rt_setgate() to do the analogous thing for
 * adds and changes.  There is the added complication in this case of a
 * middle insert; i.e., insertion of a new network route between an older
 * network route and (cloned) host routes.  For this reason, a simple check
 * of rt->rt_parent is insufficient; each candidate route must be tested
 * against the (mask, value) of the new route (passed as before in vp)
 * to see if the new route matches it.
 *
 * XXX - it may be possible to do fixdelete() for changes and reserve this
 * routine just for adds.  I'm not sure why I thought it was necessary to do
 * changes this way.
 */
#ifdef DEBUG
static int rtfcdebug = 0;
#endif

static int
rt_fixchange(struct radix_node *rn, void *vp)
{
	struct rtentry *rt = (struct rtentry *)rn;
	struct rtfc_arg *ap = vp;
	struct rtentry *rt0 = ap->rt0;
	struct radix_node_head *rnh = ap->rnh;
	u_char *xk1, *xm1, *xk2, *xmp;
	int i, len, mlen;

#ifdef DEBUG
	if (rtfcdebug)
		kprintf("rt_fixchange: rt %p, rt0 %p\n", rt, rt0);
#endif

	if (rt->rt_parent == NULL ||
	    (rt->rt_flags & (RTF_PINNED | RTF_CLONING | RTF_PRCLONING))) {
#ifdef DEBUG
		if (rtfcdebug) kprintf("no parent, pinned or cloning\n");
#endif
		return 0;
	}

	if (rt->rt_parent == rt0) {
#ifdef DEBUG
		if (rtfcdebug) kprintf("parent match\n");
#endif
		return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
				 rt->rt_flags, NULL);
	}

	/*
	 * There probably is a function somewhere which does this...
	 * if not, there should be.
	 */
	len = imin(rt_key(rt0)->sa_len, rt_key(rt)->sa_len);

	xk1 = (u_char *)rt_key(rt0);
	xm1 = (u_char *)rt_mask(rt0);
	xk2 = (u_char *)rt_key(rt);

	/* avoid applying a less specific route */
	xmp = (u_char *)rt_mask(rt->rt_parent);
	mlen = rt_key(rt->rt_parent)->sa_len;
	if (mlen > rt_key(rt0)->sa_len) {
#ifdef DEBUG
		if (rtfcdebug)
			kprintf("rt_fixchange: inserting a less "
				"specific route\n");
#endif
		return 0;
	}
	for (i = rnh->rnh_treetop->rn_offset; i < mlen; i++) {
		if ((xmp[i] & ~(xmp[i] ^ xm1[i])) != xmp[i]) {
#ifdef DEBUG
			if (rtfcdebug)
				kprintf("rt_fixchange: inserting a less "
					"specific route\n");
#endif
			return 0;
		}
	}

	for (i = rnh->rnh_treetop->rn_offset; i < len; i++) {
		if ((xk2[i] & xm1[i]) != xk1[i]) {
#ifdef DEBUG
			if (rtfcdebug) kprintf("no match\n");
#endif
			return 0;
		}
	}

	/*
	 * OK, this node is a clone, and matches the node currently being
	 * changed/added under the node's mask.  So, get rid of it.
	 */
#ifdef DEBUG
	if (rtfcdebug) kprintf("deleting\n");
#endif
	return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
			 rt->rt_flags, NULL);
}

#define ROUNDUP(a) (a>0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))

int
rt_setgate(struct rtentry *rt0, struct sockaddr *dst, struct sockaddr *gate,
	   boolean_t generate_report)
{
	char *space, *oldspace;
	int dlen = ROUNDUP(dst->sa_len), glen = ROUNDUP(gate->sa_len);
	struct rtentry *rt = rt0;
	struct radix_node_head *rnh = rt_tables[mycpuid][dst->sa_family];

	/*
	 * A host route with the destination equal to the gateway
	 * will interfere with keeping LLINFO in the routing
	 * table, so disallow it.
	 */
	if (((rt0->rt_flags & (RTF_HOST | RTF_GATEWAY | RTF_LLINFO)) ==
			      (RTF_HOST | RTF_GATEWAY)) &&
	    dst->sa_len == gate->sa_len &&
	    sa_equal(dst, gate)) {
		/*
		 * The route might already exist if this is an RTM_CHANGE
		 * or a routing redirect, so try to delete it.
		 */
		if (rt_key(rt0) != NULL)
			rtrequest(RTM_DELETE, rt_key(rt0), rt0->rt_gateway,
				  rt_mask(rt0), rt0->rt_flags, NULL);
		return EADDRNOTAVAIL;
	}

	/*
	 * Both dst and gateway are stored in the same malloc'ed chunk
	 * (If I ever get my hands on....)
	 * if we need to malloc a new chunk, then keep the old one around
	 * till we don't need it any more.
	 */
	if (rt->rt_gateway == NULL || glen > ROUNDUP(rt->rt_gateway->sa_len)) {
		oldspace = (char *)rt_key(rt);
		R_Malloc(space, char *, dlen + glen);
		if (space == NULL)
			return ENOBUFS;
		rt->rt_nodes->rn_key = space;
	} else {
		space = (char *)rt_key(rt);	/* Just use the old space. */
		oldspace = NULL;
	}

	/* Set the gateway value. */
	rt->rt_gateway = (struct sockaddr *)(space + dlen);
	bcopy(gate, rt->rt_gateway, glen);

	if (oldspace != NULL) {
		/*
		 * If we allocated a new chunk, preserve the original dst.
		 * This way, rt_setgate() really just sets the gate
		 * and leaves the dst field alone.
		 */
		bcopy(dst, space, dlen);
		Free(oldspace);
	}

	/*
	 * If there is already a gwroute, it's now almost definitely wrong
	 * so drop it.
	 */
	if (rt->rt_gwroute != NULL) {
		RTFREE(rt->rt_gwroute);
		rt->rt_gwroute = NULL;
	}
	if (rt->rt_flags & RTF_GATEWAY) {
		/*
		 * Cloning loop avoidance: In the presence of
		 * protocol-cloning and bad configuration, it is
		 * possible to get stuck in bottomless mutual recursion
		 * (rtrequest rt_setgate rtlookup).  We avoid this
		 * by not allowing protocol-cloning to operate for
		 * gateways (which is probably the correct choice
		 * anyway), and avoid the resulting reference loops
		 * by disallowing any route to run through itself as
		 * a gateway.  This is obviously mandatory when we
		 * get rt->rt_output().
		 *
		 * This breaks TTCP for hosts outside the gateway!  XXX JH
		 */
		rt->rt_gwroute = _rtlookup(gate, generate_report,
					   RTF_PRCLONING);
		if (rt->rt_gwroute == rt) {
			rt->rt_gwroute = NULL;
			--rt->rt_refcnt;
			return EDQUOT; /* failure */
		}
	}

	/*
	 * This isn't going to do anything useful for host routes, so
	 * don't bother.  Also make sure we have a reasonable mask
	 * (we don't yet have one during adds).
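	 * The same rt_fixchange() walk is repeated in rtrequest1() once the
	 * node has actually been added to the tree.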
	 */
	if (!(rt->rt_flags & RTF_HOST) && rt_mask(rt) != NULL) {
		struct rtfc_arg arg = { rt, rnh };

		rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),
				       (char *)rt_mask(rt),
				       rt_fixchange, &arg);
	}

	return 0;
}

static void
rt_maskedcopy(
	struct sockaddr *src,
	struct sockaddr *dst,
	struct sockaddr *netmask)
{
	u_char *cp1 = (u_char *)src;
	u_char *cp2 = (u_char *)dst;
	u_char *cp3 = (u_char *)netmask;
	u_char *cplim = cp2 + *cp3;
	u_char *cplim2 = cp2 + *cp1;

	*cp2++ = *cp1++; *cp2++ = *cp1++; /* copies sa_len & sa_family */
	cp3 += 2;
	if (cplim > cplim2)
		cplim = cplim2;
	while (cp2 < cplim)
		*cp2++ = *cp1++ & *cp3++;
	if (cp2 < cplim2)
		bzero(cp2, cplim2 - cp2);
}

int
rt_llroute(struct sockaddr *dst, struct rtentry *rt0, struct rtentry **drt)
{
	struct rtentry *up_rt, *rt;

	if (!(rt0->rt_flags & RTF_UP)) {
		up_rt = rtlookup(dst);
		if (up_rt == NULL)
			return (EHOSTUNREACH);
		up_rt->rt_refcnt--;
	} else
		up_rt = rt0;
	if (up_rt->rt_flags & RTF_GATEWAY) {
		if (up_rt->rt_gwroute == NULL) {
			up_rt->rt_gwroute = rtlookup(up_rt->rt_gateway);
			if (up_rt->rt_gwroute == NULL)
				return (EHOSTUNREACH);
		} else if (!(up_rt->rt_gwroute->rt_flags & RTF_UP)) {
			rtfree(up_rt->rt_gwroute);
			up_rt->rt_gwroute = rtlookup(up_rt->rt_gateway);
			if (up_rt->rt_gwroute == NULL)
				return (EHOSTUNREACH);
		}
		rt = up_rt->rt_gwroute;
	} else
		rt = up_rt;
	if (rt->rt_flags & RTF_REJECT &&
	    (rt->rt_rmx.rmx_expire == 0 ||	   /* rt doesn't expire */
	     time_second < rt->rt_rmx.rmx_expire)) /* rt not expired */
		return (rt->rt_flags & RTF_HOST ? EHOSTDOWN : EHOSTUNREACH);
	*drt = rt;
	return 0;
}

static int
rt_setshims(struct rtentry *rt, struct sockaddr **rt_shim)
{
	int i;

	for (i = 0; i < 3; i++) {
		struct sockaddr *shim = rt_shim[RTAX_MPLS1 + i];
		int shimlen;

		if (shim == NULL)
			break;

		shimlen = ROUNDUP(shim->sa_len);
		R_Malloc(rt->rt_shim[i], struct sockaddr *, shimlen);
		bcopy(shim, rt->rt_shim[i], shimlen);
	}

	return 0;
}

#ifdef ROUTE_DEBUG

/*
 * Print out a route table entry
 */
void
rt_print(struct rt_addrinfo *rtinfo, struct rtentry *rn)
{
	kprintf("rti %p cpu %d route %p flags %08lx: ",
		rtinfo, mycpuid, rn, rn->rt_flags);
	sockaddr_print(rt_key(rn));
	kprintf(" mask ");
	sockaddr_print(rt_mask(rn));
	kprintf(" gw ");
	sockaddr_print(rn->rt_gateway);
	kprintf(" ifc \"%s\"", rn->rt_ifp ? rn->rt_ifp->if_dname : "?");
	kprintf(" ifa %p\n", rn->rt_ifa);
}

void
rt_addrinfo_print(int cmd, struct rt_addrinfo *rti)
{
	int didit = 0;
	int i;

#ifdef ROUTE_DEBUG
	if (cmd == RTM_DELETE && route_debug > 1)
		print_backtrace(-1);
#endif

	switch(cmd) {
	case RTM_ADD:
		kprintf("ADD ");
		break;
	case RTM_RESOLVE:
		kprintf("RES ");
		break;
	case RTM_DELETE:
		kprintf("DEL ");
		break;
	default:
		kprintf("C%02d ", cmd);
		break;
	}
	kprintf("rti %p cpu %d ", rti, mycpuid);
	for (i = 0; i < rti->rti_addrs; ++i) {
		if (rti->rti_info[i] == NULL)
			continue;
		if (didit)
			kprintf(" ,");
		switch(i) {
		case RTAX_DST:
			kprintf("(DST ");
			break;
		case RTAX_GATEWAY:
			kprintf("(GWY ");
			break;
		case RTAX_NETMASK:
			kprintf("(MSK ");
			break;
		case RTAX_GENMASK:
			kprintf("(GEN ");
			break;
		case RTAX_IFP:
			kprintf("(IFP ");
			break;
		case RTAX_IFA:
			kprintf("(IFA ");
			break;
		case RTAX_AUTHOR:
			kprintf("(AUT ");
			break;
		case RTAX_BRD:
			kprintf("(BRD ");
			break;
		default:
			kprintf("(?%02d ", i);
			break;
		}
		sockaddr_print(rti->rti_info[i]);
		kprintf(")");
		didit = 1;
	}
	kprintf("\n");
}

void
sockaddr_print(struct sockaddr *sa)
{
	struct sockaddr_in *sa4;
	struct sockaddr_in6 *sa6;
	int len;
	int i;

	if (sa == NULL) {
		kprintf("NULL");
		return;
	}

	len = sa->sa_len - offsetof(struct sockaddr, sa_data[0]);

	switch(sa->sa_family) {
	case AF_INET:
	case AF_INET6:
	default:
		switch(sa->sa_family) {
		case AF_INET:
			sa4 = (struct sockaddr_in *)sa;
			kprintf("INET %d %d.%d.%d.%d",
				ntohs(sa4->sin_port),
				(ntohl(sa4->sin_addr.s_addr) >> 24) & 255,
				(ntohl(sa4->sin_addr.s_addr) >> 16) & 255,
				(ntohl(sa4->sin_addr.s_addr) >> 8) & 255,
				(ntohl(sa4->sin_addr.s_addr) >> 0) & 255
			);
			break;
		case AF_INET6:
			sa6 = (struct sockaddr_in6 *)sa;
			kprintf("INET6 %d %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x",
				ntohs(sa6->sin6_port),
				sa6->sin6_addr.s6_addr16[0],
				sa6->sin6_addr.s6_addr16[1],
				sa6->sin6_addr.s6_addr16[2],
				sa6->sin6_addr.s6_addr16[3],
				sa6->sin6_addr.s6_addr16[4],
				sa6->sin6_addr.s6_addr16[5],
				sa6->sin6_addr.s6_addr16[6],
				sa6->sin6_addr.s6_addr16[7]
			);
			break;
		default:
			kprintf("AF%d ", sa->sa_family);
			while (len > 0 && sa->sa_data[len-1] == 0)
				--len;

			for (i = 0; i < len; ++i) {
				if (i)
					kprintf(".");
				kprintf("%d", (unsigned char)sa->sa_data[i]);
			}
			break;
		}
	}
}

#endif

/*
 * Set up a routing table entry, normally for an interface.
 */
int
rtinit(struct ifaddr *ifa, int cmd, int flags)
{
	struct sockaddr *dst, *deldst, *netmask;
	struct mbuf *m = NULL;
	struct radix_node_head *rnh;
	struct radix_node *rn;
	struct rt_addrinfo rtinfo;
	int error;

	if (flags & RTF_HOST) {
		dst = ifa->ifa_dstaddr;
		netmask = NULL;
	} else {
		dst = ifa->ifa_addr;
		netmask = ifa->ifa_netmask;
	}
	/*
	 * If it's a delete, check that if it exists, it's on the correct
	 * interface or we might scrub a route to another ifa which would
	 * be confusing at best and possibly worse.
	 */
	if (cmd == RTM_DELETE) {
		/*
		 * It's a delete, so it should already exist..
		 * If it's a net, mask off the host bits
		 * (Assuming we have a mask)
		 */
		if (netmask != NULL) {
			m = m_get(MB_DONTWAIT, MT_SONAME);
			if (m == NULL)
				return (ENOBUFS);
			mbuftrackid(m, 34);
			deldst = mtod(m, struct sockaddr *);
			rt_maskedcopy(dst, deldst, netmask);
			dst = deldst;
		}
		/*
		 * Look up an rtentry that is in the routing tree and
		 * contains the correct info.
		 */
		if ((rnh = rt_tables[mycpuid][dst->sa_family]) == NULL ||
		    (rn = rnh->rnh_lookup((char *)dst,
					  (char *)netmask, rnh)) == NULL ||
		    ((struct rtentry *)rn)->rt_ifa != ifa ||
		    !sa_equal((struct sockaddr *)rn->rn_key, dst)) {
			if (m != NULL)
				m_free(m);
			return (flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
		}
		/* XXX */
#if 0
		else {
			/*
			 * One would think that as we are deleting, and we know
			 * it doesn't exist, we could just return at this point
			 * with an "ELSE" clause, but apparently not..
			 */
			return (flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
		}
#endif
	}
	/*
	 * Do the actual request
	 */
	bzero(&rtinfo, sizeof(struct rt_addrinfo));
	rtinfo.rti_info[RTAX_DST] = dst;
	rtinfo.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
	rtinfo.rti_info[RTAX_NETMASK] = netmask;
	rtinfo.rti_flags = flags | ifa->ifa_flags;
	rtinfo.rti_ifa = ifa;
	error = rtrequest1_global(cmd, &rtinfo, rtinit_rtrequest_callback, ifa);
	if (m != NULL)
		m_free(m);
	return (error);
}

static void
rtinit_rtrequest_callback(int cmd, int error,
			  struct rt_addrinfo *rtinfo, struct rtentry *rt,
			  void *arg)
{
	struct ifaddr *ifa = arg;

	if (error == 0 && rt) {
		if (mycpuid == 0) {
			++rt->rt_refcnt;
			rt_newaddrmsg(cmd, ifa, error, rt);
			--rt->rt_refcnt;
		}
		if (cmd == RTM_DELETE) {
			if (rt->rt_refcnt == 0) {
				++rt->rt_refcnt;
				rtfree(rt);
			}
		}
	}
}

struct netmsg_rts {
	struct netmsg		netmsg;
	int			req;
	struct rt_addrinfo	*rtinfo;
	rtsearch_callback_func_t callback;
	void			*arg;
	boolean_t		exact_match;
	int			found_cnt;
};

int
rtsearch_global(int req, struct rt_addrinfo *rtinfo,
		rtsearch_callback_func_t callback, void *arg,
		boolean_t exact_match)
{
	struct netmsg_rts msg;

	netmsg_init(&msg.netmsg, NULL, &curthread->td_msgport,
		    0, rtsearch_msghandler);
	msg.req = req;
	msg.rtinfo = rtinfo;
	msg.callback = callback;
	msg.arg = arg;
	msg.exact_match = exact_match;
	msg.found_cnt = 0;
	return lwkt_domsg(rtable_portfn(0), &msg.netmsg.nm_lmsg, 0);
}

static void
rtsearch_msghandler(struct netmsg *netmsg)
{
	struct netmsg_rts *msg = (void *)netmsg;
	struct rt_addrinfo rtinfo;
	struct radix_node_head *rnh;
	struct rtentry *rt;
	int nextcpu, error;

	/*
	 * Copy the rtinfo.  We need to make sure that the original
	 * rtinfo, which is setup by the caller, in the netmsg will
	 * _not_ be changed; else the next CPU on the netmsg forwarding
	 * path will see a different rtinfo than what this CPU has seen.
	 */
	rtinfo = *msg->rtinfo;

	/*
	 * Find the correct routing tree to use for this Address Family
	 */
	if ((rnh = rt_tables[mycpuid][rtinfo.rti_dst->sa_family]) == NULL) {
		if (mycpuid != 0)
			panic("partially initialized routing tables\n");
		lwkt_replymsg(&msg->netmsg.nm_lmsg, EAFNOSUPPORT);
		return;
	}

	/*
	 * Correct rtinfo for the host route searching.
	 */
	if (rtinfo.rti_flags & RTF_HOST) {
		rtinfo.rti_netmask = NULL;
		rtinfo.rti_flags &= ~(RTF_CLONING | RTF_PRCLONING);
	}

	rt = (struct rtentry *)
	     rnh->rnh_lookup((char *)rtinfo.rti_dst,
			     (char *)rtinfo.rti_netmask, rnh);

	/*
	 * If we are asked to do the "exact match", we need to make sure
	 * that host route searching got a host route while a network
	 * route searching got a network route.
	 */
	if (rt != NULL && msg->exact_match &&
	    ((rt->rt_flags ^ rtinfo.rti_flags) & RTF_HOST))
		rt = NULL;

	if (rt == NULL) {
		/*
		 * No matching routes have been found, don't count this
		 * as a critical error (here, we set 'error' to 0), just
		 * keep moving on, since at least prcloned routes are not
		 * duplicated onto each CPU.
		 */
		error = 0;
	} else {
		msg->found_cnt++;

		rt->rt_refcnt++;
		error = msg->callback(msg->req, &rtinfo, rt, msg->arg,
				      msg->found_cnt);
		rt->rt_refcnt--;

		if (error == EJUSTRETURN) {
			lwkt_replymsg(&msg->netmsg.nm_lmsg, 0);
			return;
		}
	}

	nextcpu = mycpuid + 1;
	if (error) {
		KKASSERT(msg->found_cnt > 0);

		/*
		 * In the following cases, an unrecoverable error has
		 * not occurred:
		 * o  Request is RTM_GET
		 * o  The first time that we find the route, but the
		 *    modification fails.
		 */
		if (msg->req != RTM_GET && msg->found_cnt > 1) {
			panic("rtsearch_msghandler: unrecoverable error "
			      "cpu %d", mycpuid);
		}
		lwkt_replymsg(&msg->netmsg.nm_lmsg, error);
	} else if (nextcpu < ncpus) {
		lwkt_forwardmsg(rtable_portfn(nextcpu), &msg->netmsg.nm_lmsg);
	} else {
		if (msg->found_cnt == 0) {
			/* The requested route was never seen ... */
			error = ESRCH;
		}
		lwkt_replymsg(&msg->netmsg.nm_lmsg, error);
	}
}

int
rtmask_add_global(struct sockaddr *mask)
{
	struct netmsg nmsg;

	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
		    0, rtmask_add_msghandler);
	nmsg.nm_lmsg.u.ms_resultp = mask;

	return lwkt_domsg(rtable_portfn(0), &nmsg.nm_lmsg, 0);
}

struct sockaddr *
_rtmask_lookup(struct sockaddr *mask, boolean_t search)
{
	struct radix_node *n;

#define	clen(s)	(*(u_char *)(s))
	n = rn_addmask((char *)mask, search, 1);
	if (n != NULL &&
	    mask->sa_len >= clen(n->rn_key) &&
	    bcmp((char *)mask + 1,
		 (char *)n->rn_key + 1, clen(n->rn_key) - 1) == 0) {
		return (struct sockaddr *)n->rn_key;
	} else {
		return NULL;
	}
#undef clen
}

static void
rtmask_add_msghandler(struct netmsg *nmsg)
{
	struct lwkt_msg *lmsg = &nmsg->nm_lmsg;
	struct sockaddr *mask = lmsg->u.ms_resultp;
	int error = 0, nextcpu;

	if (rtmask_lookup(mask) == NULL)
		error = ENOBUFS;

	nextcpu = mycpuid + 1;
	if (!error && nextcpu < ncpus)
		lwkt_forwardmsg(rtable_portfn(nextcpu), lmsg);
	else
		lwkt_replymsg(lmsg, error);
}

/* This must be before ip6_init2(), which is now SI_ORDER_MIDDLE */
SYSINIT(route, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, route_init, 0);