/*
 * Copyright (c) 2004, 2005 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 2004, 2005 Jeffrey M. Hsu.  All rights reserved.
 *
 * License terms: all terms for the DragonFly license above plus the following:
 *
 * 4. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *
 *	This product includes software developed by Jeffrey M. Hsu
 *	for the DragonFly Project.
 *
 *    This requirement may be waived with permission from Jeffrey Hsu.
 *    Permission will be granted to any DragonFly user for free.
 *    This requirement will sunset and may be removed on Jan 31, 2006,
 *    after which the standard DragonFly license (as shown above) will
 *    apply.
 */

/*
 * Copyright (c) 1980, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)route.c	8.3 (Berkeley) 1/9/95
 * $FreeBSD: src/sys/net/route.c,v 1.59.2.10 2003/01/17 08:04:00 ru Exp $
 * $DragonFly: src/sys/net/route.c,v 1.27 2006/12/22 23:44:54 swildner Exp $
 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/domain.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/globaldata.h>
#include <sys/thread.h>
#include <sys/thread2.h>
#include <sys/msgport2.h>

#include <net/if.h>
#include <net/route.h>
#include <net/netisr.h>

#include <netinet/in.h>
#include <net/ip_mroute/ip_mroute.h>

static struct rtstatistics rtstatistics_percpu[MAXCPU];
#ifdef SMP
#define rtstat	rtstatistics_percpu[mycpuid]
#else
#define rtstat	rtstatistics_percpu[0]
#endif

struct radix_node_head *rt_tables[MAXCPU][AF_MAX+1];
struct lwkt_port *rt_ports[MAXCPU];

static void	rt_maskedcopy (struct sockaddr *, struct sockaddr *,
			       struct sockaddr *);
static void	rtable_init(void);
static void	rtable_service_loop(void *dummy);
static void	rtinit_rtrequest_callback(int, int, struct rt_addrinfo *,
					  struct rtentry *, void *);

#ifdef SMP
static int rtredirect_msghandler(struct lwkt_msg *lmsg);
static int rtrequest1_msghandler(struct lwkt_msg *lmsg);
#endif

SYSCTL_NODE(_net, OID_AUTO, route, CTLFLAG_RW, 0, "Routing");

#ifdef ROUTE_DEBUG
static int route_debug = 1;
SYSCTL_INT(_net_route, OID_AUTO, route_debug, CTLFLAG_RW,
	   &route_debug, 0, "");
#endif

/*
 * Initialize the route table(s) for protocol domains and
 * create a helper thread which will be responsible for updating
 * route table entries on each cpu.
 */
void
route_init(void)
{
	int cpu, origcpu;
	thread_t rtd;

	for (cpu = 0; cpu < ncpus; ++cpu)
		bzero(&rtstatistics_percpu[cpu], sizeof(struct rtstatistics));
	rn_init();	/* initialize all zeroes, all ones, mask table */
	origcpu = mycpuid;
	for (cpu = 0; cpu < ncpus; cpu++) {
		lwkt_migratecpu(cpu);
		rtable_init();
		lwkt_create(rtable_service_loop, NULL, &rtd, NULL,
			    TDF_STOPREQ, cpu, "rtable_cpu %d", cpu);
		rt_ports[cpu] = &rtd->td_msgport;
		lwkt_schedule(rtd);
	}
	lwkt_migratecpu(origcpu);
}

/*
 * Attach a routing tree for each protocol domain that provides one.
 * Runs on the current cpu; route_init() calls this once per cpu so that
 * every cpu carries its own copy of the tables.
 */
static void
rtable_init(void)
{
	struct domain *dom;

	SLIST_FOREACH(dom, &domains, dom_next) {
		if (dom->dom_rtattach) {
			dom->dom_rtattach(
			    (void **)&rt_tables[mycpuid][dom->dom_family],
			    dom->dom_rtoffset);
		}
	}
}
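
/*
 * Example (sketch): route table updates are replicated by chaining a
 * single lwkt message through every cpu's rtable service thread.  A
 * per-cpu handler applies the update locally and then either forwards
 * the message to the next cpu or replies to the originator.  The names
 * example_msghandler and apply_update_on_this_cpu below are hypothetical;
 * rtredirect_msghandler() and rtrequest1_msghandler() later in this file
 * follow exactly this shape:
 *
 *	static int
 *	example_msghandler(struct lwkt_msg *lmsg)
 *	{
 *		int nextcpu;
 *
 *		apply_update_on_this_cpu(lmsg);		(hypothetical)
 *		nextcpu = mycpuid + 1;
 *		if (nextcpu < ncpus)
 *			lwkt_forwardmsg(rtable_portfn(nextcpu), lmsg);
 *		else
 *			lwkt_replymsg(lmsg, 0);
 *		return (0);
 *	}
 */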

/*
 * Our per-cpu table management protocol thread.  All route table operations
 * are chained through all cpus in order starting at cpu #0 in order to
 * maintain duplicate route tables on each cpu.  Having a separate route
 * table management thread allows the protocol and interrupt threads to
 * issue route table changes.
 */
static void
rtable_service_loop(void *dummy __unused)
{
	struct lwkt_msg *lmsg;
	thread_t td = curthread;

	while ((lmsg = lwkt_waitport(&td->td_msgport, NULL)) != NULL) {
		lmsg->ms_cmd.cm_func(lmsg);
	}
}

/*
 * Routing statistics.
 */
#ifdef SMP
static int
sysctl_rtstatistics(SYSCTL_HANDLER_ARGS)
{
	int cpu, error = 0;

	for (cpu = 0; cpu < ncpus; ++cpu) {
		if ((error = SYSCTL_OUT(req, &rtstatistics_percpu[cpu],
					sizeof(struct rtstatistics))))
			break;
		if ((error = SYSCTL_IN(req, &rtstatistics_percpu[cpu],
				       sizeof(struct rtstatistics))))
			break;
	}

	return (error);
}
SYSCTL_PROC(_net_route, OID_AUTO, stats, (CTLTYPE_OPAQUE|CTLFLAG_RW),
	    0, 0, sysctl_rtstatistics, "S,rtstatistics", "Routing statistics");
#else
SYSCTL_STRUCT(_net_route, OID_AUTO, stats, CTLFLAG_RW, &rtstat, rtstatistics,
	      "Routing statistics");
#endif

/*
 * Packet routing routines.
 */

/*
 * Look up and fill in the "ro_rt" rtentry field in a route structure given
 * an address in the "ro_dst" field.  Always send a report on a miss and
 * always clone routes.
 */
void
rtalloc(struct route *ro)
{
	rtalloc_ign(ro, 0UL);
}

/*
 * Look up and fill in the "ro_rt" rtentry field in a route structure given
 * an address in the "ro_dst" field.  Always send a report on a miss and
 * optionally clone routes when RTF_CLONING or RTF_PRCLONING are not being
 * ignored.
 */
void
rtalloc_ign(struct route *ro, u_long ignoreflags)
{
	if (ro->ro_rt != NULL) {
		if (ro->ro_rt->rt_ifp != NULL && ro->ro_rt->rt_flags & RTF_UP)
			return;
		rtfree(ro->ro_rt);
		ro->ro_rt = NULL;
	}
	ro->ro_rt = _rtlookup(&ro->ro_dst, RTL_REPORTMSG, ignoreflags);
}

/*
 * Look up the route that matches the given "dst" address.
 *
 * Route lookup can have the side-effect of creating and returning
 * a cloned route instead when "dst" matches a cloning route and the
 * RTF_CLONING and RTF_PRCLONING flags are not being ignored.
 *
 * Any route returned has its reference count incremented.
 */
struct rtentry *
_rtlookup(struct sockaddr *dst, boolean_t generate_report, u_long ignore)
{
	struct radix_node_head *rnh = rt_tables[mycpuid][dst->sa_family];
	struct rtentry *rt;

	if (rnh == NULL)
		goto unreach;

	/*
	 * Look up route in the radix tree.
	 */
	rt = (struct rtentry *) rnh->rnh_matchaddr((char *)dst, rnh);
	if (rt == NULL)
		goto unreach;

	/*
	 * Handle cloning routes.
	 */
	if ((rt->rt_flags & ~ignore & (RTF_CLONING | RTF_PRCLONING)) != 0) {
		struct rtentry *clonedroute;
		int error;

		clonedroute = rt;	/* copy in/copy out parameter */
		error = rtrequest(RTM_RESOLVE, dst, NULL, NULL, 0,
				  &clonedroute);	/* clone the route */
		if (error != 0) {	/* cloning failed */
			if (generate_report)
				rt_dstmsg(RTM_MISS, dst, error);
			rt->rt_refcnt++;
			return (rt);	/* return the uncloned route */
		}
		if (generate_report) {
			if (clonedroute->rt_flags & RTF_XRESOLVE)
				rt_dstmsg(RTM_RESOLVE, dst, 0);
			else
				rt_rtmsg(RTM_ADD, clonedroute,
					 clonedroute->rt_ifp, 0);
		}
		return (clonedroute);	/* return cloned route */
	}

	/*
	 * Increment the reference count of the matched route and return.
	 */
	rt->rt_refcnt++;
	return (rt);

unreach:
	rtstat.rts_unreach++;
	if (generate_report)
		rt_dstmsg(RTM_MISS, dst, 0);
	return (NULL);
}

/*
 * Release a reference to a route.  When the last reference to a route
 * that is no longer RTF_UP is dropped, the rtentry and its key storage
 * are freed and the parent route (if any) is released as well.
 */
void
rtfree(struct rtentry *rt)
{
	KASSERT(rt->rt_refcnt > 0, ("rtfree: rt_refcnt %ld", rt->rt_refcnt));

	--rt->rt_refcnt;
	if (rt->rt_refcnt == 0) {
		struct radix_node_head *rnh =
		    rt_tables[mycpuid][rt_key(rt)->sa_family];

		if (rnh->rnh_close)
			rnh->rnh_close((struct radix_node *)rt, rnh);
		if (!(rt->rt_flags & RTF_UP)) {
			/* deallocate route */
			if (rt->rt_ifa != NULL)
				IFAFREE(rt->rt_ifa);
			if (rt->rt_parent != NULL)
				RTFREE(rt->rt_parent);	/* recursive call! */
			Free(rt_key(rt));
			Free(rt);
		}
	}
}

static int
rtredirect_oncpu(struct sockaddr *dst, struct sockaddr *gateway,
		 struct sockaddr *netmask, int flags, struct sockaddr *src)
{
	struct rtentry *rt = NULL;
	struct rt_addrinfo rtinfo;
	struct ifaddr *ifa;
	u_long *stat = NULL;
	int error;

	/* verify the gateway is directly reachable */
	if ((ifa = ifa_ifwithnet(gateway)) == NULL) {
		error = ENETUNREACH;
		goto out;
	}

	/*
	 * If the redirect isn't from our current router for this destination,
	 * it's either old or wrong.
	 */
	if (!(flags & RTF_DONE) &&		/* XXX JH */
	    (rt = rtpurelookup(dst)) != NULL &&
	    (!sa_equal(src, rt->rt_gateway) || rt->rt_ifa != ifa)) {
		error = EINVAL;
		goto done;
	}

	/*
	 * If it redirects us to ourselves, we have a routing loop,
	 * perhaps as a result of an interface going down recently.
	 */
	if (ifa_ifwithaddr(gateway)) {
		error = EHOSTUNREACH;
		goto done;
	}

	/*
	 * Create a new entry if the lookup failed or if we got back
	 * a wildcard entry for the default route.  This is necessary
	 * for hosts which use routing redirects generated by smart
	 * gateways to dynamically build the routing tables.
	 */
	if (rt == NULL)
		goto create;
	if ((rt_mask(rt) != NULL && rt_mask(rt)->sa_len < 2)) {
		rtfree(rt);
		goto create;
	}

	/* Ignore redirects for directly connected hosts. */
	if (!(rt->rt_flags & RTF_GATEWAY)) {
		error = EHOSTUNREACH;
		goto done;
	}

	if (!(rt->rt_flags & RTF_HOST) && (flags & RTF_HOST)) {
		/*
		 * Changing from a network route to a host route.
		 * Create a new host route rather than smashing the
		 * network route.
		 */
create:
		flags |= RTF_GATEWAY | RTF_DYNAMIC;
		bzero(&rtinfo, sizeof(struct rt_addrinfo));
		rtinfo.rti_info[RTAX_DST] = dst;
		rtinfo.rti_info[RTAX_GATEWAY] = gateway;
		rtinfo.rti_info[RTAX_NETMASK] = netmask;
		rtinfo.rti_flags = flags;
		rtinfo.rti_ifa = ifa;
		rt = NULL;	/* copy-in/copy-out parameter */
		error = rtrequest1(RTM_ADD, &rtinfo, &rt);
		if (rt != NULL)
			flags = rt->rt_flags;
		stat = &rtstat.rts_dynamic;
	} else {
		/*
		 * Smash the current notion of the gateway to this destination.
		 * Should check about netmask!!!
		 */
		rt->rt_flags |= RTF_MODIFIED;
		flags |= RTF_MODIFIED;
		rt_setgate(rt, rt_key(rt), gateway);
		error = 0;
		stat = &rtstat.rts_newgateway;
	}

done:
	if (rt != NULL)
		rtfree(rt);
out:
	if (error != 0)
		rtstat.rts_badredirect++;
	else if (stat != NULL)
		(*stat)++;

	return error;
}

#ifdef SMP

struct netmsg_rtredirect {
	struct lwkt_msg	lmsg;
	struct sockaddr	*dst;
	struct sockaddr	*gateway;
	struct sockaddr	*netmask;
	int		flags;
	struct sockaddr	*src;
};

#endif

/*
 * Force a routing table entry to the specified
 * destination to go through the given gateway.
 * Normally called as a result of a routing redirect
 * message from the network layer.
 *
 * N.B.: must be called at splnet
 */
void
rtredirect(struct sockaddr *dst, struct sockaddr *gateway,
	   struct sockaddr *netmask, int flags, struct sockaddr *src)
{
	struct rt_addrinfo rtinfo;
	int error;
#ifdef SMP
	struct netmsg_rtredirect msg;

	lwkt_initmsg(&msg.lmsg, &curthread->td_msgport, 0,
		     lwkt_cmd_func(rtredirect_msghandler), lwkt_cmd_op_none);
	msg.dst = dst;
	msg.gateway = gateway;
	msg.netmask = netmask;
	msg.flags = flags;
	msg.src = src;
	error = lwkt_domsg(rtable_portfn(0), &msg.lmsg);
#else
	error = rtredirect_oncpu(dst, gateway, netmask, flags, src);
#endif
	bzero(&rtinfo, sizeof(struct rt_addrinfo));
	rtinfo.rti_info[RTAX_DST] = dst;
	rtinfo.rti_info[RTAX_GATEWAY] = gateway;
	rtinfo.rti_info[RTAX_NETMASK] = netmask;
	rtinfo.rti_info[RTAX_AUTHOR] = src;
	rt_missmsg(RTM_REDIRECT, &rtinfo, flags, error);
}

#ifdef SMP

/*
 * Apply a redirect on this cpu, then forward the message to the next
 * cpu's rtable thread, or reply to the originator once every cpu has
 * been visited.
 */
static int
rtredirect_msghandler(struct lwkt_msg *lmsg)
{
	struct netmsg_rtredirect *msg = (void *)lmsg;
	int nextcpu;

	rtredirect_oncpu(msg->dst, msg->gateway, msg->netmask,
			 msg->flags, msg->src);
	nextcpu = mycpuid + 1;
	if (nextcpu < ncpus)
		lwkt_forwardmsg(rtable_portfn(nextcpu), &msg->lmsg);
	else
		lwkt_replymsg(&msg->lmsg, 0);
	return (0);
}

#endif

/*
 * Routing table ioctl interface.
 */
int
rtioctl(u_long req, caddr_t data, struct ucred *cred)
{
#ifdef INET
	/* Multicast goop, grrr... */
	return mrt_ioctl ? mrt_ioctl(req, data) : EOPNOTSUPP;
#else
	return ENXIO;
#endif
}

/*
 * Locate the interface address to use when installing a route with the
 * given flags, destination, and gateway.
 */
struct ifaddr *
ifa_ifwithroute(int flags, struct sockaddr *dst, struct sockaddr *gateway)
{
	struct ifaddr *ifa;

	if (!(flags & RTF_GATEWAY)) {
		/*
		 * If we are adding a route to an interface,
		 * and the interface is a point-to-point link,
		 * we should search for the destination
		 * as our clue to the interface.  Otherwise
		 * we can use the local address.
		 */
		ifa = NULL;
		if (flags & RTF_HOST) {
			ifa = ifa_ifwithdstaddr(dst);
		}
		if (ifa == NULL)
			ifa = ifa_ifwithaddr(gateway);
	} else {
		/*
		 * If we are adding a route to a remote net
		 * or host, the gateway may still be on the
		 * other end of a pt to pt link.
		 */
		ifa = ifa_ifwithdstaddr(gateway);
	}
	if (ifa == NULL)
		ifa = ifa_ifwithnet(gateway);
	if (ifa == NULL) {
		struct rtentry *rt;

		rt = rtpurelookup(gateway);
		if (rt == NULL)
			return (NULL);
		rt->rt_refcnt--;
		if ((ifa = rt->rt_ifa) == NULL)
			return (NULL);
	}
	if (ifa->ifa_addr->sa_family != dst->sa_family) {
		struct ifaddr *oldifa = ifa;

		ifa = ifaof_ifpforaddr(dst, ifa->ifa_ifp);
		if (ifa == NULL)
			ifa = oldifa;
	}
	return (ifa);
}

static int rt_fixdelete (struct radix_node *, void *);
static int rt_fixchange (struct radix_node *, void *);

struct rtfc_arg {
	struct rtentry *rt0;
	struct radix_node_head *rnh;
};

/*
 * Set rtinfo->rti_ifa and rtinfo->rti_ifp.
 */
int
rt_getifa(struct rt_addrinfo *rtinfo)
{
	struct sockaddr *gateway = rtinfo->rti_info[RTAX_GATEWAY];
	struct sockaddr *dst = rtinfo->rti_info[RTAX_DST];
	struct sockaddr *ifaaddr = rtinfo->rti_info[RTAX_IFA];
	int flags = rtinfo->rti_flags;

	/*
	 * ifp may be specified by sockaddr_dl
	 * when protocol address is ambiguous.
	 */
	if (rtinfo->rti_ifp == NULL) {
		struct sockaddr *ifpaddr;

		ifpaddr = rtinfo->rti_info[RTAX_IFP];
		if (ifpaddr != NULL && ifpaddr->sa_family == AF_LINK) {
			struct ifaddr *ifa;

			ifa = ifa_ifwithnet(ifpaddr);
			if (ifa != NULL)
				rtinfo->rti_ifp = ifa->ifa_ifp;
		}
	}

	if (rtinfo->rti_ifa == NULL && ifaaddr != NULL)
		rtinfo->rti_ifa = ifa_ifwithaddr(ifaaddr);
	if (rtinfo->rti_ifa == NULL) {
		struct sockaddr *sa;

		sa = ifaaddr != NULL ? ifaaddr :
		    (gateway != NULL ? gateway : dst);
		if (sa != NULL && rtinfo->rti_ifp != NULL)
			rtinfo->rti_ifa = ifaof_ifpforaddr(sa, rtinfo->rti_ifp);
		else if (dst != NULL && gateway != NULL)
			rtinfo->rti_ifa = ifa_ifwithroute(flags, dst, gateway);
		else if (sa != NULL)
			rtinfo->rti_ifa = ifa_ifwithroute(flags, sa, sa);
	}
	if (rtinfo->rti_ifa == NULL)
		return (ENETUNREACH);

	if (rtinfo->rti_ifp == NULL)
		rtinfo->rti_ifp = rtinfo->rti_ifa->ifa_ifp;
	return (0);
}

/*
 * Do appropriate manipulations of a routing tree given
 * all the bits of info needed.
 */
int
rtrequest(
	int req,
	struct sockaddr *dst,
	struct sockaddr *gateway,
	struct sockaddr *netmask,
	int flags,
	struct rtentry **ret_nrt)
{
	struct rt_addrinfo rtinfo;

	bzero(&rtinfo, sizeof(struct rt_addrinfo));
	rtinfo.rti_info[RTAX_DST] = dst;
	rtinfo.rti_info[RTAX_GATEWAY] = gateway;
	rtinfo.rti_info[RTAX_NETMASK] = netmask;
	rtinfo.rti_flags = flags;
	return rtrequest1(req, &rtinfo, ret_nrt);
}

int
rtrequest_global(
	int req,
	struct sockaddr *dst,
	struct sockaddr *gateway,
	struct sockaddr *netmask,
	int flags)
{
	struct rt_addrinfo rtinfo;

	bzero(&rtinfo, sizeof(struct rt_addrinfo));
	rtinfo.rti_info[RTAX_DST] = dst;
	rtinfo.rti_info[RTAX_GATEWAY] = gateway;
	rtinfo.rti_info[RTAX_NETMASK] = netmask;
	rtinfo.rti_flags = flags;
	return rtrequest1_global(req, &rtinfo, NULL, NULL);
}

#ifdef SMP

struct netmsg_rtq {
	struct lwkt_msg			lmsg;
	int				req;
	struct rt_addrinfo		*rtinfo;
	rtrequest1_callback_func_t	callback;
	void				*arg;
};

#endif

int
rtrequest1_global(int req, struct rt_addrinfo *rtinfo,
		  rtrequest1_callback_func_t callback, void *arg)
{
	int error;
#ifdef SMP
	struct netmsg_rtq msg;

	lwkt_initmsg(&msg.lmsg, &curthread->td_msgport, 0,
		     lwkt_cmd_func(rtrequest1_msghandler), lwkt_cmd_op_none);
	msg.lmsg.ms_error = -1;
	msg.req = req;
	msg.rtinfo = rtinfo;
	msg.callback = callback;
	msg.arg = arg;
	error = lwkt_domsg(rtable_portfn(0), &msg.lmsg);
#else
	struct rtentry *rt = NULL;

	error = rtrequest1(req, rtinfo, &rt);
	if (rt)
		--rt->rt_refcnt;
	if (callback)
		callback(req, error, rtinfo, rt, arg);
#endif
	return (error);
}

/*
 * Handle a route table request on the current cpu.  Since the route tables
 * are supposed to be identical on each cpu, an error occurring later in the
 * message chain is considered system-fatal.
 */
#ifdef SMP

static int
rtrequest1_msghandler(struct lwkt_msg *lmsg)
{
	struct netmsg_rtq *msg = (void *)lmsg;
	struct rtentry *rt = NULL;
	int nextcpu;
	int error;

	error = rtrequest1(msg->req, msg->rtinfo, &rt);
	if (rt)
		--rt->rt_refcnt;
	if (msg->callback)
		msg->callback(msg->req, error, msg->rtinfo, rt, msg->arg);

	/*
	 * RTM_DELETEs are propagated even if an error occurs, since a
	 * cloned route might be undergoing deletion and cloned routes
	 * are not necessarily replicated.  An overall error is returned
	 * only if no cpus have the route in question.
	 */
	if (msg->lmsg.ms_error < 0 || error == 0)
		msg->lmsg.ms_error = error;

	nextcpu = mycpuid + 1;
	if (error && msg->req != RTM_DELETE) {
		if (mycpuid != 0) {
			panic("rtrequest1_msghandler: rtrequest table "
			      "error was not on cpu #0: %p", msg->rtinfo);
		}
		lwkt_replymsg(&msg->lmsg, error);
	} else if (nextcpu < ncpus) {
		lwkt_forwardmsg(rtable_portfn(nextcpu), &msg->lmsg);
	} else {
		lwkt_replymsg(&msg->lmsg, msg->lmsg.ms_error);
	}
	return (0);
}

#endif

int
rtrequest1(int req, struct rt_addrinfo *rtinfo, struct rtentry **ret_nrt)
{
	struct sockaddr *dst = rtinfo->rti_info[RTAX_DST];
	struct rtentry *rt;
	struct radix_node *rn;
	struct radix_node_head *rnh;
	struct ifaddr *ifa;
	struct sockaddr *ndst;
	int error = 0;

#define gotoerr(x) { error = x ; goto bad; }

#ifdef ROUTE_DEBUG
	if (route_debug)
		rt_addrinfo_print(req, rtinfo);
#endif

	crit_enter();
	/*
	 * Find the correct routing tree to use for this Address Family.
	 */
	if ((rnh = rt_tables[mycpuid][dst->sa_family]) == NULL)
		gotoerr(EAFNOSUPPORT);

	/*
	 * If we are adding a host route then we don't want to put
	 * a netmask in the tree, nor do we want to clone it.
	 */
	if (rtinfo->rti_flags & RTF_HOST) {
		rtinfo->rti_info[RTAX_NETMASK] = NULL;
		rtinfo->rti_flags &= ~(RTF_CLONING | RTF_PRCLONING);
	}

	switch (req) {
	case RTM_DELETE:
		/* Remove the item from the tree. */
		rn = rnh->rnh_deladdr((char *)rtinfo->rti_info[RTAX_DST],
				      (char *)rtinfo->rti_info[RTAX_NETMASK],
				      rnh);
		if (rn == NULL)
			gotoerr(ESRCH);
		KASSERT(!(rn->rn_flags & (RNF_ACTIVE | RNF_ROOT)),
			("rnh_deladdr returned flags 0x%x", rn->rn_flags));
		rt = (struct rtentry *)rn;

		/* ref to prevent a deletion race */
		++rt->rt_refcnt;

		/* Free any routes cloned from this one. */
		if ((rt->rt_flags & (RTF_CLONING | RTF_PRCLONING)) &&
		    rt_mask(rt) != NULL) {
			rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),
					       (char *)rt_mask(rt),
					       rt_fixdelete, rt);
		}

		if (rt->rt_gwroute != NULL) {
			RTFREE(rt->rt_gwroute);
			rt->rt_gwroute = NULL;
		}

		/*
		 * NB: RTF_UP must be set during the search above,
		 * because we might delete the last ref, causing
		 * rt to get freed prematurely.
		 */
		rt->rt_flags &= ~RTF_UP;

#ifdef ROUTE_DEBUG
		if (route_debug)
			rt_print(rtinfo, rt);
#endif

		/* Give the protocol a chance to keep things in sync. */
		if ((ifa = rt->rt_ifa) && ifa->ifa_rtrequest)
			ifa->ifa_rtrequest(RTM_DELETE, rt, rtinfo);

		/*
		 * If the caller wants it, then it can have it,
		 * but it's up to it to free the rtentry as we won't be
		 * doing it.
		 */
		KASSERT(rt->rt_refcnt >= 0,
			("rtrequest1(DELETE): refcnt %ld", rt->rt_refcnt));
		if (ret_nrt != NULL) {
			/* leave ref intact for return */
			*ret_nrt = rt;
		} else {
			/* deref / attempt to destroy */
			rtfree(rt);
		}
		break;

	case RTM_RESOLVE:
		if (ret_nrt == NULL || (rt = *ret_nrt) == NULL)
			gotoerr(EINVAL);
		ifa = rt->rt_ifa;
		rtinfo->rti_flags =
		    rt->rt_flags & ~(RTF_CLONING | RTF_PRCLONING | RTF_STATIC);
		rtinfo->rti_flags |= RTF_WASCLONED;
		rtinfo->rti_info[RTAX_GATEWAY] = rt->rt_gateway;
		if ((rtinfo->rti_info[RTAX_NETMASK] = rt->rt_genmask) == NULL)
			rtinfo->rti_flags |= RTF_HOST;
		goto makeroute;

	case RTM_ADD:
		KASSERT(!(rtinfo->rti_flags & RTF_GATEWAY) ||
			rtinfo->rti_info[RTAX_GATEWAY] != NULL,
			("rtrequest: GATEWAY but no gateway"));

		if (rtinfo->rti_ifa == NULL && (error = rt_getifa(rtinfo)))
			gotoerr(error);
		ifa = rtinfo->rti_ifa;
makeroute:
		R_Malloc(rt, struct rtentry *, sizeof(struct rtentry));
		if (rt == NULL)
			gotoerr(ENOBUFS);
		bzero(rt, sizeof(struct rtentry));
		rt->rt_flags = RTF_UP | rtinfo->rti_flags;
		error = rt_setgate(rt, dst, rtinfo->rti_info[RTAX_GATEWAY]);
		if (error != 0) {
			Free(rt);
			gotoerr(error);
		}

		ndst = rt_key(rt);
		if (rtinfo->rti_info[RTAX_NETMASK] != NULL)
			rt_maskedcopy(dst, ndst,
				      rtinfo->rti_info[RTAX_NETMASK]);
		else
			bcopy(dst, ndst, dst->sa_len);

		/*
		 * Note that we now have a reference to the ifa.
		 * This moved from below so that rnh->rnh_addaddr() can
		 * examine the ifa and ifa->ifa_ifp if it so desires.
		 */
		IFAREF(ifa);
		rt->rt_ifa = ifa;
		rt->rt_ifp = ifa->ifa_ifp;
		/* XXX mtu manipulation will be done in rnh_addaddr -- itojun */

		rn = rnh->rnh_addaddr((char *)ndst,
				      (char *)rtinfo->rti_info[RTAX_NETMASK],
				      rnh, rt->rt_nodes);
		if (rn == NULL) {
			struct rtentry *oldrt;

			/*
			 * We already have one of these in the tree.
			 * We do a special hack: if the old route was
			 * cloned, then we blow it away and try
			 * re-inserting the new one.
			 */
			oldrt = rtpurelookup(ndst);
			if (oldrt != NULL) {
				--oldrt->rt_refcnt;
				if (oldrt->rt_flags & RTF_WASCLONED) {
					rtrequest(RTM_DELETE, rt_key(oldrt),
						  oldrt->rt_gateway,
						  rt_mask(oldrt),
						  oldrt->rt_flags, NULL);
					rn = rnh->rnh_addaddr((char *)ndst,
					    (char *)
					    rtinfo->rti_info[RTAX_NETMASK],
					    rnh, rt->rt_nodes);
				}
			}
		}

		/*
		 * If it still failed to go into the tree,
		 * then un-make it (this should be a function).
		 */
		if (rn == NULL) {
			if (rt->rt_gwroute != NULL)
				rtfree(rt->rt_gwroute);
			IFAFREE(ifa);
			Free(rt_key(rt));
			Free(rt);
			gotoerr(EEXIST);
		}

		/*
		 * If we got here from RESOLVE, then we are cloning
		 * so clone the rest, and note that we
		 * are a clone (and increment the parent's references).
		 */
		if (req == RTM_RESOLVE) {
			rt->rt_rmx = (*ret_nrt)->rt_rmx; /* copy metrics */
			rt->rt_rmx.rmx_pksent = 0; /* reset packet counter */
			if ((*ret_nrt)->rt_flags &
			    (RTF_CLONING | RTF_PRCLONING)) {
				rt->rt_parent = *ret_nrt;
				(*ret_nrt)->rt_refcnt++;
			}
		}

		/*
		 * if this protocol has something to add to this then
		 * allow it to do that as well.
		 */
		if (ifa->ifa_rtrequest != NULL)
			ifa->ifa_rtrequest(req, rt, rtinfo);

		/*
		 * We repeat the same procedure from rt_setgate() here because
		 * it doesn't fire when we call it there because the node
		 * hasn't been added to the tree yet.
		 */
		if (req == RTM_ADD && !(rt->rt_flags & RTF_HOST) &&
		    rt_mask(rt) != NULL) {
			struct rtfc_arg arg = { rt, rnh };

			rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),
					       (char *)rt_mask(rt),
					       rt_fixchange, &arg);
		}

#ifdef ROUTE_DEBUG
		if (route_debug)
			rt_print(rtinfo, rt);
#endif
		/*
		 * Return the resulting rtentry,
		 * increasing the number of references by one.
		 */
		if (ret_nrt != NULL) {
			rt->rt_refcnt++;
			*ret_nrt = rt;
		}
		break;
	default:
		error = EOPNOTSUPP;
	}
bad:
#ifdef ROUTE_DEBUG
	if (route_debug) {
		if (error)
			kprintf("rti %p failed error %d\n", rtinfo, error);
		else
			kprintf("rti %p succeeded\n", rtinfo);
	}
#endif
	crit_exit();
	return (error);
}

/*
 * Called from rtrequest(RTM_DELETE, ...) to fix up the route's ``family''
 * (i.e., the routes related to it by the operation of cloning).  This
 * routine is iterated over all potential former-child-routes by way of
 * rnh->rnh_walktree_from() above, and those that actually are children of
 * the late parent (passed in as VP here) are themselves deleted.
 */
static int
rt_fixdelete(struct radix_node *rn, void *vp)
{
	struct rtentry *rt = (struct rtentry *)rn;
	struct rtentry *rt0 = vp;

	if (rt->rt_parent == rt0 &&
	    !(rt->rt_flags & (RTF_PINNED | RTF_CLONING | RTF_PRCLONING))) {
		return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
				 rt->rt_flags, NULL);
	}
	return 0;
}

/*
 * This routine is called from rt_setgate() to do the analogous thing for
 * adds and changes.  There is the added complication in this case of a
 * middle insert; i.e., insertion of a new network route between an older
 * network route and (cloned) host routes.  For this reason, a simple check
 * of rt->rt_parent is insufficient; each candidate route must be tested
 * against the (mask, value) of the new route (passed as before in vp)
 * to see if the new route matches it.
 *
 * XXX - it may be possible to do fixdelete() for changes and reserve this
 * routine just for adds.  I'm not sure why I thought it was necessary to do
 * changes this way.
 */
#ifdef DEBUG
static int rtfcdebug = 0;
#endif

static int
rt_fixchange(struct radix_node *rn, void *vp)
{
	struct rtentry *rt = (struct rtentry *)rn;
	struct rtfc_arg *ap = vp;
	struct rtentry *rt0 = ap->rt0;
	struct radix_node_head *rnh = ap->rnh;
	u_char *xk1, *xm1, *xk2, *xmp;
	int i, len, mlen;

#ifdef DEBUG
	if (rtfcdebug)
		kprintf("rt_fixchange: rt %p, rt0 %p\n", rt, rt0);
#endif

	if (rt->rt_parent == NULL ||
	    (rt->rt_flags & (RTF_PINNED | RTF_CLONING | RTF_PRCLONING))) {
#ifdef DEBUG
		if (rtfcdebug) kprintf("no parent, pinned or cloning\n");
#endif
		return 0;
	}

	if (rt->rt_parent == rt0) {
#ifdef DEBUG
		if (rtfcdebug) kprintf("parent match\n");
#endif
		return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
				 rt->rt_flags, NULL);
	}

	/*
	 * There probably is a function somewhere which does this...
	 * if not, there should be.
	 */
	len = imin(rt_key(rt0)->sa_len, rt_key(rt)->sa_len);

	xk1 = (u_char *)rt_key(rt0);
	xm1 = (u_char *)rt_mask(rt0);
	xk2 = (u_char *)rt_key(rt);

	/* avoid applying a less specific route */
	xmp = (u_char *)rt_mask(rt->rt_parent);
	mlen = rt_key(rt->rt_parent)->sa_len;
	if (mlen > rt_key(rt0)->sa_len) {
#ifdef DEBUG
		if (rtfcdebug)
			kprintf("rt_fixchange: inserting a less "
				"specific route\n");
#endif
		return 0;
	}
	for (i = rnh->rnh_treetop->rn_offset; i < mlen; i++) {
		if ((xmp[i] & ~(xmp[i] ^ xm1[i])) != xmp[i]) {
#ifdef DEBUG
			if (rtfcdebug)
				kprintf("rt_fixchange: inserting a less "
					"specific route\n");
#endif
			return 0;
		}
	}

	for (i = rnh->rnh_treetop->rn_offset; i < len; i++) {
		if ((xk2[i] & xm1[i]) != xk1[i]) {
#ifdef DEBUG
			if (rtfcdebug) kprintf("no match\n");
#endif
			return 0;
		}
	}

	/*
	 * OK, this node is a clone, and matches the node currently being
	 * changed/added under the node's mask.  So, get rid of it.
	 */
#ifdef DEBUG
	if (rtfcdebug) kprintf("deleting\n");
#endif
	return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
			 rt->rt_flags, NULL);
}

/*
 * Round a sockaddr length up to a multiple of sizeof(long); a zero
 * length rounds up to sizeof(long) so that an empty sockaddr still
 * gets storage.
 */
#define ROUNDUP(a) (a>0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))

int
rt_setgate(struct rtentry *rt0, struct sockaddr *dst, struct sockaddr *gate)
{
	char *space, *oldspace;
	int dlen = ROUNDUP(dst->sa_len), glen = ROUNDUP(gate->sa_len);
	struct rtentry *rt = rt0;
	struct radix_node_head *rnh = rt_tables[mycpuid][dst->sa_family];

	/*
	 * A host route with the destination equal to the gateway
	 * will interfere with keeping LLINFO in the routing
	 * table, so disallow it.
	 */
	if (((rt0->rt_flags & (RTF_HOST | RTF_GATEWAY | RTF_LLINFO)) ==
	     (RTF_HOST | RTF_GATEWAY)) &&
	    dst->sa_len == gate->sa_len &&
	    sa_equal(dst, gate)) {
		/*
		 * The route might already exist if this is an RTM_CHANGE
		 * or a routing redirect, so try to delete it.
		 */
		if (rt_key(rt0) != NULL)
			rtrequest(RTM_DELETE, rt_key(rt0), rt0->rt_gateway,
				  rt_mask(rt0), rt0->rt_flags, NULL);
		return EADDRNOTAVAIL;
	}

	/*
	 * Both dst and gateway are stored in the same malloc'ed chunk
	 * (If I ever get my hands on....)
	 * if we need to malloc a new chunk, then keep the old one around
	 * till we don't need it any more.
	 */
	if (rt->rt_gateway == NULL || glen > ROUNDUP(rt->rt_gateway->sa_len)) {
		oldspace = (char *)rt_key(rt);
		R_Malloc(space, char *, dlen + glen);
		if (space == NULL)
			return ENOBUFS;
		rt->rt_nodes->rn_key = space;
	} else {
		space = (char *)rt_key(rt);	/* Just use the old space. */
		oldspace = NULL;
	}

	/* Set the gateway value. */
	rt->rt_gateway = (struct sockaddr *)(space + dlen);
	bcopy(gate, rt->rt_gateway, glen);

	if (oldspace != NULL) {
		/*
		 * If we allocated a new chunk, preserve the original dst.
		 * This way, rt_setgate() really just sets the gate
		 * and leaves the dst field alone.
		 */
		bcopy(dst, space, dlen);
		Free(oldspace);
	}

	/*
	 * If there is already a gwroute, it's now almost definitely wrong
	 * so drop it.
	 */
	if (rt->rt_gwroute != NULL) {
		RTFREE(rt->rt_gwroute);
		rt->rt_gwroute = NULL;
	}
	if (rt->rt_flags & RTF_GATEWAY) {
		/*
		 * Cloning loop avoidance: In the presence of
		 * protocol-cloning and bad configuration, it is
		 * possible to get stuck in bottomless mutual recursion
		 * (rtrequest rt_setgate rtlookup).  We avoid this
		 * by not allowing protocol-cloning to operate for
		 * gateways (which is probably the correct choice
		 * anyway), and avoid the resulting reference loops
		 * by disallowing any route to run through itself as
		 * a gateway.  This is obviously mandatory when we
		 * get rt->rt_output().
		 *
		 * This breaks TTCP for hosts outside the gateway!  XXX JH
		 */
		rt->rt_gwroute = _rtlookup(gate, RTL_REPORTMSG, RTF_PRCLONING);
		if (rt->rt_gwroute == rt) {
			rt->rt_gwroute = NULL;
			--rt->rt_refcnt;
			return EDQUOT;	/* failure */
		}
	}

	/*
	 * This isn't going to do anything useful for host routes, so
	 * don't bother.  Also make sure we have a reasonable mask
	 * (we don't yet have one during adds).
	 */
	if (!(rt->rt_flags & RTF_HOST) && rt_mask(rt) != NULL) {
		struct rtfc_arg arg = { rt, rnh };

		rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),
				       (char *)rt_mask(rt),
				       rt_fixchange, &arg);
	}

	return 0;
}

/*
 * Copy "src" into "dst" with the host bits cleared according to
 * "netmask".  The sa_len and sa_family bytes are copied verbatim and
 * any bytes of "dst" beyond the masked region are zeroed.
 */
static void
rt_maskedcopy(
	struct sockaddr *src,
	struct sockaddr *dst,
	struct sockaddr *netmask)
{
	u_char *cp1 = (u_char *)src;
	u_char *cp2 = (u_char *)dst;
	u_char *cp3 = (u_char *)netmask;
	u_char *cplim = cp2 + *cp3;
	u_char *cplim2 = cp2 + *cp1;

	*cp2++ = *cp1++; *cp2++ = *cp1++; /* copies sa_len & sa_family */
	cp3 += 2;
	if (cplim > cplim2)
		cplim = cplim2;
	while (cp2 < cplim)
		*cp2++ = *cp1++ & *cp3++;
	if (cp2 < cplim2)
		bzero(cp2, cplim2 - cp2);
}

/*
 * Determine the route to actually use for output to "dst": revalidate
 * rt0 if it is no longer up and, for gateway routes, (re)establish
 * rt_gwroute.  The resulting route is returned in *drt; rejected or
 * unreachable destinations return an error.
 */
int
rt_llroute(struct sockaddr *dst, struct rtentry *rt0, struct rtentry **drt)
{
	struct rtentry *up_rt, *rt;

	if (!(rt0->rt_flags & RTF_UP)) {
		up_rt = rtlookup(dst);
		if (up_rt == NULL)
			return (EHOSTUNREACH);
		up_rt->rt_refcnt--;
	} else
		up_rt = rt0;
	if (up_rt->rt_flags & RTF_GATEWAY) {
		if (up_rt->rt_gwroute == NULL) {
			up_rt->rt_gwroute = rtlookup(up_rt->rt_gateway);
			if (up_rt->rt_gwroute == NULL)
				return (EHOSTUNREACH);
		} else if (!(up_rt->rt_gwroute->rt_flags & RTF_UP)) {
			rtfree(up_rt->rt_gwroute);
			up_rt->rt_gwroute = rtlookup(up_rt->rt_gateway);
			if (up_rt->rt_gwroute == NULL)
				return (EHOSTUNREACH);
		}
		rt = up_rt->rt_gwroute;
	} else
		rt = up_rt;
	if (rt->rt_flags & RTF_REJECT &&
	    (rt->rt_rmx.rmx_expire == 0 ||	   /* rt doesn't expire */
	     time_second < rt->rt_rmx.rmx_expire)) /* rt not expired */
		return (rt->rt_flags & RTF_HOST ? EHOSTDOWN : EHOSTUNREACH);
	*drt = rt;
	return 0;
}

#ifdef ROUTE_DEBUG

/*
 * Print out a route table entry.
 */
void
rt_print(struct rt_addrinfo *rtinfo, struct rtentry *rn)
{
	kprintf("rti %p cpu %d route %p flags %08lx: ",
		rtinfo, mycpuid, rn, rn->rt_flags);
	sockaddr_print(rt_key(rn));
	kprintf(" mask ");
	sockaddr_print(rt_mask(rn));
	kprintf(" gw ");
	sockaddr_print(rn->rt_gateway);
	kprintf(" ifc \"%s\"", rn->rt_ifp ? rn->rt_ifp->if_dname : "?");
	kprintf(" ifa %p\n", rn->rt_ifa);
}

void
rt_addrinfo_print(int cmd, struct rt_addrinfo *rti)
{
	int didit = 0;
	int i;

#ifdef ROUTE_DEBUG
	if (cmd == RTM_DELETE && route_debug > 1)
		db_print_backtrace();
#endif

	switch(cmd) {
	case RTM_ADD:
		kprintf("ADD ");
		break;
	case RTM_RESOLVE:
		kprintf("RES ");
		break;
	case RTM_DELETE:
		kprintf("DEL ");
		break;
	default:
		kprintf("C%02d ", cmd);
		break;
	}
	kprintf("rti %p cpu %d ", rti, mycpuid);
	for (i = 0; i < rti->rti_addrs; ++i) {
		if (rti->rti_info[i] == NULL)
			continue;
		if (didit)
			kprintf(" ,");
		switch(i) {
		case RTAX_DST:
			kprintf("(DST ");
			break;
		case RTAX_GATEWAY:
			kprintf("(GWY ");
			break;
		case RTAX_NETMASK:
			kprintf("(MSK ");
			break;
		case RTAX_GENMASK:
			kprintf("(GEN ");
			break;
		case RTAX_IFP:
			kprintf("(IFP ");
			break;
		case RTAX_IFA:
			kprintf("(IFA ");
			break;
		case RTAX_AUTHOR:
			kprintf("(AUT ");
			break;
		case RTAX_BRD:
			kprintf("(BRD ");
			break;
		default:
			kprintf("(?%02d ", i);
			break;
		}
		sockaddr_print(rti->rti_info[i]);
		kprintf(")");
		didit = 1;
	}
	kprintf("\n");
}

void
sockaddr_print(struct sockaddr *sa)
{
	struct sockaddr_in *sa4;
	struct sockaddr_in6 *sa6;
	int len;
	int i;

	if (sa == NULL) {
		kprintf("NULL");
		return;
	}

	len = sa->sa_len - offsetof(struct sockaddr, sa_data[0]);

	switch(sa->sa_family) {
	case AF_INET:
	case AF_INET6:
	default:
		switch(sa->sa_family) {
		case AF_INET:
			sa4 = (struct sockaddr_in *)sa;
			kprintf("INET %d %d.%d.%d.%d",
				ntohs(sa4->sin_port),
				(ntohl(sa4->sin_addr.s_addr) >> 24) & 255,
				(ntohl(sa4->sin_addr.s_addr) >> 16) & 255,
				(ntohl(sa4->sin_addr.s_addr) >> 8) & 255,
				(ntohl(sa4->sin_addr.s_addr) >> 0) & 255
			);
			break;
		case AF_INET6:
			sa6 = (struct sockaddr_in6 *)sa;
			kprintf("INET6 %d %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x",
				ntohs(sa6->sin6_port),
				sa6->sin6_addr.s6_addr16[0],
				sa6->sin6_addr.s6_addr16[1],
				sa6->sin6_addr.s6_addr16[2],
				sa6->sin6_addr.s6_addr16[3],
				sa6->sin6_addr.s6_addr16[4],
				sa6->sin6_addr.s6_addr16[5],
				sa6->sin6_addr.s6_addr16[6],
				sa6->sin6_addr.s6_addr16[7]
			);
			break;
		default:
			kprintf("AF%d ", sa->sa_family);
			while (len > 0 && sa->sa_data[len-1] == 0)
				--len;

			for (i = 0; i < len; ++i) {
				if (i)
					kprintf(".");
				kprintf("%d", (unsigned char)sa->sa_data[i]);
			}
			break;
		}
	}
}

#endif

/*
 * Set up a routing table entry, normally for an interface.
 */
int
rtinit(struct ifaddr *ifa, int cmd, int flags)
{
	struct sockaddr *dst, *deldst, *netmask;
	struct mbuf *m = NULL;
	struct radix_node_head *rnh;
	struct radix_node *rn;
	struct rt_addrinfo rtinfo;
	int error;

	if (flags & RTF_HOST) {
		dst = ifa->ifa_dstaddr;
		netmask = NULL;
	} else {
		dst = ifa->ifa_addr;
		netmask = ifa->ifa_netmask;
	}
	/*
	 * If it's a delete, check that if it exists, it's on the correct
	 * interface or we might scrub a route to another ifa which would
	 * be confusing at best and possibly worse.
	 */
	if (cmd == RTM_DELETE) {
		/*
		 * It's a delete, so it should already exist..
		 * If it's a net, mask off the host bits
		 * (assuming we have a mask).
		 */
		if (netmask != NULL) {
			m = m_get(MB_DONTWAIT, MT_SONAME);
			if (m == NULL)
				return (ENOBUFS);
			deldst = mtod(m, struct sockaddr *);
			rt_maskedcopy(dst, deldst, netmask);
			dst = deldst;
		}
		/*
		 * Look up an rtentry that is in the routing tree and
		 * contains the correct info.
		 */
		if ((rnh = rt_tables[mycpuid][dst->sa_family]) == NULL ||
		    (rn = rnh->rnh_lookup((char *)dst,
					  (char *)netmask, rnh)) == NULL ||
		    ((struct rtentry *)rn)->rt_ifa != ifa ||
		    !sa_equal((struct sockaddr *)rn->rn_key, dst)) {
			if (m != NULL)
				m_free(m);
			return (flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
		}
		/* XXX */
#if 0
		else {
			/*
			 * One would think that as we are deleting, and we know
			 * it doesn't exist, we could just return at this point
			 * with an "ELSE" clause, but apparently not..
			 */
			return (flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
		}
#endif
	}
	/*
	 * Do the actual request.
	 */
	bzero(&rtinfo, sizeof(struct rt_addrinfo));
	rtinfo.rti_info[RTAX_DST] = dst;
	rtinfo.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
	rtinfo.rti_info[RTAX_NETMASK] = netmask;
	rtinfo.rti_flags = flags | ifa->ifa_flags;
	rtinfo.rti_ifa = ifa;
	error = rtrequest1_global(cmd, &rtinfo, rtinit_rtrequest_callback, ifa);
	if (m != NULL)
		m_free(m);
	return (error);
}

static void
rtinit_rtrequest_callback(int cmd, int error,
			  struct rt_addrinfo *rtinfo, struct rtentry *rt,
			  void *arg)
{
	struct ifaddr *ifa = arg;

	if (error == 0 && rt) {
		if (mycpuid == 0) {
			++rt->rt_refcnt;
			rt_newaddrmsg(cmd, ifa, error, rt);
			--rt->rt_refcnt;
		}
		if (cmd == RTM_DELETE) {
			if (rt->rt_refcnt == 0) {
				++rt->rt_refcnt;
				rtfree(rt);
			}
		}
	}
}

/* This must be before ip6_init2(), which is now SI_ORDER_MIDDLE. */
SYSINIT(route, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, route_init, 0);
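
/*
 * Example (sketch): a kernel consumer may install or remove a route in
 * the replicated tables with rtrequest_global().  The addresses below
 * are placeholders only; real callers build them from configuration or
 * protocol state.  Under SMP the request is dispatched to cpu 0's
 * rtable thread and then chained across all cpus by
 * rtrequest1_msghandler():
 *
 *	struct sockaddr_in dst, gw;
 *	int error;
 *
 *	bzero(&dst, sizeof(dst));
 *	dst.sin_len = sizeof(dst);
 *	dst.sin_family = AF_INET;
 *	dst.sin_addr.s_addr = htonl(0x0a000063);	(placeholder: 10.0.0.99)
 *
 *	bzero(&gw, sizeof(gw));
 *	gw.sin_len = sizeof(gw);
 *	gw.sin_family = AF_INET;
 *	gw.sin_addr.s_addr = htonl(0x0a000001);		(placeholder: 10.0.0.1)
 *
 *	error = rtrequest_global(RTM_ADD, (struct sockaddr *)&dst,
 *				 (struct sockaddr *)&gw, NULL,
 *				 RTF_HOST | RTF_GATEWAY | RTF_STATIC);
 */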