1 /* $OpenBSD: route.c,v 1.105 2009/03/15 19:40:41 miod Exp $ */ 2 /* $NetBSD: route.c,v 1.14 1996/02/13 22:00:46 christos Exp $ */ 3 4 /* 5 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the name of the project nor the names of its contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 31 */ 32 33 /* 34 * Copyright (c) 1980, 1986, 1991, 1993 35 * The Regents of the University of California. All rights reserved. 
36 * 37 * Redistribution and use in source and binary forms, with or without 38 * modification, are permitted provided that the following conditions 39 * are met: 40 * 1. Redistributions of source code must retain the above copyright 41 * notice, this list of conditions and the following disclaimer. 42 * 2. Redistributions in binary form must reproduce the above copyright 43 * notice, this list of conditions and the following disclaimer in the 44 * documentation and/or other materials provided with the distribution. 45 * 3. Neither the name of the University nor the names of its contributors 46 * may be used to endorse or promote products derived from this software 47 * without specific prior written permission. 48 * 49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 59 * SUCH DAMAGE. 60 * 61 * @(#)route.c 8.2 (Berkeley) 11/15/93 62 */ 63 64 /* 65 * @(#)COPYRIGHT 1.1 (NRL) 17 January 1995 66 * 67 * NRL grants permission for redistribution and use in source and binary 68 * forms, with or without modification, of the software and documentation 69 * created at NRL provided that the following conditions are met: 70 * 71 * 1. Redistributions of source code must retain the above copyright 72 * notice, this list of conditions and the following disclaimer. 
73 * 2. Redistributions in binary form must reproduce the above copyright 74 * notice, this list of conditions and the following disclaimer in the 75 * documentation and/or other materials provided with the distribution. 76 * 3. All advertising materials mentioning features or use of this software 77 * must display the following acknowledgements: 78 * This product includes software developed by the University of 79 * California, Berkeley and its contributors. 80 * This product includes software developed at the Information 81 * Technology Division, US Naval Research Laboratory. 82 * 4. Neither the name of the NRL nor the names of its contributors 83 * may be used to endorse or promote products derived from this software 84 * without specific prior written permission. 85 * 86 * THE SOFTWARE PROVIDED BY NRL IS PROVIDED BY NRL AND CONTRIBUTORS ``AS 87 * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 88 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 89 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NRL OR 90 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 91 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 92 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 93 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 94 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 95 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 96 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 97 * 98 * The views and conclusions contained in the software and documentation 99 * are those of the authors and should not be interpreted as representing 100 * official policies, either expressed or implied, of the US Naval 101 * Research Laboratory (NRL). 
102 */ 103 104 #include <sys/param.h> 105 #include <sys/systm.h> 106 #include <sys/proc.h> 107 #include <sys/mbuf.h> 108 #include <sys/socket.h> 109 #include <sys/socketvar.h> 110 #include <sys/domain.h> 111 #include <sys/protosw.h> 112 #include <sys/ioctl.h> 113 #include <sys/kernel.h> 114 #include <sys/queue.h> 115 #include <sys/pool.h> 116 117 #include <net/if.h> 118 #include <net/route.h> 119 #include <net/raw_cb.h> 120 121 #include <netinet/in.h> 122 #include <netinet/in_var.h> 123 124 #ifdef MPLS 125 #include <netmpls/mpls.h> 126 #endif 127 128 #ifdef IPSEC 129 #include <netinet/ip_ipsp.h> 130 #include <net/if_enc.h> 131 132 struct ifaddr *encap_findgwifa(struct sockaddr *); 133 #endif 134 135 #define SA(p) ((struct sockaddr *)(p)) 136 137 struct route_cb route_cb; 138 struct rtstat rtstat; 139 struct radix_node_head ***rt_tables; 140 u_int8_t af2rtafidx[AF_MAX+1]; 141 u_int8_t rtafidx_max; 142 u_int rtbl_id_max = 0; 143 144 int rttrash; /* routes not in table but not freed */ 145 146 struct pool rtentry_pool; /* pool for rtentry structures */ 147 struct pool rttimer_pool; /* pool for rttimer structures */ 148 149 int rtable_init(struct radix_node_head ***); 150 int okaytoclone(u_int, int); 151 int rtflushclone1(struct radix_node *, void *); 152 void rtflushclone(struct radix_node_head *, struct rtentry *); 153 int rt_if_remove_rtdelete(struct radix_node *, void *); 154 #ifndef SMALL_KERNEL 155 int rt_if_linkstate_change(struct radix_node *, void *); 156 #endif 157 158 #define LABELID_MAX 50000 159 160 struct rt_label { 161 TAILQ_ENTRY(rt_label) rtl_entry; 162 char rtl_name[RTLABEL_LEN]; 163 u_int16_t rtl_id; 164 int rtl_ref; 165 }; 166 167 TAILQ_HEAD(rt_labels, rt_label) rt_labels = TAILQ_HEAD_INITIALIZER(rt_labels); 168 169 #ifdef IPSEC 170 struct ifaddr * 171 encap_findgwifa(struct sockaddr *gw) 172 { 173 return (TAILQ_FIRST(&encif[0].sc_if.if_addrlist)); 174 } 175 #endif 176 177 int 178 rtable_init(struct radix_node_head ***table) 179 { 180 void **p; 181 
struct domain *dom; 182 183 if ((p = malloc(sizeof(void *) * (rtafidx_max + 1), M_RTABLE, 184 M_NOWAIT|M_ZERO)) == NULL) 185 return (-1); 186 187 /* 2nd pass: attach */ 188 for (dom = domains; dom != NULL; dom = dom->dom_next) 189 if (dom->dom_rtattach) 190 dom->dom_rtattach(&p[af2rtafidx[dom->dom_family]], 191 dom->dom_rtoffset); 192 193 *table = (struct radix_node_head **)p; 194 return (0); 195 } 196 197 void 198 route_init() 199 { 200 struct domain *dom; 201 202 pool_init(&rtentry_pool, sizeof(struct rtentry), 0, 0, 0, "rtentpl", 203 NULL); 204 rn_init(); /* initialize all zeroes, all ones, mask table */ 205 206 bzero(af2rtafidx, sizeof(af2rtafidx)); 207 rtafidx_max = 1; /* must have NULL at index 0, so start at 1 */ 208 209 /* find out how many tables to allocate */ 210 for (dom = domains; dom != NULL; dom = dom->dom_next) 211 if (dom->dom_rtattach) 212 af2rtafidx[dom->dom_family] = rtafidx_max++; 213 214 if (rtable_add(0) == -1) 215 panic("route_init rtable_add"); 216 } 217 218 int 219 rtable_add(u_int id) /* must be called at splsoftnet */ 220 { 221 void *p; 222 223 if (id > RT_TABLEID_MAX) 224 return (-1); 225 226 if (id == 0 || id > rtbl_id_max) { 227 size_t newlen = sizeof(void *) * (id+1); 228 229 if ((p = malloc(newlen, M_RTABLE, M_NOWAIT|M_ZERO)) == NULL) 230 return (-1); 231 if (id > 0) { 232 bcopy(rt_tables, p, sizeof(void *) * (rtbl_id_max+1)); 233 free(rt_tables, M_RTABLE); 234 } 235 rt_tables = p; 236 rtbl_id_max = id; 237 } 238 239 if (rt_tables[id] != NULL) /* already exists */ 240 return (-1); 241 242 return (rtable_init(&rt_tables[id])); 243 } 244 245 int 246 rtable_exists(u_int id) /* verify table with that ID exists */ 247 { 248 if (id > RT_TABLEID_MAX) 249 return (0); 250 251 if (id > rtbl_id_max) 252 return (0); 253 254 if (rt_tables[id] == NULL) /* should not happen */ 255 return (0); 256 257 return (1); 258 } 259 260 #include "pf.h" 261 #if NPF > 0 262 void 263 rtalloc_noclone(struct route *ro, int howstrict) 264 { 265 if (ro->ro_rt && 
ro->ro_rt->rt_ifp && (ro->ro_rt->rt_flags & RTF_UP)) 266 return; /* XXX */ 267 ro->ro_rt = rtalloc2(&ro->ro_dst, 1, howstrict); 268 } 269 270 int 271 okaytoclone(u_int flags, int howstrict) 272 { 273 if (howstrict == ALL_CLONING) 274 return (1); 275 if (howstrict == ONNET_CLONING && !(flags & RTF_GATEWAY)) 276 return (1); 277 return (0); 278 } 279 280 struct rtentry * 281 rtalloc2(struct sockaddr *dst, int report, int howstrict) 282 { 283 struct radix_node_head *rnh; 284 struct rtentry *rt; 285 struct radix_node *rn; 286 struct rtentry *newrt = 0; 287 struct rt_addrinfo info; 288 int s = splnet(), err = 0, msgtype = RTM_MISS; 289 290 bzero(&info, sizeof(info)); 291 info.rti_info[RTAX_DST] = dst; 292 293 rnh = rt_gettable(dst->sa_family, 0); 294 if (rnh && (rn = rnh->rnh_matchaddr((caddr_t)dst, rnh)) && 295 ((rn->rn_flags & RNF_ROOT) == 0)) { 296 newrt = rt = (struct rtentry *)rn; 297 if (report && (rt->rt_flags & RTF_CLONING) && 298 okaytoclone(rt->rt_flags, howstrict)) { 299 err = rtrequest1(RTM_RESOLVE, &info, RTP_DEFAULT, 300 &newrt, 0); 301 if (err) { 302 newrt = rt; 303 rt->rt_refcnt++; 304 goto miss; 305 } 306 if ((rt = newrt) && (rt->rt_flags & RTF_XRESOLVE)) { 307 msgtype = RTM_RESOLVE; 308 goto miss; 309 } 310 } else 311 rt->rt_refcnt++; 312 } else { 313 rtstat.rts_unreach++; 314 miss: 315 if (report) { 316 rt_missmsg(msgtype, &info, 0, NULL, err, 0); 317 } 318 } 319 splx(s); 320 return (newrt); 321 } 322 #endif /* NPF > 0 */ 323 324 /* 325 * Packet routing routines. 
/*
 * Packet routing routines.
 */

/*
 * Fill in ro->ro_rt with a route to ro->ro_dst from table 0, reusing
 * the cached route if it is still up and bound to an interface.
 */
void
rtalloc(struct route *ro)
{
	if (ro->ro_rt && ro->ro_rt->rt_ifp && (ro->ro_rt->rt_flags & RTF_UP))
		return;				/* XXX */
	ro->ro_rt = rtalloc1(&ro->ro_dst, 1, 0);
}

/*
 * Look up dst in routing table `tableid' and return a referenced
 * rtentry, or NULL if nothing matches.  A matched RTF_CLONING route is
 * resolved into a cloned host route (when `report' is set), and
 * RTM_ADD/RTM_MISS/RTM_RESOLVE messages are posted to routing-socket
 * listeners as appropriate.  PF_KEY lookups never generate RTM_MISS.
 */
struct rtentry *
rtalloc1(struct sockaddr *dst, int report, u_int tableid)
{
	struct radix_node_head *rnh;
	struct rtentry *rt;
	struct radix_node *rn;
	struct rtentry *newrt = 0;
	struct rt_addrinfo info;
	int s = splsoftnet(), err = 0, msgtype = RTM_MISS;

	bzero(&info, sizeof(info));
	info.rti_info[RTAX_DST] = dst;

	rnh = rt_gettable(dst->sa_family, tableid);
	if (rnh && (rn = rnh->rnh_matchaddr((caddr_t)dst, rnh)) &&
	    ((rn->rn_flags & RNF_ROOT) == 0)) {
		newrt = rt = (struct rtentry *)rn;
		if (report && (rt->rt_flags & RTF_CLONING)) {
			/* clone a host route off the matched network route */
			err = rtrequest1(RTM_RESOLVE, &info, RTP_DEFAULT,
			    &newrt, tableid);
			if (err) {
				/* cloning failed: hand back the parent */
				newrt = rt;
				rt->rt_refcnt++;
				goto miss;
			}
			if ((rt = newrt) && (rt->rt_flags & RTF_XRESOLVE)) {
				/* external resolver must finish the job */
				msgtype = RTM_RESOLVE;
				goto miss;
			}
			/* Inform listeners of the new route */
			bzero(&info, sizeof(info));
			info.rti_info[RTAX_DST] = rt_key(rt);
			info.rti_info[RTAX_NETMASK] = rt_mask(rt);
			info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
			if (rt->rt_ifp != NULL) {
				info.rti_info[RTAX_IFP] =
				    TAILQ_FIRST(&rt->rt_ifp->if_addrlist)->ifa_addr;
				info.rti_info[RTAX_IFA] = rt->rt_ifa->ifa_addr;
			}
			rt_missmsg(RTM_ADD, &info, rt->rt_flags,
			    rt->rt_ifp, 0, tableid);
		} else
			rt->rt_refcnt++;
	} else {
		if (dst->sa_family != PF_KEY)
			rtstat.rts_unreach++;
		/*
		 * IP encapsulation does lots of lookups where we don't need nor want
		 * the RTM_MISSes that would be generated.  It causes RTM_MISS storms
		 * sent upward breaking user-level routing queries.
		 */
miss:
		if (report && dst->sa_family != PF_KEY) {
			bzero((caddr_t)&info, sizeof(info));
			info.rti_info[RTAX_DST] = dst;
			rt_missmsg(msgtype, &info, 0, NULL, err, tableid);
		}
	}
	splx(s);
	return (newrt);
}

/*
 * Drop one reference on rt and release it once it is both unreferenced
 * and no longer up.  Frees the timers, ifaddr reference, label, MPLS
 * info, key storage and finally the rtentry itself.  A negative
 * refcount is reported but the entry is deliberately leaked.
 */
void
rtfree(struct rtentry *rt)
{
	struct ifaddr *ifa;

	if (rt == NULL)
		panic("rtfree");

	rt->rt_refcnt--;

	if (rt->rt_refcnt <= 0 && (rt->rt_flags & RTF_UP) == 0) {
		if (rt->rt_refcnt == 0 && (rt->rt_nodes->rn_flags & RNF_ACTIVE))
			return; /* route still active but currently down */
		if (rt->rt_nodes->rn_flags & (RNF_ACTIVE | RNF_ROOT))
			panic("rtfree 2");
		rttrash--;
		if (rt->rt_refcnt < 0) {
			printf("rtfree: %p not freed (neg refs)\n", rt);
			return;
		}
		rt_timer_remove_all(rt);
		ifa = rt->rt_ifa;
		if (ifa)
			IFAFREE(ifa);
		rtlabel_unref(rt->rt_labelid);
#ifdef MPLS
		if (rt->rt_flags & RTF_MPLS)
			free(rt->rt_llinfo, M_TEMP);
#endif
		Free(rt_key(rt));
		pool_put(&rtentry_pool, rt);
	}
}

/*
 * Drop one reference on an ifaddr, freeing it when the count was
 * already zero (i.e. this was the final reference).
 */
void
ifafree(struct ifaddr *ifa)
{
	if (ifa == NULL)
		panic("ifafree");
	if (ifa->ifa_refcnt == 0)
		free(ifa, M_IFADDR);
	else
		ifa->ifa_refcnt--;
}
/*
 * Force a routing table entry to the specified
 * destination to go through the given gateway.
 * Normally called as a result of a routing redirect
 * message from the network layer.
 *
 * N.B.: must be called at splsoftnet
 */
void
rtredirect(struct sockaddr *dst, struct sockaddr *gateway,
    struct sockaddr *netmask, int flags, struct sockaddr *src,
    struct rtentry **rtp)
{
	struct rtentry *rt;
	int error = 0;
	u_int32_t *stat = NULL;
	struct rt_addrinfo info;
	struct ifaddr *ifa;
	struct ifnet *ifp = NULL;

	splsoftassert(IPL_SOFTNET);

	/* verify the gateway is directly reachable */
	if ((ifa = ifa_ifwithnet(gateway)) == NULL) {
		error = ENETUNREACH;
		goto out;
	}
	ifp = ifa->ifa_ifp;
	rt = rtalloc1(dst, 0, 0);
	/*
	 * If the redirect isn't from our current router for this dst,
	 * it's either old or wrong.  If it redirects us to ourselves,
	 * we have a routing loop, perhaps as a result of an interface
	 * going down recently.
	 */
#define equal(a1, a2) \
	((a1)->sa_len == (a2)->sa_len && \
	 bcmp((caddr_t)(a1), (caddr_t)(a2), (a1)->sa_len) == 0)
	if (!(flags & RTF_DONE) && rt &&
	    (!equal(src, rt->rt_gateway) || rt->rt_ifa != ifa))
		error = EINVAL;
	else if (ifa_ifwithaddr(gateway) != NULL)
		error = EHOSTUNREACH;
	if (error)
		goto done;
	/*
	 * Create a new entry if we just got back a wildcard entry
	 * or the lookup failed.  This is necessary for hosts
	 * which use routing redirects generated by smart gateways
	 * to dynamically build the routing tables.
	 */
	if ((rt == NULL) || (rt_mask(rt) && rt_mask(rt)->sa_len < 2))
		goto create;
	/*
	 * Don't listen to the redirect if it's
	 * for a route to an interface.
	 */
	if (rt->rt_flags & RTF_GATEWAY) {
		if (((rt->rt_flags & RTF_HOST) == 0) && (flags & RTF_HOST)) {
			/*
			 * Changing from route to net => route to host.
			 * Create new route, rather than smashing route to net.
			 */
create:
			if (rt)
				rtfree(rt);
			flags |= RTF_GATEWAY | RTF_DYNAMIC;
			bzero(&info, sizeof(info));
			info.rti_info[RTAX_DST] = dst;
			info.rti_info[RTAX_GATEWAY] = gateway;
			info.rti_info[RTAX_NETMASK] = netmask;
			info.rti_ifa = ifa;
			info.rti_flags = flags;
			rt = NULL;
			error = rtrequest1(RTM_ADD, &info, RTP_DEFAULT, &rt, 0);
			if (rt != NULL)
				flags = rt->rt_flags;
			stat = &rtstat.rts_dynamic;
		} else {
			/*
			 * Smash the current notion of the gateway to
			 * this destination.  Should check about netmask!!!
			 */
			rt->rt_flags |= RTF_MODIFIED;
			flags |= RTF_MODIFIED;
			stat = &rtstat.rts_newgateway;
			rt_setgate(rt, rt_key(rt), gateway, 0);
		}
	} else
		error = EHOSTUNREACH;
done:
	if (rt) {
		if (rtp && !error)
			*rtp = rt;	/* hand the reference to the caller */
		else
			rtfree(rt);
	}
out:
	if (error)
		rtstat.rts_badredirect++;
	else if (stat != NULL)
		(*stat)++;
	/* always tell listeners what happened, success or failure */
	bzero((caddr_t)&info, sizeof(info));
	info.rti_info[RTAX_DST] = dst;
	info.rti_info[RTAX_GATEWAY] = gateway;
	info.rti_info[RTAX_NETMASK] = netmask;
	info.rti_info[RTAX_AUTHOR] = src;
	rt_missmsg(RTM_REDIRECT, &info, flags, ifp, error, 0);
}

/*
 * Delete a route and generate a message.
 * The route's key/mask/gateway are captured before the delete so the
 * routing-socket message is accurate even after the entry is unlinked.
 */
int
rtdeletemsg(struct rtentry *rt, u_int tableid)
{
	int error;
	struct rt_addrinfo info;
	struct ifnet *ifp;

	/*
	 * Request the new route so that the entry is not actually
	 * deleted.  That will allow the information being reported to
	 * be accurate (and consistent with route_output()).
	 */
	bzero((caddr_t)&info, sizeof(info));
	info.rti_info[RTAX_DST] = rt_key(rt);
	info.rti_info[RTAX_NETMASK] = rt_mask(rt);
	info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
	info.rti_flags = rt->rt_flags;
	ifp = rt->rt_ifp;
	error = rtrequest1(RTM_DELETE, &info, rt->rt_priority, &rt, tableid);

	rt_missmsg(RTM_DELETE, &info, info.rti_flags, ifp, error, tableid);

	/* Adjust the refcount */
	if (error == 0 && rt->rt_refcnt <= 0) {
		rt->rt_refcnt++;
		rtfree(rt);
	}
	return (error);
}

/*
 * rnh_walktree callback for rtflushclone(): delete rn if it is a
 * clone of the parent route passed in arg.
 */
int
rtflushclone1(struct radix_node *rn, void *arg)
{
	struct rtentry *rt, *parent;

	rt = (struct rtentry *)rn;
	parent = (struct rtentry *)arg;
	if ((rt->rt_flags & RTF_CLONED) != 0 && rt->rt_parent == parent)
		rtdeletemsg(rt, 0);
	return 0;
}

/*
 * Remove every route cloned from `parent' out of the tree rooted at rnh.
 */
void
rtflushclone(struct radix_node_head *rnh, struct rtentry *parent)
{

#ifdef DIAGNOSTIC
	if (!parent || (parent->rt_flags & RTF_CLONING) == 0)
		panic("rtflushclone: called with a non-cloning route");
	if (!rnh->rnh_walktree)
		panic("rtflushclone: no rnh_walktree");
#endif
	rnh->rnh_walktree(rnh, rtflushclone1, (void *)parent);
}

/*
 * Routing ioctls are not supported; route changes go through the
 * routing socket instead.
 */
int
rtioctl(u_long req, caddr_t data, struct proc *p)
{
	return (EOPNOTSUPP);
}

/*
 * Find the interface address to associate with a route to dst via
 * gateway, given the route flags.  Falls back to a recursive route
 * lookup of the gateway; returns NULL if no suitable ifa exists.
 */
struct ifaddr *
ifa_ifwithroute(int flags, struct sockaddr *dst, struct sockaddr *gateway)
{
	struct ifaddr *ifa;

#ifdef IPSEC
	/*
	 * If the destination is a PF_KEY address, we'll look
	 * for the existence of a encap interface number or address
	 * in the options list of the gateway.  By default, we'll return
	 * enc0.
	 */
	if (dst && (dst->sa_family == PF_KEY))
		return (encap_findgwifa(gateway));
#endif

	if ((flags & RTF_GATEWAY) == 0) {
		/*
		 * If we are adding a route to an interface,
		 * and the interface is a pt to pt link
		 * we should search for the destination
		 * as our clue to the interface.  Otherwise
		 * we can use the local address.
		 */
		ifa = NULL;
		if (flags & RTF_HOST)
			ifa = ifa_ifwithdstaddr(dst);
		if (ifa == NULL)
			ifa = ifa_ifwithaddr(gateway);
	} else {
		/*
		 * If we are adding a route to a remote net
		 * or host, the gateway may still be on the
		 * other end of a pt to pt link.
		 */
		ifa = ifa_ifwithdstaddr(gateway);
	}
	if (ifa == NULL)
		ifa = ifa_ifwithnet(gateway);
	if (ifa == NULL) {
		/* last resort: route to the gateway itself */
		struct rtentry *rt = rtalloc1(gateway, 0, 0);
		if (rt == NULL)
			return (NULL);
		rt->rt_refcnt--;
		/* The gateway must be local if the same address family. */
		if ((rt->rt_flags & RTF_GATEWAY) &&
		    rt_key(rt)->sa_family == dst->sa_family)
			return (0);
		if ((ifa = rt->rt_ifa) == NULL)
			return (NULL);
	}
	if (ifa->ifa_addr->sa_family != dst->sa_family) {
		/* prefer an address of dst's family on the same interface */
		struct ifaddr *oifa = ifa;
		ifa = ifaof_ifpforaddr(dst, ifa->ifa_ifp);
		if (ifa == NULL)
			ifa = oifa;
	}
	return (ifa);
}

/* Round a sockaddr length up to the next multiple of sizeof(long). */
#define ROUNDUP(a) (a>0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))
/*
 * Determine the interface (rti_ifp) and interface address (rti_ifa)
 * for a route request, from whatever hints the caller supplied in
 * info (RTAX_IFP, RTAX_IFA, gateway, destination).  Returns 0 on
 * success or ENETUNREACH if no usable ifa could be found.
 */
int
rt_getifa(struct rt_addrinfo *info)
{
	struct ifaddr *ifa;
	int error = 0;

	/*
	 * ifp may be specified by sockaddr_dl when protocol address
	 * is ambiguous
	 */
	if (info->rti_ifp == NULL && info->rti_info[RTAX_IFP] != NULL
	    && info->rti_info[RTAX_IFP]->sa_family == AF_LINK &&
	    (ifa = ifa_ifwithnet((struct sockaddr *)info->rti_info[RTAX_IFP]))
	    != NULL)
		info->rti_ifp = ifa->ifa_ifp;

	if (info->rti_ifa == NULL && info->rti_info[RTAX_IFA] != NULL)
		info->rti_ifa = ifa_ifwithaddr(info->rti_info[RTAX_IFA]);

	if (info->rti_ifa == NULL) {
		struct sockaddr *sa;

		/* best available hint: IFA, then gateway, then dst */
		if ((sa = info->rti_info[RTAX_IFA]) == NULL)
			if ((sa = info->rti_info[RTAX_GATEWAY]) == NULL)
				sa = info->rti_info[RTAX_DST];

		if (sa != NULL && info->rti_ifp != NULL)
			info->rti_ifa = ifaof_ifpforaddr(sa, info->rti_ifp);
		else if (info->rti_info[RTAX_DST] != NULL &&
		    info->rti_info[RTAX_GATEWAY] != NULL)
			info->rti_ifa = ifa_ifwithroute(info->rti_flags,
			    info->rti_info[RTAX_DST],
			    info->rti_info[RTAX_GATEWAY]);
		else if (sa != NULL)
			info->rti_ifa = ifa_ifwithroute(info->rti_flags,
			    sa, sa);
	}
	if ((ifa = info->rti_ifa) != NULL) {
		if (info->rti_ifp == NULL)
			info->rti_ifp = ifa->ifa_ifp;
	} else
		error = ENETUNREACH;
	return (error);
}

/*
 * Carry out a routing request (RTM_ADD, RTM_DELETE or RTM_RESOLVE)
 * against routing table `tableid'.
 *
 * For RTM_DELETE the removed entry is handed back in *ret_nrt (still
 * referenced) when ret_nrt is non-NULL, otherwise it is freed.
 * For RTM_RESOLVE, *ret_nrt is the cloning parent on entry and the
 * cloned child on return.  For RTM_ADD, *ret_nrt receives the new
 * entry with an extra reference.  `prio' of 0 means derive the
 * priority from the interface.  Returns 0 or an errno.
 */
int
rtrequest1(int req, struct rt_addrinfo *info, u_int8_t prio,
    struct rtentry **ret_nrt, u_int tableid)
{
	int s = splsoftnet(); int error = 0;
	struct rtentry *rt, *crt;
	struct radix_node *rn;
	struct radix_node_head *rnh;
	struct ifaddr *ifa;
	struct sockaddr *ndst;
	struct sockaddr_rtlabel *sa_rl;
#ifdef MPLS
	struct sockaddr_mpls *sa_mpls;
#endif
#define senderr(x) { error = x ; goto bad; }

	if ((rnh = rt_gettable(info->rti_info[RTAX_DST]->sa_family, tableid)) ==
	    NULL)
		senderr(EAFNOSUPPORT);
	if (info->rti_flags & RTF_HOST)
		info->rti_info[RTAX_NETMASK] = NULL;
	switch (req) {
	case RTM_DELETE:
		if ((rn = rnh->rnh_lookup(info->rti_info[RTAX_DST],
		    info->rti_info[RTAX_NETMASK], rnh)) == NULL)
			senderr(ESRCH);
		rt = (struct rtentry *)rn;
#ifndef SMALL_KERNEL
		/*
		 * if we got multipath routes, we require users to specify
		 * a matching RTAX_GATEWAY.
		 */
		if (rn_mpath_capable(rnh)) {
			rt = rt_mpath_matchgate(rt,
			    info->rti_info[RTAX_GATEWAY], prio);
			rn = (struct radix_node *)rt;
			if (!rt)
				senderr(ESRCH);
		}
#endif
		if ((rn = rnh->rnh_deladdr(info->rti_info[RTAX_DST],
		    info->rti_info[RTAX_NETMASK], rnh, rn)) == NULL)
			senderr(ESRCH);
		rt = (struct rtentry *)rn;

		/* clean up any cloned children */
		if ((rt->rt_flags & RTF_CLONING) != 0)
			rtflushclone(rnh, rt);

		if (rn->rn_flags & (RNF_ACTIVE | RNF_ROOT))
			panic ("rtrequest delete");

		/* release the cached route to the gateway, if any */
		if (rt->rt_gwroute) {
			rt = rt->rt_gwroute; RTFREE(rt);
			(rt = (struct rtentry *)rn)->rt_gwroute = NULL;
		}

		if (rt->rt_parent) {
			rt->rt_parent->rt_refcnt--;
			rt->rt_parent = NULL;
		}

#ifndef SMALL_KERNEL
		/* if only one sibling remains, it is no longer multipath */
		if (rn_mpath_capable(rnh)) {
			if ((rn = rnh->rnh_lookup(info->rti_info[RTAX_DST],
			    info->rti_info[RTAX_NETMASK], rnh)) != NULL &&
			    rn_mpath_next(rn, 0) == NULL)
				((struct rtentry *)rn)->rt_flags &= ~RTF_MPATH;
		}
#endif

		rt->rt_flags &= ~RTF_UP;
		if ((ifa = rt->rt_ifa) && ifa->ifa_rtrequest)
			ifa->ifa_rtrequest(RTM_DELETE, rt, info);
		rttrash++;

		if (ret_nrt)
			*ret_nrt = rt;
		else if (rt->rt_refcnt <= 0) {
			rt->rt_refcnt++;
			rtfree(rt);
		}
		break;

	case RTM_RESOLVE:
		if (ret_nrt == NULL || (rt = *ret_nrt) == NULL)
			senderr(EINVAL);
		if ((rt->rt_flags & RTF_CLONING) == 0)
			senderr(EINVAL);
		/* the clone inherits the parent's ifa, gateway and flags */
		ifa = rt->rt_ifa;
		info->rti_flags = rt->rt_flags & ~(RTF_CLONING | RTF_STATIC);
		info->rti_flags |= RTF_CLONED;
		info->rti_info[RTAX_GATEWAY] = rt->rt_gateway;
		if ((info->rti_info[RTAX_NETMASK] = rt->rt_genmask) == NULL)
			info->rti_flags |= RTF_HOST;
		goto makeroute;

	case RTM_ADD:
		if (info->rti_ifa == 0 && (error = rt_getifa(info)))
			senderr(error);
		ifa = info->rti_ifa;
makeroute:
		rt = pool_get(&rtentry_pool, PR_NOWAIT);
		if (rt == NULL)
			senderr(ENOBUFS);
		Bzero(rt, sizeof(*rt));

		rt->rt_flags = info->rti_flags;

		if (prio == 0)
			prio = ifa->ifa_ifp->if_priority + RTP_STATIC;
		rt->rt_priority = prio;	/* init routing priority */
#if 0
		if ((LINK_STATE_IS_UP(ifa->ifa_ifp->if_link_state) ||
		    ifa->ifa_ifp->if_link_state == LINK_STATE_UNKNOWN) &&
		    ifa->ifa_ifp->if_flags & IFF_UP)
			rt->rt_flags |= RTF_UP;
		else {
			rt->rt_flags &= ~RTF_UP;
			rt->rt_priority |= RTP_DOWN;
		}
#endif
		LIST_INIT(&rt->rt_timer);
		/* allocates key storage and sets rt_gateway/rt_gwroute */
		if (rt_setgate(rt, info->rti_info[RTAX_DST],
		    info->rti_info[RTAX_GATEWAY], tableid)) {
			pool_put(&rtentry_pool, rt);
			senderr(ENOBUFS);
		}
		ndst = rt_key(rt);
		if (info->rti_info[RTAX_NETMASK] != NULL) {
			rt_maskedcopy(info->rti_info[RTAX_DST], ndst,
			    info->rti_info[RTAX_NETMASK]);
		} else
			Bcopy(info->rti_info[RTAX_DST], ndst,
			    info->rti_info[RTAX_DST]->sa_len);
#ifndef SMALL_KERNEL
		/* do not permit exactly the same dst/mask/gw pair */
		if (rn_mpath_capable(rnh) &&
		    rt_mpath_conflict(rnh, rt, info->rti_info[RTAX_NETMASK],
		    info->rti_flags & RTF_MPATH)) {
			if (rt->rt_gwroute)
				rtfree(rt->rt_gwroute);
			Free(rt_key(rt));
			pool_put(&rtentry_pool, rt);
			senderr(EEXIST);
		}
#endif

		if (info->rti_info[RTAX_LABEL] != NULL) {
			sa_rl = (struct sockaddr_rtlabel *)
			    info->rti_info[RTAX_LABEL];
			rt->rt_labelid = rtlabel_name2id(sa_rl->sr_label);
		}

#ifdef MPLS
		/* We have to allocate additional space for MPLS infos */
		if (info->rti_info[RTAX_SRC] != NULL ||
		    info->rti_info[RTAX_DST]->sa_family == AF_MPLS) {
			struct rt_mpls *rt_mpls;

			sa_mpls = (struct sockaddr_mpls *)
			    info->rti_info[RTAX_SRC];

			rt->rt_llinfo = (caddr_t)malloc(sizeof(struct rt_mpls),
			    M_TEMP, M_NOWAIT|M_ZERO);

			if (rt->rt_llinfo == NULL) {
				if (rt->rt_gwroute)
					rtfree(rt->rt_gwroute);
				Free(rt_key(rt));
				pool_put(&rtentry_pool, rt);
				senderr(ENOMEM);
			}

			rt_mpls = (struct rt_mpls *)rt->rt_llinfo;

			if (sa_mpls != NULL)
				rt_mpls->mpls_label = sa_mpls->smpls_label;

			rt_mpls->mpls_operation = info->rti_mpls;

			/* XXX: set experimental bits */

			rt->rt_flags |= RTF_MPLS;
		}
#endif

		ifa->ifa_refcnt++;
		rt->rt_ifa = ifa;
		rt->rt_ifp = ifa->ifa_ifp;
		if (req == RTM_RESOLVE) {
			/*
			 * Copy both metrics and a back pointer to the cloned
			 * route's parent.
			 */
			rt->rt_rmx = (*ret_nrt)->rt_rmx; /* copy metrics */
			rt->rt_priority = (*ret_nrt)->rt_priority;
			rt->rt_parent = *ret_nrt;	 /* Back ptr. to parent. */
			rt->rt_parent->rt_refcnt++;
		}
		rn = rnh->rnh_addaddr((caddr_t)ndst,
		    (caddr_t)info->rti_info[RTAX_NETMASK], rnh, rt->rt_nodes,
		    rt->rt_priority);
		if (rn == NULL && (crt = rtalloc1(ndst, 0, tableid)) != NULL) {
			/* overwrite cloned route */
			if ((crt->rt_flags & RTF_CLONED) != 0) {
				rtdeletemsg(crt, tableid);
				rn = rnh->rnh_addaddr((caddr_t)ndst,
				    (caddr_t)info->rti_info[RTAX_NETMASK],
				    rnh, rt->rt_nodes, rt->rt_priority);
			}
			RTFREE(crt);
		}
		if (rn == 0) {
			/* insertion failed: undo everything built above */
			IFAFREE(ifa);
			if ((rt->rt_flags & RTF_CLONED) != 0 && rt->rt_parent)
				rtfree(rt->rt_parent);
			if (rt->rt_gwroute)
				rtfree(rt->rt_gwroute);
			Free(rt_key(rt));
			pool_put(&rtentry_pool, rt);
			senderr(EEXIST);
		}

#ifndef SMALL_KERNEL
		/* keep RTF_MPATH in sync with the number of siblings */
		if (rn_mpath_capable(rnh) &&
		    (rn = rnh->rnh_lookup(info->rti_info[RTAX_DST],
		    info->rti_info[RTAX_NETMASK], rnh)) != NULL &&
		    (rn = rn_mpath_prio(rn, prio)) != NULL) {
			if (rn_mpath_next(rn, 0) == NULL)
				((struct rtentry *)rn)->rt_flags &= ~RTF_MPATH;
			else
				((struct rtentry *)rn)->rt_flags |= RTF_MPATH;
		}
#endif

		if (ifa->ifa_rtrequest)
			ifa->ifa_rtrequest(req, rt, info);
		if (ret_nrt) {
			*ret_nrt = rt;
			rt->rt_refcnt++;
		}
		if ((rt->rt_flags & RTF_CLONING) != 0) {
			/* clean up any cloned children */
			rtflushclone(rnh, rt);
		}

		if_group_routechange(info->rti_info[RTAX_DST],
		    info->rti_info[RTAX_NETMASK]);
		break;
	}
bad:
	splx(s);
	return (error);
}

/*
 * Set the gateway of rt0 to `gate', (re)allocating the combined
 * key+gateway storage when the new gateway needs more room.  For
 * RTF_GATEWAY routes a route to the gateway is looked up, cached in
 * rt_gwroute, and its MTU may be inherited.  Returns 0 on success,
 * 1 on allocation failure.
 */
int
rt_setgate(struct rtentry *rt0, struct sockaddr *dst, struct sockaddr *gate,
    u_int tableid)
{
	caddr_t new, old;
	int dlen = ROUNDUP(dst->sa_len), glen = ROUNDUP(gate->sa_len);
	struct rtentry *rt = rt0;

	if (rt->rt_gateway == NULL || glen > ROUNDUP(rt->rt_gateway->sa_len)) {
		/* need a bigger buffer: key is copied below, old freed */
		old = (caddr_t)rt_key(rt);
		R_Malloc(new, caddr_t, dlen + glen);
		if (new == NULL)
			return 1;
		rt->rt_nodes->rn_key = new;
	} else {
		/* the gateway fits in place; keep the existing buffer */
		new = rt->rt_nodes->rn_key;
		old = NULL;
	}
	Bcopy(gate, (rt->rt_gateway = (struct sockaddr *)(new + dlen)), glen);
	if (old) {
		Bcopy(dst, new, dlen);
		Free(old);
	}
	if (rt->rt_gwroute != NULL) {
		rt = rt->rt_gwroute;
		RTFREE(rt);
		rt = rt0;
		rt->rt_gwroute = NULL;
	}
	if (rt->rt_flags & RTF_GATEWAY) {
		rt->rt_gwroute = rtalloc1(gate, 1, tableid);
		/*
		 * If we switched gateways, grab the MTU from the new
		 * gateway route if the current MTU is 0 or greater
		 * than the MTU of gateway.
		 * Note that, if the MTU of gateway is 0, we will reset the
		 * MTU of the route to run PMTUD again from scratch. XXX
		 */
		if (rt->rt_gwroute && !(rt->rt_rmx.rmx_locks & RTV_MTU) &&
		    rt->rt_rmx.rmx_mtu &&
		    rt->rt_rmx.rmx_mtu > rt->rt_gwroute->rt_rmx.rmx_mtu) {
			rt->rt_rmx.rmx_mtu = rt->rt_gwroute->rt_rmx.rmx_mtu;
		}
	}
	return (0);
}
/*
 * Copy src to dst under `netmask': the sa_len/sa_family bytes are
 * copied verbatim, the address bytes are ANDed with the mask, and any
 * remainder of dst (up to src's length) is zeroed.
 */
void
rt_maskedcopy(struct sockaddr *src, struct sockaddr *dst,
    struct sockaddr *netmask)
{
	u_char *cp1 = (u_char *)src;
	u_char *cp2 = (u_char *)dst;
	u_char *cp3 = (u_char *)netmask;
	u_char *cplim = cp2 + *cp3;	/* limit by mask length... */
	u_char *cplim2 = cp2 + *cp1;	/* ...but never past src length */

	*cp2++ = *cp1++; *cp2++ = *cp1++; /* copies sa_len & sa_family */
	cp3 += 2;
	if (cplim > cplim2)
		cplim = cplim2;
	while (cp2 < cplim)
		*cp2++ = *cp1++ & *cp3++;
	if (cp2 < cplim2)
		bzero((caddr_t)cp2, (unsigned)(cplim2 - cp2));
}

/*
 * Set up a routing table entry, normally
 * for an interface.
 *
 * cmd is RTM_ADD or RTM_DELETE; flags may include RTF_HOST.  The
 * route is tagged with the interface's route label (if any), and an
 * RTM_NEWADDR-style message is generated on success.  For RTM_ADD,
 * a pre-existing route owned by another ifa is re-pointed at `ifa'.
 */
int
rtinit(struct ifaddr *ifa, int cmd, int flags)
{
	struct rtentry *rt;
	struct sockaddr *dst, *deldst;
	struct mbuf *m = NULL;
	struct rtentry *nrt = NULL;
	int error;
	struct rt_addrinfo info;
	struct sockaddr_rtlabel sa_rl;
	const char *label;

	dst = flags & RTF_HOST ? ifa->ifa_dstaddr : ifa->ifa_addr;
	if (cmd == RTM_DELETE) {
		if ((flags & RTF_HOST) == 0 && ifa->ifa_netmask) {
			/* delete by the masked (network) form of dst */
			m = m_get(M_DONTWAIT, MT_SONAME);
			if (m == NULL)
				return (ENOBUFS);
			deldst = mtod(m, struct sockaddr *);
			rt_maskedcopy(dst, deldst, ifa->ifa_netmask);
			dst = deldst;
		}
		if ((rt = rtalloc1(dst, 0, 0)) != NULL) {
			rt->rt_refcnt--;
			/* only delete a route this ifa actually owns */
			if (rt->rt_ifa != ifa) {
				if (m != NULL)
					(void) m_free(m);
				return (flags & RTF_HOST ? EHOSTUNREACH
							: ENETUNREACH);
			}
		}
	}
	bzero(&info, sizeof(info));
	info.rti_ifa = ifa;
	info.rti_flags = flags | ifa->ifa_flags;
	info.rti_info[RTAX_DST] = dst;
	if (cmd == RTM_ADD)
		info.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
	if (ifa->ifa_ifp->if_rtlabelid &&
	    (label = rtlabel_id2name(ifa->ifa_ifp->if_rtlabelid)) != NULL) {
		/* tag the route with the interface's route label */
		bzero(&sa_rl, sizeof(sa_rl));
		sa_rl.sr_len = sizeof(sa_rl);
		sa_rl.sr_family = AF_UNSPEC;
		strlcpy(sa_rl.sr_label, label, sizeof(sa_rl.sr_label));
		info.rti_info[RTAX_LABEL] = (struct sockaddr *)&sa_rl;
	}

	/*
	 * XXX here, it seems that we are assuming that ifa_netmask is NULL
	 * for RTF_HOST.  bsdi4 passes NULL explicitly (via intermediate
	 * variable) when RTF_HOST is 1.  still not sure if i can safely
	 * change it to meet bsdi4 behavior.
	 */
	info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;
	error = rtrequest1(cmd, &info, RTP_CONNECTED, &nrt, 0);
	if (cmd == RTM_DELETE && error == 0 && (rt = nrt) != NULL) {
		rt_newaddrmsg(cmd, ifa, error, nrt);
		if (rt->rt_refcnt <= 0) {
			rt->rt_refcnt++;
			rtfree(rt);
		}
	}
	if (cmd == RTM_ADD && error == 0 && (rt = nrt) != NULL) {
		rt->rt_refcnt--;
		if (rt->rt_ifa != ifa) {
			/* steal the route from the ifa that owned it */
			printf("rtinit: wrong ifa (%p) was (%p)\n",
			    ifa, rt->rt_ifa);
			if (rt->rt_ifa->ifa_rtrequest)
				rt->rt_ifa->ifa_rtrequest(RTM_DELETE, rt, NULL);
			IFAFREE(rt->rt_ifa);
			rt->rt_ifa = ifa;
			rt->rt_ifp = ifa->ifa_ifp;
			ifa->ifa_refcnt++;
			if (ifa->ifa_rtrequest)
				ifa->ifa_rtrequest(RTM_ADD, rt, NULL);
		}
		rt_newaddrmsg(cmd, ifa, error, nrt);
	}
	return (error);
}

/*
 * Route timer routines.  These routes allow functions to be called
 * for various routes at any time.  This is useful in supporting
 * path MTU discovery and redirect route deletion.
 *
 * This is similar to some BSDI internal functions, but it provides
 * for multiple queues for efficiency's sake...
 */

LIST_HEAD(, rttimer_queue) rttimer_queue_head;
static int rt_init_done = 0;

/*
 * Fire a timer entry: run its callback if set, otherwise delete the
 * associated route (the default expiry action).
 */
#define RTTIMER_CALLOUT(r)	{					\
	if (r->rtt_func != NULL) {					\
		(*r->rtt_func)(r->rtt_rt, r);				\
	} else {							\
		struct rt_addrinfo info;				\
		bzero(&info, sizeof(info));				\
		info.rti_info[RTAX_DST] = rt_key(r->rtt_rt);		\
		rtrequest1(RTM_DELETE, &info,				\
		    r->rtt_rt->rt_priority, NULL, 0 /* XXX */);		\
	}								\
}

/*
 * Some subtle order problems with domain initialization mean that
 * we cannot count on this being run from rt_init before various
 * protocol initializations are done.  Therefore, we make sure
 * that this is run when the first queue is added...
 */
1160 */ 1161 1162 void 1163 rt_timer_init() 1164 { 1165 static struct timeout rt_timer_timeout; 1166 1167 if (rt_init_done) 1168 panic("rt_timer_init: already initialized"); 1169 1170 pool_init(&rttimer_pool, sizeof(struct rttimer), 0, 0, 0, "rttmrpl", 1171 NULL); 1172 1173 LIST_INIT(&rttimer_queue_head); 1174 timeout_set(&rt_timer_timeout, rt_timer_timer, &rt_timer_timeout); 1175 timeout_add_sec(&rt_timer_timeout, 1); 1176 rt_init_done = 1; 1177 } 1178 1179 struct rttimer_queue * 1180 rt_timer_queue_create(u_int timeout) 1181 { 1182 struct rttimer_queue *rtq; 1183 1184 if (rt_init_done == 0) 1185 rt_timer_init(); 1186 1187 R_Malloc(rtq, struct rttimer_queue *, sizeof *rtq); 1188 if (rtq == NULL) 1189 return (NULL); 1190 Bzero(rtq, sizeof *rtq); 1191 1192 rtq->rtq_timeout = timeout; 1193 rtq->rtq_count = 0; 1194 TAILQ_INIT(&rtq->rtq_head); 1195 LIST_INSERT_HEAD(&rttimer_queue_head, rtq, rtq_link); 1196 1197 return (rtq); 1198 } 1199 1200 void 1201 rt_timer_queue_change(struct rttimer_queue *rtq, long timeout) 1202 { 1203 rtq->rtq_timeout = timeout; 1204 } 1205 1206 void 1207 rt_timer_queue_destroy(struct rttimer_queue *rtq, int destroy) 1208 { 1209 struct rttimer *r; 1210 1211 while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL) { 1212 LIST_REMOVE(r, rtt_link); 1213 TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next); 1214 if (destroy) 1215 RTTIMER_CALLOUT(r); 1216 pool_put(&rttimer_pool, r); 1217 if (rtq->rtq_count > 0) 1218 rtq->rtq_count--; 1219 else 1220 printf("rt_timer_queue_destroy: rtq_count reached 0\n"); 1221 } 1222 1223 LIST_REMOVE(rtq, rtq_link); 1224 1225 /* 1226 * Caller is responsible for freeing the rttimer_queue structure. 
1227 */ 1228 } 1229 1230 unsigned long 1231 rt_timer_count(struct rttimer_queue *rtq) 1232 { 1233 return (rtq->rtq_count); 1234 } 1235 1236 void 1237 rt_timer_remove_all(struct rtentry *rt) 1238 { 1239 struct rttimer *r; 1240 1241 while ((r = LIST_FIRST(&rt->rt_timer)) != NULL) { 1242 LIST_REMOVE(r, rtt_link); 1243 TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next); 1244 if (r->rtt_queue->rtq_count > 0) 1245 r->rtt_queue->rtq_count--; 1246 else 1247 printf("rt_timer_remove_all: rtq_count reached 0\n"); 1248 pool_put(&rttimer_pool, r); 1249 } 1250 } 1251 1252 int 1253 rt_timer_add(struct rtentry *rt, void (*func)(struct rtentry *, 1254 struct rttimer *), struct rttimer_queue *queue) 1255 { 1256 struct rttimer *r; 1257 long current_time; 1258 1259 current_time = time_uptime; 1260 rt->rt_rmx.rmx_expire = time_second + queue->rtq_timeout; 1261 1262 /* 1263 * If there's already a timer with this action, destroy it before 1264 * we add a new one. 1265 */ 1266 for (r = LIST_FIRST(&rt->rt_timer); r != NULL; 1267 r = LIST_NEXT(r, rtt_link)) { 1268 if (r->rtt_func == func) { 1269 LIST_REMOVE(r, rtt_link); 1270 TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next); 1271 if (r->rtt_queue->rtq_count > 0) 1272 r->rtt_queue->rtq_count--; 1273 else 1274 printf("rt_timer_add: rtq_count reached 0\n"); 1275 pool_put(&rttimer_pool, r); 1276 break; /* only one per list, so we can quit... */ 1277 } 1278 } 1279 1280 r = pool_get(&rttimer_pool, PR_NOWAIT); 1281 if (r == NULL) 1282 return (ENOBUFS); 1283 Bzero(r, sizeof(*r)); 1284 1285 r->rtt_rt = rt; 1286 r->rtt_time = current_time; 1287 r->rtt_func = func; 1288 r->rtt_queue = queue; 1289 LIST_INSERT_HEAD(&rt->rt_timer, r, rtt_link); 1290 TAILQ_INSERT_TAIL(&queue->rtq_head, r, rtt_next); 1291 r->rtt_queue->rtq_count++; 1292 1293 return (0); 1294 } 1295 1296 struct radix_node_head * 1297 rt_gettable(sa_family_t af, u_int id) 1298 { 1299 return (rt_tables[id] ? 
rt_tables[id][af2rtafidx[af]] : NULL); 1300 } 1301 1302 struct radix_node * 1303 rt_lookup(struct sockaddr *dst, struct sockaddr *mask, int tableid) 1304 { 1305 struct radix_node_head *rnh; 1306 1307 if ((rnh = rt_gettable(dst->sa_family, tableid)) == NULL) 1308 return (NULL); 1309 1310 return (rnh->rnh_lookup(dst, mask, rnh)); 1311 } 1312 1313 /* ARGSUSED */ 1314 void 1315 rt_timer_timer(void *arg) 1316 { 1317 struct timeout *to = (struct timeout *)arg; 1318 struct rttimer_queue *rtq; 1319 struct rttimer *r; 1320 long current_time; 1321 int s; 1322 1323 current_time = time_uptime; 1324 1325 s = splsoftnet(); 1326 for (rtq = LIST_FIRST(&rttimer_queue_head); rtq != NULL; 1327 rtq = LIST_NEXT(rtq, rtq_link)) { 1328 while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL && 1329 (r->rtt_time + rtq->rtq_timeout) < current_time) { 1330 LIST_REMOVE(r, rtt_link); 1331 TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next); 1332 RTTIMER_CALLOUT(r); 1333 pool_put(&rttimer_pool, r); 1334 if (rtq->rtq_count > 0) 1335 rtq->rtq_count--; 1336 else 1337 printf("rt_timer_timer: rtq_count reached 0\n"); 1338 } 1339 } 1340 splx(s); 1341 1342 timeout_add_sec(to, 1); 1343 } 1344 1345 u_int16_t 1346 rtlabel_name2id(char *name) 1347 { 1348 struct rt_label *label, *p = NULL; 1349 u_int16_t new_id = 1; 1350 1351 if (!name[0]) 1352 return (0); 1353 1354 TAILQ_FOREACH(label, &rt_labels, rtl_entry) 1355 if (strcmp(name, label->rtl_name) == 0) { 1356 label->rtl_ref++; 1357 return (label->rtl_id); 1358 } 1359 1360 /* 1361 * to avoid fragmentation, we do a linear search from the beginning 1362 * and take the first free slot we find. if there is none or the list 1363 * is empty, append a new entry at the end. 
1364 */ 1365 1366 if (!TAILQ_EMPTY(&rt_labels)) 1367 for (p = TAILQ_FIRST(&rt_labels); p != NULL && 1368 p->rtl_id == new_id; p = TAILQ_NEXT(p, rtl_entry)) 1369 new_id = p->rtl_id + 1; 1370 1371 if (new_id > LABELID_MAX) 1372 return (0); 1373 1374 label = malloc(sizeof(*label), M_TEMP, M_NOWAIT|M_ZERO); 1375 if (label == NULL) 1376 return (0); 1377 strlcpy(label->rtl_name, name, sizeof(label->rtl_name)); 1378 label->rtl_id = new_id; 1379 label->rtl_ref++; 1380 1381 if (p != NULL) /* insert new entry before p */ 1382 TAILQ_INSERT_BEFORE(p, label, rtl_entry); 1383 else /* either list empty or no free slot in between */ 1384 TAILQ_INSERT_TAIL(&rt_labels, label, rtl_entry); 1385 1386 return (label->rtl_id); 1387 } 1388 1389 const char * 1390 rtlabel_id2name(u_int16_t id) 1391 { 1392 struct rt_label *label; 1393 1394 TAILQ_FOREACH(label, &rt_labels, rtl_entry) 1395 if (label->rtl_id == id) 1396 return (label->rtl_name); 1397 1398 return (NULL); 1399 } 1400 1401 void 1402 rtlabel_unref(u_int16_t id) 1403 { 1404 struct rt_label *p, *next; 1405 1406 if (id == 0) 1407 return; 1408 1409 for (p = TAILQ_FIRST(&rt_labels); p != NULL; p = next) { 1410 next = TAILQ_NEXT(p, rtl_entry); 1411 if (id == p->rtl_id) { 1412 if (--p->rtl_ref == 0) { 1413 TAILQ_REMOVE(&rt_labels, p, rtl_entry); 1414 free(p, M_TEMP); 1415 } 1416 break; 1417 } 1418 } 1419 } 1420 1421 void 1422 rt_if_remove(struct ifnet *ifp) 1423 { 1424 int i; 1425 struct radix_node_head *rnh; 1426 1427 for (i = 1; i <= AF_MAX; i++) 1428 if ((rnh = rt_gettable(i, 0)) != NULL) 1429 while ((*rnh->rnh_walktree)(rnh, 1430 rt_if_remove_rtdelete, ifp) == EAGAIN) 1431 ; /* nothing */ 1432 } 1433 1434 /* 1435 * Note that deleting a RTF_CLONING route can trigger the 1436 * deletion of more entries, so we need to cancel the walk 1437 * and return EAGAIN. The caller should restart the walk 1438 * as long as EAGAIN is returned. 
1439 */ 1440 int 1441 rt_if_remove_rtdelete(struct radix_node *rn, void *vifp) 1442 { 1443 struct ifnet *ifp = vifp; 1444 struct rtentry *rt = (struct rtentry *)rn; 1445 1446 if (rt->rt_ifp == ifp) { 1447 int cloning = (rt->rt_flags & RTF_CLONING); 1448 1449 if (rtdeletemsg(rt, 0) == 0 && cloning) 1450 return (EAGAIN); 1451 } 1452 1453 /* 1454 * XXX There should be no need to check for rt_ifa belonging to this 1455 * interface, because then rt_ifp is set, right? 1456 */ 1457 1458 return (0); 1459 } 1460 1461 #ifndef SMALL_KERNEL 1462 void 1463 rt_if_track(struct ifnet *ifp) 1464 { 1465 struct radix_node_head *rnh; 1466 int i; 1467 u_int tid; 1468 1469 if (rt_tables == NULL) 1470 return; 1471 1472 for (tid = 0; tid <= rtbl_id_max; tid++) { 1473 for (i = 1; i <= AF_MAX; i++) { 1474 if ((rnh = rt_gettable(i, tid)) != NULL) { 1475 if (!rn_mpath_capable(rnh)) 1476 continue; 1477 while ((*rnh->rnh_walktree)(rnh, 1478 rt_if_linkstate_change, ifp) == EAGAIN) 1479 ; /* nothing */ 1480 } 1481 } 1482 } 1483 } 1484 1485 int 1486 rt_if_linkstate_change(struct radix_node *rn, void *arg) 1487 { 1488 struct ifnet *ifp = arg; 1489 struct rtentry *rt = (struct rtentry *)rn; 1490 1491 if (rt->rt_ifp == ifp) { 1492 if ((LINK_STATE_IS_UP(ifp->if_link_state) || 1493 ifp->if_link_state == LINK_STATE_UNKNOWN) && 1494 ifp->if_flags & IFF_UP) { 1495 if (!(rt->rt_flags & RTF_UP)) { 1496 /* bring route up */ 1497 rt->rt_flags |= RTF_UP; 1498 rn_mpath_reprio(rn, rt->rt_priority & RTP_MASK); 1499 } 1500 } else { 1501 if (rt->rt_flags & RTF_UP) { 1502 /* take route done */ 1503 rt->rt_flags &= ~RTF_UP; 1504 rn_mpath_reprio(rn, rt->rt_priority | RTP_DOWN); 1505 } 1506 } 1507 if_group_routechange(rt_key(rt), rt_mask(rt)); 1508 } 1509 1510 return (0); 1511 } 1512 #endif 1513