1 /* $NetBSD: table.c,v 1.2 1996/08/10 01:29:59 thorpej Exp $ */ 2 3 /* 4 * Copyright (c) 1983, 1988, 1993 5 * The Regents of the University of California. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by the University of 18 * California, Berkeley and its contributors. 19 * 4. Neither the name of the University nor the names of its contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 34 */ 35 36 #if !defined(lint) && !defined(sgi) 37 #if 0 38 static char sccsid[] = "@(#)tables.c 8.1 (Berkeley) 6/5/93"; 39 #else 40 static char rcsid[] = "$NetBSD: table.c,v 1.2 1996/08/10 01:29:59 thorpej Exp $"; 41 #endif 42 #endif /* not lint */ 43 44 #include "defs.h" 45 46 static struct rt_spare *rts_better(struct rt_entry *); 47 48 struct radix_node_head *rhead; /* root of the radix tree */ 49 50 int need_flash = 1; /* flash update needed 51 * start =1 to suppress the 1st 52 */ 53 54 struct timeval age_timer; /* next check of old routes */ 55 struct timeval need_kern = { /* need to update kernel table */ 56 EPOCH+MIN_WAITTIME-1 57 }; 58 59 int stopint; 60 61 naddr age_bad_gate; 62 63 64 /* It is desirable to "aggregate" routes, to combine differing routes of 65 * the same metric and next hop into a common route with a smaller netmask 66 * or to suppress redundant routes, routes that add no information to 67 * routes with smaller netmasks. 68 * 69 * A route is redundant if and only if any and all routes with smaller 70 * but matching netmasks and nets are the same. Since routes are 71 * kept sorted in the radix tree, redundant routes always come second. 72 * 73 * There are two kinds of aggregations. First, two routes of the same bit 74 * mask and differing only in the least significant bit of the network 75 * number can be combined into a single route with a coarser mask. 76 * 77 * Second, a route can be suppressed in favor of another route with a more 78 * coarse mask provided no incompatible routes with intermediate masks 79 * are present. The second kind of aggregation involves suppressing routes. 80 * A route must not be suppressed if an incompatible route exists with 81 * an intermediate mask, since the suppressed route would be covered 82 * by the intermediate. 83 * 84 * This code relies on the radix tree walk encountering routes 85 * sorted first by address, with the smallest address first. 86 */ 87 88 struct ag_info ag_slots[NUM_AG_SLOTS], *ag_avail, *ag_corsest, *ag_finest; 89 90 /* #define DEBUG_AG */ 91 #ifdef DEBUG_AG 92 #define CHECK_AG() {int acnt = 0; struct ag_info *cag; \ 93 for (cag = ag_avail; cag != 0; cag = cag->ag_fine) \ 94 acnt++; \ 95 for (cag = ag_corsest; cag != 0; cag = cag->ag_fine) \ 96 acnt++; \ 97 if (acnt != NUM_AG_SLOTS) { \ 98 (void)fflush(stderr); \ 99 abort(); \ 100 } \ 101 } 102 #else 103 #define CHECK_AG() 104 #endif 105 106 107 /* Output the contents of an aggregation table slot. 108 * This function must always be immediately followed with the deletion 109 * of the target slot. 110 */ 111 static void 112 ag_out(struct ag_info *ag, 113 void (*out)(struct ag_info *)) 114 { 115 struct ag_info *ag_cors; 116 naddr bit; 117 118 119 /* If we output both the even and odd twins, then the immediate parent, 120 * if it is present, is redundant, unless the parent manages to 121 * aggregate into something coarser. 122 * On successive calls, this code detects the even and odd twins, 123 * and marks the parent. 124 * 125 * Note that the order in which the radix tree code emits routes 126 * ensures that the twins are seen before the parent is emitted. 127 */ 128 ag_cors = ag->ag_cors; 129 if (ag_cors != 0 130 && ag_cors->ag_mask == ag->ag_mask<<1 131 && ag_cors->ag_dst_h == (ag->ag_dst_h & ag_cors->ag_mask)) { 132 ag_cors->ag_state |= ((ag_cors->ag_dst_h == ag->ag_dst_h) 133 ? AGS_REDUN0 134 : AGS_REDUN1); 135 } 136 137 /* Skip it if this route is itself redundant. 138 * 139 * It is ok to change the contents of the slot here, since it is 140 * always deleted next. 141 */ 142 if (ag->ag_state & AGS_REDUN0) { 143 if (ag->ag_state & AGS_REDUN1) 144 return; 145 bit = (-ag->ag_mask) >> 1; 146 ag->ag_dst_h |= bit; 147 ag->ag_mask |= bit; 148 149 } else if (ag->ag_state & AGS_REDUN1) { 150 bit = (-ag->ag_mask) >> 1; 151 ag->ag_mask |= bit; 152 } 153 out(ag); 154 } 155 156 157 static void 158 ag_del(struct ag_info *ag) 159 { 160 CHECK_AG(); 161 162 if (ag->ag_cors == 0) 163 ag_corsest = ag->ag_fine; 164 else 165 ag->ag_cors->ag_fine = ag->ag_fine; 166 167 if (ag->ag_fine == 0) 168 ag_finest = ag->ag_cors; 169 else 170 ag->ag_fine->ag_cors = ag->ag_cors; 171 172 ag->ag_fine = ag_avail; 173 ag_avail = ag; 174 175 CHECK_AG(); 176 } 177 178 179 /* Flush routes waiting for aggretation. 180 * This must not suppress a route unless it is known that among all 181 * routes with coarser masks that match it, the one with the longest 182 * mask is appropriate. This is ensured by scanning the routes 183 * in lexical order, and with the most restritive mask first 184 * among routes to the same destination. 185 */ 186 void 187 ag_flush(naddr lim_dst_h, /* flush routes to here */ 188 naddr lim_mask, /* matching this mask */ 189 void (*out)(struct ag_info *)) 190 { 191 struct ag_info *ag, *ag_cors; 192 naddr dst_h; 193 194 195 for (ag = ag_finest; 196 ag != 0 && ag->ag_mask >= lim_mask; 197 ag = ag_cors) { 198 ag_cors = ag->ag_cors; 199 200 /* work on only the specified routes */ 201 dst_h = ag->ag_dst_h; 202 if ((dst_h & lim_mask) != lim_dst_h) 203 continue; 204 205 if (!(ag->ag_state & AGS_SUPPRESS)) 206 ag_out(ag, out); 207 208 else for ( ; ; ag_cors = ag_cors->ag_cors) { 209 /* Look for a route that can suppress the 210 * current route */ 211 if (ag_cors == 0) { 212 /* failed, so output it and look for 213 * another route to work on 214 */ 215 ag_out(ag, out); 216 break; 217 } 218 219 if ((dst_h & ag_cors->ag_mask) == ag_cors->ag_dst_h) { 220 /* We found a route with a coarser mask that 221 * aggregates the current target. 222 * 223 * If it has a different next hop, it 224 * cannot replace the target, so output 225 * the target. 226 */ 227 if (ag->ag_gate != ag_cors->ag_gate 228 && !(ag->ag_state & AGS_FINE_GATE) 229 && !(ag_cors->ag_state & AGS_CORS_GATE)) { 230 ag_out(ag, out); 231 break; 232 } 233 234 /* If the coarse route has a good enough 235 * metric, it suppresses the target. 236 */ 237 if (ag_cors->ag_pref <= ag->ag_pref) { 238 if (ag_cors->ag_seqno > ag->ag_seqno) 239 ag_cors->ag_seqno = ag->ag_seqno; 240 if (AG_IS_REDUN(ag->ag_state) 241 && ag_cors->ag_mask==ag->ag_mask<<1) { 242 if (ag_cors->ag_dst_h == dst_h) 243 ag_cors->ag_state |= AGS_REDUN0; 244 else 245 ag_cors->ag_state |= AGS_REDUN1; 246 } 247 if (ag->ag_tag != ag_cors->ag_tag) 248 ag_cors->ag_tag = 0; 249 if (ag->ag_nhop != ag_cors->ag_nhop) 250 ag_cors->ag_nhop = 0; 251 break; 252 } 253 } 254 } 255 256 /* That route has either been output or suppressed */ 257 ag_cors = ag->ag_cors; 258 ag_del(ag); 259 } 260 261 CHECK_AG(); 262 } 263 264 265 /* Try to aggregate a route with previous routes. 266 */ 267 void 268 ag_check(naddr dst, 269 naddr mask, 270 naddr gate, 271 naddr nhop, 272 char metric, 273 char pref, 274 u_int seqno, 275 u_short tag, 276 u_short state, 277 void (*out)(struct ag_info *)) /* output using this */ 278 { 279 struct ag_info *ag, *nag, *ag_cors; 280 naddr xaddr; 281 int x; 282 283 NTOHL(dst); 284 285 /* Punt non-contiguous subnet masks. 286 * 287 * (X & -X) contains a single bit if and only if X is a power of 2. 288 * (X + (X & -X)) == 0 if and only if X is a power of 2. 289 */ 290 if ((mask & -mask) + mask != 0) { 291 struct ag_info nc_ag; 292 293 nc_ag.ag_dst_h = dst; 294 nc_ag.ag_mask = mask; 295 nc_ag.ag_gate = gate; 296 nc_ag.ag_nhop = nhop; 297 nc_ag.ag_metric = metric; 298 nc_ag.ag_pref = pref; 299 nc_ag.ag_tag = tag; 300 nc_ag.ag_state = state; 301 nc_ag.ag_seqno = seqno; 302 out(&nc_ag); 303 return; 304 } 305 306 /* Search for the right slot in the aggregation table. 307 */ 308 ag_cors = 0; 309 ag = ag_corsest; 310 while (ag != 0) { 311 if (ag->ag_mask >= mask) 312 break; 313 314 /* Suppress old routes (i.e. combine with compatible routes 315 * with coarser masks) as we look for the right slot in the 316 * aggregation table for the new route. 317 * A route to an address less than the current destination 318 * will not be affected by the current route or any route 319 * seen hereafter. That means it is safe to suppress it. 320 * This check keeps poor routes (eg. with large hop counts) 321 * from preventing suppresion of finer routes. 322 */ 323 if (ag_cors != 0 324 && ag->ag_dst_h < dst 325 && (ag->ag_state & AGS_SUPPRESS) 326 && ag_cors->ag_pref <= ag->ag_pref 327 && (ag->ag_dst_h & ag_cors->ag_mask) == ag_cors->ag_dst_h 328 && (ag_cors->ag_gate == ag->ag_gate 329 || (ag->ag_state & AGS_FINE_GATE) 330 || (ag_cors->ag_state & AGS_CORS_GATE))) { 331 if (ag_cors->ag_seqno > ag->ag_seqno) 332 ag_cors->ag_seqno = ag->ag_seqno; 333 if (AG_IS_REDUN(ag->ag_state) 334 && ag_cors->ag_mask==ag->ag_mask<<1) { 335 if (ag_cors->ag_dst_h == dst) 336 ag_cors->ag_state |= AGS_REDUN0; 337 else 338 ag_cors->ag_state |= AGS_REDUN1; 339 } 340 if (ag->ag_tag != ag_cors->ag_tag) 341 ag_cors->ag_tag = 0; 342 if (ag->ag_nhop != ag_cors->ag_nhop) 343 ag_cors->ag_nhop = 0; 344 ag_del(ag); 345 CHECK_AG(); 346 } else { 347 ag_cors = ag; 348 } 349 ag = ag_cors->ag_fine; 350 } 351 352 /* If we find the even/odd twin of the new route, and if the 353 * masks and so forth are equal, we can aggregate them. 354 * We can probably promote one of the pair. 355 * 356 * Since the routes are encountered in lexical order, 357 * the new route must be odd. However, the second or later 358 * times around this loop, it could be the even twin promoted 359 * from the even/odd pair of twins of the finer route. 360 */ 361 while (ag != 0 362 && ag->ag_mask == mask 363 && ((ag->ag_dst_h ^ dst) & (mask<<1)) == 0) { 364 365 /* Here we know the target route and the route in the current 366 * slot have the same netmasks and differ by at most the 367 * last bit. They are either for the same destination, or 368 * for an even/odd pair of destinations. 369 */ 370 if (ag->ag_dst_h == dst) { 371 /* We have two routes to the same destination. 372 * Routes are encountered in lexical order, so a 373 * route is never promoted until the parent route is 374 * already present. So we know that the new route is 375 * a promoted pair and the route already in the slot 376 * is the explicit route. 377 * 378 * Prefer the best route if their metrics differ, 379 * or the promoted one if not, following a sort 380 * of longest-match rule. 381 */ 382 if (pref <= ag->ag_pref) { 383 ag->ag_gate = gate; 384 ag->ag_nhop = nhop; 385 ag->ag_tag = tag; 386 ag->ag_metric = metric; 387 ag->ag_pref = pref; 388 x = ag->ag_state; 389 ag->ag_state = state; 390 state = x; 391 } 392 393 /* The sequence number controls flash updating, 394 * and should be the smaller of the two. 395 */ 396 if (ag->ag_seqno > seqno) 397 ag->ag_seqno = seqno; 398 399 /* some bits are set if they are set on either route */ 400 ag->ag_state |= (state & (AGS_PROMOTE_EITHER 401 | AGS_REDUN0 | AGS_REDUN1)); 402 return; 403 } 404 405 /* If one of the routes can be promoted and the other can 406 * be suppressed, it may be possible to combine them or 407 * worthwhile to promote one. 408 * 409 * Note that any route that can be promoted is always 410 * marked to be eligible to be suppressed. 411 */ 412 if (!((state & AGS_PROMOTE) 413 && (ag->ag_state & AGS_SUPPRESS)) 414 && !((ag->ag_state & AGS_PROMOTE) 415 && (state & AGS_SUPPRESS))) 416 break; 417 418 /* A pair of even/odd twin routes can be combined 419 * if either is redundant, or if they are via the 420 * same gateway and have the same metric. 421 */ 422 if (AG_IS_REDUN(ag->ag_state) 423 || AG_IS_REDUN(state) 424 || (ag->ag_gate == gate 425 && ag->ag_pref == pref 426 && (state & ag->ag_state & AGS_PROMOTE) != 0)) { 427 428 /* We have both the even and odd pairs. 429 * Since the routes are encountered in order, 430 * the route in the slot must be the even twin. 431 * 432 * Combine and promote the pair of routes. 433 */ 434 if (seqno > ag->ag_seqno) 435 seqno = ag->ag_seqno; 436 if (!AG_IS_REDUN(state)) 437 state &= ~AGS_REDUN1; 438 if (AG_IS_REDUN(ag->ag_state)) 439 state |= AGS_REDUN0; 440 else 441 state &= ~AGS_REDUN0; 442 state |= (ag->ag_state & AGS_PROMOTE_EITHER); 443 if (ag->ag_tag != tag) 444 tag = 0; 445 if (ag->ag_nhop != nhop) 446 nhop = 0; 447 448 /* Get rid of the even twin that was already 449 * in the slot. 450 */ 451 ag_del(ag); 452 453 } else if (ag->ag_pref >= pref 454 && (ag->ag_state & AGS_PROMOTE)) { 455 /* If we cannot combine the pair, maybe the route 456 * with the worse metric can be promoted. 457 * 458 * Promote the old, even twin, by giving its slot 459 * in the table to the new, odd twin. 460 */ 461 ag->ag_dst_h = dst; 462 463 xaddr = ag->ag_gate; 464 ag->ag_gate = gate; 465 gate = xaddr; 466 467 xaddr = ag->ag_nhop; 468 ag->ag_nhop = nhop; 469 nhop = xaddr; 470 471 x = ag->ag_tag; 472 ag->ag_tag = tag; 473 tag = x; 474 475 x = ag->ag_state; 476 ag->ag_state = state; 477 state = x; 478 if (!AG_IS_REDUN(state)) 479 state &= ~AGS_REDUN0; 480 481 x = ag->ag_metric; 482 ag->ag_metric = metric; 483 metric = x; 484 485 x = ag->ag_pref; 486 ag->ag_pref = pref; 487 pref = x; 488 489 if (seqno >= ag->ag_seqno) 490 seqno = ag->ag_seqno; 491 else 492 ag->ag_seqno = seqno; 493 494 } else { 495 if (!(state & AGS_PROMOTE)) 496 break; /* cannot promote either twin */ 497 498 /* promote the new, odd twin by shaving its 499 * mask and address. 500 */ 501 if (seqno > ag->ag_seqno) 502 seqno = ag->ag_seqno; 503 else 504 ag->ag_seqno = seqno; 505 if (!AG_IS_REDUN(state)) 506 state &= ~AGS_REDUN1; 507 } 508 509 mask <<= 1; 510 dst &= mask; 511 512 if (ag_cors == 0) { 513 ag = ag_corsest; 514 break; 515 } 516 ag = ag_cors; 517 ag_cors = ag->ag_cors; 518 } 519 520 /* When we can no longer promote and combine routes, 521 * flush the old route in the target slot. Also flush 522 * any finer routes that we know will never be aggregated by 523 * the new route. 524 * 525 * In case we moved toward coarser masks, 526 * get back where we belong 527 */ 528 if (ag != 0 529 && ag->ag_mask < mask) { 530 ag_cors = ag; 531 ag = ag->ag_fine; 532 } 533 534 /* Empty the target slot 535 */ 536 if (ag != 0 && ag->ag_mask == mask) { 537 ag_flush(ag->ag_dst_h, ag->ag_mask, out); 538 ag = (ag_cors == 0) ? ag_corsest : ag_cors->ag_fine; 539 } 540 541 #ifdef DEBUG_AG 542 (void)fflush(stderr); 543 if (ag == 0 && ag_cors != ag_finest) 544 abort(); 545 if (ag_cors == 0 && ag != ag_corsest) 546 abort(); 547 if (ag != 0 && ag->ag_cors != ag_cors) 548 abort(); 549 if (ag_cors != 0 && ag_cors->ag_fine != ag) 550 abort(); 551 CHECK_AG(); 552 #endif 553 554 /* Save the new route on the end of the table. 555 */ 556 nag = ag_avail; 557 ag_avail = nag->ag_fine; 558 559 nag->ag_dst_h = dst; 560 nag->ag_mask = mask; 561 nag->ag_gate = gate; 562 nag->ag_nhop = nhop; 563 nag->ag_metric = metric; 564 nag->ag_pref = pref; 565 nag->ag_tag = tag; 566 nag->ag_state = state; 567 nag->ag_seqno = seqno; 568 569 nag->ag_fine = ag; 570 if (ag != 0) 571 ag->ag_cors = nag; 572 else 573 ag_finest = nag; 574 nag->ag_cors = ag_cors; 575 if (ag_cors == 0) 576 ag_corsest = nag; 577 else 578 ag_cors->ag_fine = nag; 579 CHECK_AG(); 580 } 581 582 583 static char * 584 rtm_type_name(u_char type) 585 { 586 static char *rtm_types[] = { 587 "RTM_ADD", 588 "RTM_DELETE", 589 "RTM_CHANGE", 590 "RTM_GET", 591 "RTM_LOSING", 592 "RTM_REDIRECT", 593 "RTM_MISS", 594 "RTM_LOCK", 595 "RTM_OLDADD", 596 "RTM_OLDDEL", 597 "RTM_RESOLVE", 598 "RTM_NEWADDR", 599 "RTM_DELADDR", 600 "RTM_IFINFO" 601 }; 602 static char name0[10]; 603 604 605 if (type > sizeof(rtm_types)/sizeof(rtm_types[0]) 606 || type == 0) { 607 sprintf(name0, "RTM type %#x", type); 608 return name0; 609 } else { 610 return rtm_types[type-1]; 611 } 612 } 613 614 615 /* Trim a mask in a sockaddr 616 * Produce a length of 0 for an address of 0. 617 * Otherwise produce the index of the first zero byte. 618 */ 619 void 620 #ifdef _HAVE_SIN_LEN 621 masktrim(struct sockaddr_in *ap) 622 #else 623 masktrim(struct sockaddr_in_new *ap) 624 #endif 625 { 626 register char *cp; 627 628 if (ap->sin_addr.s_addr == 0) { 629 ap->sin_len = 0; 630 return; 631 } 632 cp = (char *)(&ap->sin_addr.s_addr+1); 633 while (*--cp == 0) 634 continue; 635 ap->sin_len = cp - (char*)ap + 1; 636 } 637 638 639 /* Tell the kernel to add, delete or change a route 640 */ 641 static void 642 rtioctl(int action, /* RTM_DELETE, etc */ 643 naddr dst, 644 naddr gate, 645 naddr mask, 646 int metric, 647 int flags) 648 { 649 struct { 650 struct rt_msghdr w_rtm; 651 struct sockaddr_in w_dst; 652 struct sockaddr_in w_gate; 653 #ifdef _HAVE_SA_LEN 654 struct sockaddr_in w_mask; 655 #else 656 struct sockaddr_in_new w_mask; 657 #endif 658 } w; 659 long cc; 660 661 again: 662 bzero(&w, sizeof(w)); 663 w.w_rtm.rtm_msglen = sizeof(w); 664 w.w_rtm.rtm_version = RTM_VERSION; 665 w.w_rtm.rtm_type = action; 666 w.w_rtm.rtm_flags = flags; 667 w.w_rtm.rtm_seq = ++rt_sock_seqno; 668 w.w_rtm.rtm_addrs = RTA_DST|RTA_GATEWAY; 669 if (metric != 0) { 670 w.w_rtm.rtm_rmx.rmx_hopcount = metric; 671 w.w_rtm.rtm_inits |= RTV_HOPCOUNT; 672 } 673 w.w_dst.sin_family = AF_INET; 674 w.w_dst.sin_addr.s_addr = dst; 675 w.w_gate.sin_family = AF_INET; 676 w.w_gate.sin_addr.s_addr = gate; 677 #ifdef _HAVE_SA_LEN 678 w.w_dst.sin_len = sizeof(w.w_dst); 679 w.w_gate.sin_len = sizeof(w.w_gate); 680 #endif 681 if (mask == HOST_MASK) { 682 w.w_rtm.rtm_flags |= RTF_HOST; 683 w.w_rtm.rtm_msglen -= sizeof(w.w_mask); 684 } else { 685 w.w_rtm.rtm_addrs |= RTA_NETMASK; 686 w.w_mask.sin_addr.s_addr = htonl(mask); 687 #ifdef _HAVE_SA_LEN 688 masktrim(&w.w_mask); 689 if (w.w_mask.sin_len == 0) 690 w.w_mask.sin_len = sizeof(long); 691 w.w_rtm.rtm_msglen -= (sizeof(w.w_mask) - w.w_mask.sin_len); 692 #endif 693 } 694 #ifndef NO_INSTALL 695 cc = write(rt_sock, &w, w.w_rtm.rtm_msglen); 696 if (cc == w.w_rtm.rtm_msglen) 697 return; 698 if (cc < 0) { 699 if (errno == ESRCH 700 && (action == RTM_CHANGE || action == RTM_DELETE)) { 701 trace_act("route to %s disappeared before %s\n", 702 addrname(dst, mask, 0), 703 rtm_type_name(action)); 704 if (action == RTM_CHANGE) { 705 action = RTM_ADD; 706 goto again; 707 } 708 return; 709 } 710 msglog("write(rt_sock) %s %s --> %s: %s", 711 rtm_type_name(action), 712 addrname(dst, mask, 0), naddr_ntoa(gate), 713 strerror(errno)); 714 } else { 715 msglog("write(rt_sock) wrote %d instead of %d", 716 cc, w.w_rtm.rtm_msglen); 717 } 718 #endif 719 } 720 721 722 #define KHASH_SIZE 71 /* should be prime */ 723 #define KHASH(a,m) khash_bins[((a) ^ (m)) % KHASH_SIZE] 724 static struct khash { 725 struct khash *k_next; 726 naddr k_dst; 727 naddr k_mask; 728 naddr k_gate; 729 short k_metric; 730 u_short k_state; 731 #define KS_NEW 0x001 732 #define KS_DELETE 0x002 733 #define KS_ADD 0x004 /* add to the kernel */ 734 #define KS_CHANGE 0x008 /* tell kernel to change the route */ 735 #define KS_DEL_ADD 0x010 /* delete & add to change the kernel */ 736 #define KS_STATIC 0x020 /* Static flag in kernel */ 737 #define KS_GATEWAY 0x040 /* G flag in kernel */ 738 #define KS_DYNAMIC 0x080 /* result of redirect */ 739 #define KS_DELETED 0x100 /* already deleted */ 740 time_t k_keep; 741 #define K_KEEP_LIM 30 742 time_t k_redirect_time; 743 } *khash_bins[KHASH_SIZE]; 744 745 746 static struct khash* 747 kern_find(naddr dst, naddr mask, struct khash ***ppk) 748 { 749 struct khash *k, **pk; 750 751 for (pk = &KHASH(dst,mask); (k = *pk) != 0; pk = &k->k_next) { 752 if (k->k_dst == dst && k->k_mask == mask) 753 break; 754 } 755 if (ppk != 0) 756 *ppk = pk; 757 return k; 758 } 759 760 761 static struct khash* 762 kern_add(naddr dst, naddr mask) 763 { 764 struct khash *k, **pk; 765 766 k = kern_find(dst, mask, &pk); 767 if (k != 0) 768 return k; 769 770 k = (struct khash *)malloc(sizeof(*k)); 771 772 bzero(k, sizeof(*k)); 773 k->k_dst = dst; 774 k->k_mask = mask; 775 k->k_state = KS_NEW; 776 k->k_redirect_time = now.tv_sec; 777 k->k_keep = now.tv_sec; 778 *pk = k; 779 780 return k; 781 } 782 783 784 /* If it has a non-zero metric, check that it is still in the table, not 785 * having been deleted by interfaces coming and going. 786 */ 787 static void 788 kern_check_static(struct khash *k, 789 struct interface *ifp) 790 { 791 struct rt_entry *rt; 792 naddr int_addr; 793 794 if (k->k_metric == 0) 795 return; 796 797 int_addr = (ifp != 0) ? ifp->int_addr : loopaddr; 798 799 rt = rtget(k->k_dst, k->k_mask); 800 if (rt != 0) { 801 if (!(rt->rt_state & RS_STATIC)) 802 rtchange(rt, rt->rt_state | RS_STATIC, 803 k->k_gate, int_addr, 804 k->k_metric, 0, ifp, now.tv_sec, 0); 805 } else { 806 rtadd(k->k_dst, k->k_mask, k->k_gate, int_addr, 807 k->k_metric, 0, RS_STATIC, ifp); 808 } 809 } 810 811 812 /* add a route the kernel told us 813 */ 814 static void 815 rtm_add(struct rt_msghdr *rtm, 816 struct rt_addrinfo *info, 817 time_t keep) 818 { 819 struct khash *k; 820 struct interface *ifp; 821 naddr mask; 822 823 824 if (rtm->rtm_flags & RTF_HOST) { 825 mask = HOST_MASK; 826 } else if (INFO_MASK(info) != 0) { 827 mask = ntohl(S_ADDR(INFO_MASK(info))); 828 } else { 829 msglog("punt %s without mask", 830 rtm_type_name(rtm->rtm_type)); 831 return; 832 } 833 834 if (INFO_GATE(info) == 0 835 || INFO_GATE(info)->sa_family != AF_INET) { 836 msglog("punt %s without gateway", 837 rtm_type_name(rtm->rtm_type)); 838 return; 839 } 840 841 k = kern_add(S_ADDR(INFO_DST(info)), mask); 842 if (k->k_state & KS_NEW) 843 k->k_keep = now.tv_sec+keep; 844 k->k_gate = S_ADDR(INFO_GATE(info)); 845 k->k_metric = rtm->rtm_rmx.rmx_hopcount; 846 if (k->k_metric < 0) 847 k->k_metric = 0; 848 else if (k->k_metric > HOPCNT_INFINITY) 849 k->k_metric = HOPCNT_INFINITY; 850 k->k_state &= ~(KS_DELETED | KS_GATEWAY | KS_STATIC | KS_NEW); 851 if (rtm->rtm_flags & RTF_GATEWAY) 852 k->k_state |= KS_GATEWAY; 853 if (rtm->rtm_flags & RTF_STATIC) 854 k->k_state |= KS_STATIC; 855 if (rtm->rtm_flags & RTF_DYNAMIC) { 856 k->k_state |= KS_DYNAMIC; 857 k->k_redirect_time = now.tv_sec; 858 /* Routers are not supposed to listen to redirects, 859 * so delete it. 860 */ 861 if (supplier) { 862 k->k_keep = now.tv_sec; 863 trace_act("mark redirected %s --> %s for deletion" 864 "since this is a router\n", 865 addrname(k->k_dst, k->k_mask, 0), 866 naddr_ntoa(k->k_gate)); 867 } 868 } 869 870 /* If it is not a static route, quite until it is time to delete it. 871 */ 872 if (!(k->k_state & KS_STATIC)) { 873 k->k_state |= KS_DELETE; 874 LIM_SEC(need_kern, k->k_keep); 875 return; 876 } 877 878 /* Put static routes with real metrics into the daemon table so 879 * they can be advertised. 880 * 881 * Find the interface concerned 882 */ 883 ifp = iflookup(k->k_gate); 884 if (ifp == 0) { 885 /* if there is no interface, maybe it is new 886 */ 887 ifinit(); 888 ifp = iflookup(k->k_gate); 889 if (ifp == 0) 890 msglog("static route %s --> %s impossibly lacks ifp", 891 addrname(S_ADDR(INFO_DST(info)), mask, 0), 892 naddr_ntoa(k->k_gate)); 893 } 894 895 kern_check_static(k, ifp); 896 } 897 898 899 /* deal with packet loss 900 */ 901 static void 902 rtm_lose(struct rt_msghdr *rtm, 903 struct rt_addrinfo *info) 904 { 905 if (INFO_GATE(info) == 0 906 || INFO_GATE(info)->sa_family != AF_INET) { 907 msglog("punt %s without gateway", 908 rtm_type_name(rtm->rtm_type)); 909 return; 910 } 911 912 if (!supplier) 913 rdisc_age(S_ADDR(INFO_GATE(info))); 914 915 age(S_ADDR(INFO_GATE(info))); 916 } 917 918 919 /* Clean the kernel table by copying it to the daemon image. 920 * Eventually the daemon will delete any extra routes. 921 */ 922 void 923 flush_kern(void) 924 { 925 size_t needed; 926 int mib[6]; 927 char *buf, *next, *lim; 928 struct rt_msghdr *rtm; 929 struct interface *ifp; 930 static struct sockaddr_in gate_sa; 931 struct rt_addrinfo info; 932 933 934 mib[0] = CTL_NET; 935 mib[1] = PF_ROUTE; 936 mib[2] = 0; /* protocol */ 937 mib[3] = 0; /* wildcard address family */ 938 mib[4] = NET_RT_DUMP; 939 mib[5] = 0; /* no flags */ 940 if (sysctl(mib, 6, 0, &needed, 0, 0) < 0) { 941 DBGERR(1,"RT_DUMP-sysctl-estimate"); 942 return; 943 } 944 buf = malloc(needed); 945 if (sysctl(mib, 6, buf, &needed, 0, 0) < 0) 946 BADERR(1,"RT_DUMP"); 947 lim = buf + needed; 948 for (next = buf; next < lim; next += rtm->rtm_msglen) { 949 rtm = (struct rt_msghdr *)next; 950 951 rt_xaddrs(&info, 952 (struct sockaddr *)(rtm+1), 953 (struct sockaddr *)(next + rtm->rtm_msglen), 954 rtm->rtm_addrs); 955 956 if (INFO_DST(&info) == 0 957 || INFO_DST(&info)->sa_family != AF_INET) 958 continue; 959 960 /* ignore ARP table entries on systems with a merged route 961 * and ARP table. 962 */ 963 if (rtm->rtm_flags & RTF_LLINFO) 964 continue; 965 966 if (INFO_GATE(&info) == 0) 967 continue; 968 if (INFO_GATE(&info)->sa_family != AF_INET) { 969 if (INFO_GATE(&info)->sa_family != AF_LINK) 970 continue; 971 ifp = ifwithindex(((struct sockaddr_dl *) 972 INFO_GATE(&info))->sdl_index); 973 if (ifp == 0) 974 continue; 975 if ((ifp->int_if_flags & IFF_POINTOPOINT) 976 || S_ADDR(INFO_DST(&info)) == ifp->int_addr) 977 gate_sa.sin_addr.s_addr = ifp->int_addr; 978 else 979 gate_sa.sin_addr.s_addr = htonl(ifp->int_net); 980 #ifdef _HAVE_SA_LEN 981 gate_sa.sin_len = sizeof(gate_sa); 982 #endif 983 gate_sa.sin_family = AF_INET; 984 INFO_GATE(&info) = (struct sockaddr *)&gate_sa; 985 } 986 987 /* ignore multicast addresses 988 */ 989 if (IN_MULTICAST(ntohl(S_ADDR(INFO_DST(&info))))) 990 continue; 991 992 /* Note static routes and interface routes, and also 993 * preload the image of the kernel table so that 994 * we can later clean it, as well as avoid making 995 * unneeded changes. Keep the old kernel routes for a 996 * few seconds to allow a RIP or router-discovery 997 * response to be heard. 998 */ 999 rtm_add(rtm,&info,MIN_WAITTIME); 1000 } 1001 free(buf); 1002 } 1003 1004 1005 /* Listen to announcements from the kernel 1006 */ 1007 void 1008 read_rt(void) 1009 { 1010 long cc; 1011 struct interface *ifp; 1012 naddr mask; 1013 union { 1014 struct { 1015 struct rt_msghdr rtm; 1016 struct sockaddr addrs[RTAX_MAX]; 1017 } r; 1018 struct if_msghdr ifm; 1019 } m; 1020 char str[100], *strp; 1021 struct rt_addrinfo info; 1022 1023 1024 for (;;) { 1025 cc = read(rt_sock, &m, sizeof(m)); 1026 if (cc <= 0) { 1027 if (cc < 0 && errno != EWOULDBLOCK) 1028 LOGERR("read(rt_sock)"); 1029 return; 1030 } 1031 1032 if (m.r.rtm.rtm_version != RTM_VERSION) { 1033 msglog("bogus routing message version %d", 1034 m.r.rtm.rtm_version); 1035 continue; 1036 } 1037 1038 /* Ignore our own results. 1039 */ 1040 if (m.r.rtm.rtm_type <= RTM_CHANGE 1041 && m.r.rtm.rtm_pid == mypid) { 1042 static int complained = 0; 1043 if (!complained) { 1044 msglog("receiving our own change messages"); 1045 complained = 1; 1046 } 1047 continue; 1048 } 1049 1050 if (m.r.rtm.rtm_type == RTM_IFINFO 1051 || m.r.rtm.rtm_type == RTM_NEWADDR 1052 || m.r.rtm.rtm_type == RTM_DELADDR) { 1053 ifp = ifwithindex(m.ifm.ifm_index); 1054 if (ifp == 0) 1055 trace_act("note %s with flags %#x" 1056 " for index #%d\n", 1057 rtm_type_name(m.r.rtm.rtm_type), 1058 m.ifm.ifm_flags, 1059 m.ifm.ifm_index); 1060 else 1061 trace_act("note %s with flags %#x for %s\n", 1062 rtm_type_name(m.r.rtm.rtm_type), 1063 m.ifm.ifm_flags, 1064 ifp->int_name); 1065 1066 /* After being informed of a change to an interface, 1067 * check them all now if the check would otherwise 1068 * be a long time from now, if the interface is 1069 * not known, or if the interface has been turned 1070 * off or on. 1071 */ 1072 if (ifinit_timer.tv_sec-now.tv_sec>=CHECK_BAD_INTERVAL 1073 || ifp == 0 1074 || ((ifp->int_if_flags ^ m.ifm.ifm_flags) 1075 & IFF_UP_RUNNING) != 0) 1076 ifinit_timer.tv_sec = now.tv_sec; 1077 continue; 1078 } 1079 1080 strcpy(str, rtm_type_name(m.r.rtm.rtm_type)); 1081 strp = &str[strlen(str)]; 1082 if (m.r.rtm.rtm_type <= RTM_CHANGE) 1083 strp += sprintf(strp," from pid %d",m.r.rtm.rtm_pid); 1084 1085 rt_xaddrs(&info, m.r.addrs, &m.r.addrs[RTAX_MAX], 1086 m.r.rtm.rtm_addrs); 1087 1088 if (INFO_DST(&info) == 0) { 1089 trace_act("ignore %s without dst\n", str); 1090 continue; 1091 } 1092 1093 if (INFO_DST(&info)->sa_family != AF_INET) { 1094 trace_act("ignore %s for AF %d\n", str, 1095 INFO_DST(&info)->sa_family); 1096 continue; 1097 } 1098 1099 mask = ((INFO_MASK(&info) != 0) 1100 ? ntohl(S_ADDR(INFO_MASK(&info))) 1101 : (m.r.rtm.rtm_flags & RTF_HOST) 1102 ? HOST_MASK 1103 : std_mask(S_ADDR(INFO_DST(&info)))); 1104 1105 strp += sprintf(strp, ": %s", 1106 addrname(S_ADDR(INFO_DST(&info)), mask, 0)); 1107 1108 if (IN_MULTICAST(ntohl(S_ADDR(INFO_DST(&info))))) { 1109 trace_act("ignore %s for multicast %s\n", str); 1110 continue; 1111 } 1112 1113 if (INFO_GATE(&info) != 0 1114 && INFO_GATE(&info)->sa_family == AF_INET) 1115 strp += sprintf(strp, " --> %s", 1116 saddr_ntoa(INFO_GATE(&info))); 1117 1118 if (INFO_AUTHOR(&info) != 0) 1119 strp += sprintf(strp, " by authority of %s", 1120 saddr_ntoa(INFO_AUTHOR(&info))); 1121 1122 switch (m.r.rtm.rtm_type) { 1123 case RTM_ADD: 1124 case RTM_CHANGE: 1125 case RTM_REDIRECT: 1126 if (m.r.rtm.rtm_errno != 0) { 1127 trace_act("ignore %s with \"%s\" error\n", 1128 str, strerror(m.r.rtm.rtm_errno)); 1129 } else { 1130 trace_act("%s\n", str); 1131 rtm_add(&m.r.rtm,&info,0); 1132 } 1133 break; 1134 1135 case RTM_DELETE: 1136 if (m.r.rtm.rtm_errno != 0) { 1137 trace_act("ignore %s with \"%s\" error\n", 1138 str, strerror(m.r.rtm.rtm_errno)); 1139 } else { 1140 trace_act("%s\n", str); 1141 del_static(S_ADDR(INFO_DST(&info)), mask, 1); 1142 } 1143 break; 1144 1145 case RTM_LOSING: 1146 trace_act("%s\n", str); 1147 rtm_lose(&m.r.rtm,&info); 1148 break; 1149 1150 default: 1151 trace_act("ignore %s\n", str); 1152 break; 1153 } 1154 } 1155 } 1156 1157 1158 /* after aggregating, note routes that belong in the kernel 1159 */ 1160 static void 1161 kern_out(struct ag_info *ag) 1162 { 1163 struct khash *k; 1164 1165 1166 /* Do not install bad routes if they are not already present. 1167 * This includes routes that had RS_NET_SYN for interfaces that 1168 * recently died. 1169 */ 1170 if (ag->ag_metric == HOPCNT_INFINITY) { 1171 k = kern_find(htonl(ag->ag_dst_h), ag->ag_mask, 0); 1172 if (k == 0) 1173 return; 1174 } else { 1175 k = kern_add(htonl(ag->ag_dst_h), ag->ag_mask); 1176 } 1177 1178 if (k->k_state & KS_NEW) { 1179 /* will need to add new entry to the kernel table */ 1180 k->k_state = KS_ADD; 1181 if (ag->ag_state & AGS_GATEWAY) 1182 k->k_state |= KS_GATEWAY; 1183 k->k_gate = ag->ag_gate; 1184 k->k_metric = ag->ag_metric; 1185 return; 1186 } 1187 1188 if (k->k_state & KS_STATIC) 1189 return; 1190 1191 /* modify existing kernel entry if necessary */ 1192 if (k->k_gate != ag->ag_gate 1193 || k->k_metric != ag->ag_metric) { 1194 k->k_gate = ag->ag_gate; 1195 k->k_metric = ag->ag_metric; 1196 k->k_state |= KS_CHANGE; 1197 } 1198 1199 if (k->k_state & KS_DYNAMIC) { 1200 k->k_state &= ~KS_DYNAMIC; 1201 k->k_state |= (KS_ADD | KS_DEL_ADD); 1202 } 1203 1204 if ((k->k_state & KS_GATEWAY) 1205 && !(ag->ag_state & AGS_GATEWAY)) { 1206 k->k_state &= ~KS_GATEWAY; 1207 k->k_state |= (KS_ADD | KS_DEL_ADD); 1208 } else if (!(k->k_state & KS_GATEWAY) 1209 && (ag->ag_state & AGS_GATEWAY)) { 1210 k->k_state |= KS_GATEWAY; 1211 k->k_state |= (KS_ADD | KS_DEL_ADD); 1212 } 1213 1214 /* Just delete instead of deleting and then adding a bad route. 1215 * Deleting-and-adding is necessary to change aspects of a route. 1216 * Otherwise, we want to keep the route in the kernel. 1217 */ 1218 if (k->k_metric == HOPCNT_INFINITY 1219 && (k->k_state & KS_DEL_ADD)) 1220 k->k_state |= KS_DELETE; 1221 else 1222 k->k_state &= ~KS_DELETE; 1223 #undef RT 1224 } 1225 1226 1227 /* ARGSUSED */ 1228 static int 1229 walk_kern(struct radix_node *rn, void *argp) 1230 { 1231 #define RT ((struct rt_entry *)rn) 1232 #if 0 1233 struct walkarg *w = argp; 1234 #endif 1235 char metric, pref; 1236 u_int ags = 0; 1237 1238 1239 /* Do not install synthetic routes */ 1240 if (RT->rt_state & RS_NET_SYN) 1241 return 0; 1242 1243 if (!(RT->rt_state & RS_IF)) { 1244 ags |= (AGS_GATEWAY | AGS_SUPPRESS | AGS_PROMOTE); 1245 1246 } else { 1247 /* Do not install routes for "external" remote interfaces. 1248 */ 1249 if (RT->rt_ifp != 0 && (RT->rt_ifp->int_state & IS_EXTERNAL)) 1250 return 0; 1251 1252 ags |= AGS_IF; 1253 1254 /* If it is not an interface, or an alias for an interface, 1255 * it must be a "gateway." 1256 * 1257 * If it is a "remote" interface, it is also a "gateway" to 1258 * the kernel if is not a alias. 1259 */ 1260 if (RT->rt_ifp == 0 1261 || ((RT->rt_ifp->int_state & IS_REMOTE) 1262 && RT->rt_ifp->int_metric == 0)) 1263 ags |= (AGS_GATEWAY | AGS_SUPPRESS | AGS_PROMOTE); 1264 } 1265 1266 if (RT->rt_state & RS_RDISC) 1267 ags |= AGS_CORS_GATE; 1268 1269 /* aggregate good routes without regard to their metric */ 1270 pref = 1; 1271 metric = RT->rt_metric; 1272 if (metric == HOPCNT_INFINITY) { 1273 /* if the route is dead, so try hard to aggregate. */ 1274 pref = HOPCNT_INFINITY; 1275 ags |= (AGS_FINE_GATE | AGS_SUPPRESS); 1276 } 1277 1278 ag_check(RT->rt_dst, RT->rt_mask, RT->rt_gate, 0, 1279 metric,pref, 0, 0, ags, kern_out); 1280 return 0; 1281 #undef RT 1282 } 1283 1284 1285 /* Update the kernel table to match the daemon table. 1286 */ 1287 static void 1288 fix_kern(void) 1289 { 1290 int i, flags; 1291 struct khash *k, **pk; 1292 1293 1294 need_kern = age_timer; 1295 1296 /* Walk daemon table, updating the copy of the kernel table. 1297 */ 1298 (void)rn_walktree(rhead, walk_kern, 0); 1299 ag_flush(0,0,kern_out); 1300 1301 for (i = 0; i < KHASH_SIZE; i++) { 1302 for (pk = &khash_bins[i]; (k = *pk) != 0; ) { 1303 /* Do not touch static routes */ 1304 if (k->k_state & KS_STATIC) { 1305 kern_check_static(k,0); 1306 pk = &k->k_next; 1307 continue; 1308 } 1309 1310 /* check hold on routes deleted by the operator */ 1311 if (k->k_keep > now.tv_sec) { 1312 LIM_SEC(need_kern, k->k_keep); 1313 k->k_state |= KS_DELETE; 1314 pk = &k->k_next; 1315 continue; 1316 } 1317 1318 if (k->k_state & KS_DELETE) { 1319 if (!(k->k_state & KS_DELETED)) 1320 rtioctl(RTM_DELETE, 1321 k->k_dst,k->k_gate, 1322 k->k_mask, 0, 0); 1323 *pk = k->k_next; 1324 free(k); 1325 continue; 1326 } 1327 1328 if (k->k_state & KS_DEL_ADD) 1329 rtioctl(RTM_DELETE, 1330 k->k_dst,k->k_gate,k->k_mask, 0, 0); 1331 1332 flags = (k->k_state & KS_GATEWAY) ? RTF_GATEWAY : 0; 1333 if (k->k_state & KS_ADD) { 1334 rtioctl(RTM_ADD, 1335 k->k_dst, k->k_gate, k->k_mask, 1336 k->k_metric, flags); 1337 } else if (k->k_state & KS_CHANGE) { 1338 rtioctl(RTM_CHANGE, 1339 k->k_dst,k->k_gate,k->k_mask, 1340 k->k_metric, flags); 1341 } 1342 k->k_state &= ~(KS_ADD | KS_CHANGE | KS_DEL_ADD); 1343 1344 /* Mark this route to be deleted in the next cycle. 1345 * This deletes routes that disappear from the 1346 * daemon table, since the normal aging code 1347 * will clear the bit for routes that have not 1348 * disappeared from the daemon table. 1349 */ 1350 k->k_state |= KS_DELETE; 1351 pk = &k->k_next; 1352 } 1353 } 1354 } 1355 1356 1357 /* Delete a static route in the image of the kernel table. 1358 */ 1359 void 1360 del_static(naddr dst, 1361 naddr mask, 1362 int gone) 1363 { 1364 struct khash *k; 1365 struct rt_entry *rt; 1366 1367 /* Just mark it in the table to be deleted next time the kernel 1368 * table is updated. 1369 * If it has already been deleted, mark it as such, and set its 1370 * keep-timer so that it will not be deleted again for a while. 1371 * This lets the operator delete a route added by the daemon 1372 * and add a replacement. 1373 */ 1374 k = kern_find(dst, mask, 0); 1375 if (k != 0) { 1376 k->k_state &= ~KS_STATIC; 1377 k->k_state |= KS_DELETE; 1378 if (gone) { 1379 k->k_state |= KS_DELETED; 1380 k->k_keep = now.tv_sec + K_KEEP_LIM; 1381 } 1382 } 1383 1384 rt = rtget(dst, mask); 1385 if (rt != 0 && (rt->rt_state & RS_STATIC)) 1386 rtbad(rt); 1387 } 1388 1389 1390 /* Delete all routes generated from ICMP Redirects that use a given 1391 * gateway, as well as all old redirected routes. 1392 */ 1393 void 1394 del_redirects(naddr bad_gate, 1395 time_t old) 1396 { 1397 int i; 1398 struct khash *k; 1399 1400 1401 for (i = 0; i < KHASH_SIZE; i++) { 1402 for (k = khash_bins[i]; k != 0; k = k->k_next) { 1403 if (!(k->k_state & KS_DYNAMIC) 1404 || (k->k_state & KS_STATIC)) 1405 continue; 1406 1407 if (k->k_gate != bad_gate 1408 && k->k_redirect_time > old 1409 && !supplier) 1410 continue; 1411 1412 k->k_state |= KS_DELETE; 1413 need_kern.tv_sec = now.tv_sec; 1414 trace_act("mark redirected %s --> %s for deletion\n", 1415 addrname(k->k_dst, k->k_mask, 0), 1416 naddr_ntoa(k->k_gate)); 1417 } 1418 } 1419 } 1420 1421 1422 /* Start the daemon tables. 1423 */ 1424 void 1425 rtinit(void) 1426 { 1427 extern int max_keylen; 1428 int i; 1429 struct ag_info *ag; 1430 1431 /* Initialize the radix trees */ 1432 max_keylen = sizeof(struct sockaddr_in); 1433 rn_init(); 1434 rn_inithead((void**)&rhead, 32); 1435 1436 /* mark all of the slots in the table free */ 1437 ag_avail = ag_slots; 1438 for (ag = ag_slots, i = 1; i < NUM_AG_SLOTS; i++) { 1439 ag->ag_fine = ag+1; 1440 ag++; 1441 } 1442 } 1443 1444 1445 #ifdef _HAVE_SIN_LEN 1446 static struct sockaddr_in dst_sock = {sizeof(dst_sock), AF_INET}; 1447 static struct sockaddr_in mask_sock = {sizeof(mask_sock), AF_INET}; 1448 #else 1449 static struct sockaddr_in_new dst_sock = {_SIN_ADDR_SIZE, AF_INET}; 1450 static struct sockaddr_in_new mask_sock = {_SIN_ADDR_SIZE, AF_INET}; 1451 #endif 1452 1453 1454 void 1455 set_need_flash(void) 1456 { 1457 if (!need_flash) { 1458 need_flash = 1; 1459 /* Do not send the flash update immediately. Wait a little 1460 * while to hear from other routers. 1461 */ 1462 no_flash.tv_sec = now.tv_sec + MIN_WAITTIME; 1463 } 1464 } 1465 1466 1467 /* Get a particular routing table entry 1468 */ 1469 struct rt_entry * 1470 rtget(naddr dst, naddr mask) 1471 { 1472 struct rt_entry *rt; 1473 1474 dst_sock.sin_addr.s_addr = dst; 1475 mask_sock.sin_addr.s_addr = mask; 1476 masktrim(&mask_sock); 1477 rt = (struct rt_entry *)rhead->rnh_lookup(&dst_sock,&mask_sock,rhead); 1478 if (!rt 1479 || rt->rt_dst != dst 1480 || rt->rt_mask != mask) 1481 return 0; 1482 1483 return rt; 1484 } 1485 1486 1487 /* Find a route to dst as the kernel would. 1488 */ 1489 struct rt_entry * 1490 rtfind(naddr dst) 1491 { 1492 dst_sock.sin_addr.s_addr = dst; 1493 return (struct rt_entry *)rhead->rnh_matchaddr(&dst_sock, rhead); 1494 } 1495 1496 1497 /* add a route to the table 1498 */ 1499 void 1500 rtadd(naddr dst, 1501 naddr mask, 1502 naddr gate, /* forward packets here */ 1503 naddr router, /* on the authority of this router */ 1504 int metric, 1505 u_short tag, 1506 u_int state, /* rs_state for the entry */ 1507 struct interface *ifp) 1508 { 1509 struct rt_entry *rt; 1510 naddr smask; 1511 int i; 1512 struct rt_spare *rts; 1513 1514 rt = (struct rt_entry *)malloc(sizeof (*rt)); 1515 if (rt == 0) { 1516 BADERR(1,"rtadd malloc"); 1517 return; 1518 } 1519 bzero(rt, sizeof(*rt)); 1520 for (rts = rt->rt_spares, i = NUM_SPARES; i != 0; i--, rts++) 1521 rts->rts_metric = HOPCNT_INFINITY; 1522 1523 rt->rt_nodes->rn_key = (caddr_t)&rt->rt_dst_sock; 1524 rt->rt_dst = dst; 1525 rt->rt_dst_sock.sin_family = AF_INET; 1526 #ifdef _HAVE_SIN_LEN 1527 rt->rt_dst_sock.sin_len = dst_sock.sin_len; 1528 #endif 1529 if (mask != HOST_MASK) { 1530 smask = std_mask(dst); 1531 if ((smask & ~mask) == 0 && mask > smask) 1532 state |= RS_SUBNET; 1533 } 1534 mask_sock.sin_addr.s_addr = mask; 1535 masktrim(&mask_sock); 1536 rt->rt_mask = mask; 1537 rt->rt_state = state; 1538 rt->rt_gate = gate; 1539 rt->rt_router = router; 1540 rt->rt_time = now.tv_sec; 1541 rt->rt_metric = metric; 1542 rt->rt_poison_metric = HOPCNT_INFINITY; 1543 rt->rt_tag = tag; 1544 rt->rt_ifp = ifp; 1545 rt->rt_seqno = update_seqno; 1546 1547 if (TRACEACTIONS) 1548 trace_add_del("Add", rt); 1549 1550 need_kern.tv_sec = now.tv_sec; 1551 set_need_flash(); 1552 1553 if (0 == rhead->rnh_addaddr(&rt->rt_dst_sock, &mask_sock, 1554 rhead, rt->rt_nodes)) { 1555 msglog("rnh_addaddr() failed for %s mask=%#x", 1556 naddr_ntoa(dst), mask); 1557 } 1558 } 1559 1560 1561 /* notice a changed route 1562 */ 1563 void 1564 rtchange(struct rt_entry *rt, 1565 u_int state, /* new state bits */ 1566 naddr gate, /* now forward packets here */ 1567 naddr router, /* on the authority of this router */ 1568 int metric, /* new metric */ 1569 u_short tag, 1570 struct interface *ifp, 1571 time_t new_time, 1572 char *label) 1573 { 1574 if (rt->rt_metric != metric) { 1575 /* Fix the kernel immediately if it seems the route 1576 * has gone bad, since there may be a working route that 1577 * aggregates this route. 1578 */ 1579 if (metric == HOPCNT_INFINITY) 1580 need_kern.tv_sec = now.tv_sec; 1581 rt->rt_seqno = update_seqno; 1582 set_need_flash(); 1583 } 1584 1585 if (rt->rt_gate != gate) { 1586 need_kern.tv_sec = now.tv_sec; 1587 rt->rt_seqno = update_seqno; 1588 set_need_flash(); 1589 } 1590 1591 state |= (rt->rt_state & RS_SUBNET); 1592 1593 if (TRACEACTIONS) 1594 trace_change(rt, state, gate, router, metric, tag, ifp, 1595 new_time, 1596 label ? label : "Chg "); 1597 1598 rt->rt_state = state; 1599 rt->rt_gate = gate; 1600 rt->rt_router = router; 1601 rt->rt_metric = metric; 1602 rt->rt_tag = tag; 1603 rt->rt_ifp = ifp; 1604 rt->rt_time = new_time; 1605 } 1606 1607 1608 /* check for a better route among the spares 1609 */ 1610 static struct rt_spare * 1611 rts_better(struct rt_entry *rt) 1612 { 1613 struct rt_spare *rts, *rts1; 1614 int i; 1615 1616 /* find the best alternative among the spares */ 1617 rts = rt->rt_spares+1; 1618 for (i = NUM_SPARES, rts1 = rts+1; i > 2; i--, rts1++) { 1619 if (BETTER_LINK(rt,rts1,rts)) 1620 rts = rts1; 1621 } 1622 1623 return rts; 1624 } 1625 1626 1627 /* switch to a backup route 1628 */ 1629 void 1630 rtswitch(struct rt_entry *rt, 1631 struct rt_spare *rts) 1632 { 1633 struct rt_spare swap; 1634 char label[10]; 1635 1636 1637 /* Do not change permanent routes */ 1638 if (0 != (rt->rt_state & RS_PERMANENT)) 1639 return; 1640 1641 /* Do not discard synthetic routes until they go bad */ 1642 if ((rt->rt_state & RS_NET_SYN) 1643 && rt->rt_metric < HOPCNT_INFINITY) 1644 return; 1645 1646 /* find the best alternative among the spares */ 1647 if (rts == 0) 1648 rts = rts_better(rt); 1649 1650 /* Do not bother if it is not worthwhile. 1651 */ 1652 if (!BETTER_LINK(rt, rts, rt->rt_spares)) 1653 return; 1654 1655 swap = rt->rt_spares[0]; 1656 (void)sprintf(label, "Use #%d", rts - rt->rt_spares); 1657 rtchange(rt, rt->rt_state & ~(RS_NET_SYN | RS_RDISC), 1658 rts->rts_gate, rts->rts_router, rts->rts_metric, 1659 rts->rts_tag, rts->rts_ifp, rts->rts_time, label); 1660 *rts = swap; 1661 } 1662 1663 1664 void 1665 rtdelete(struct rt_entry *rt) 1666 { 1667 struct khash *k; 1668 1669 1670 if (TRACEACTIONS) 1671 trace_add_del("Del", rt); 1672 1673 k = kern_find(rt->rt_dst, rt->rt_mask, 0); 1674 if (k != 0) { 1675 k->k_state |= KS_DELETE; 1676 need_kern.tv_sec = now.tv_sec; 1677 } 1678 1679 dst_sock.sin_addr.s_addr = rt->rt_dst; 1680 mask_sock.sin_addr.s_addr = rt->rt_mask; 1681 masktrim(&mask_sock); 1682 if (rt != (struct rt_entry *)rhead->rnh_deladdr(&dst_sock, &mask_sock, 1683 rhead)) { 1684 msglog("rnh_deladdr() failed"); 1685 } else { 1686 free(rt); 1687 } 1688 } 1689 1690 1691 /* Get rid of a bad route, and try to switch to a replacement. 1692 */ 1693 void 1694 rtbad(struct rt_entry *rt) 1695 { 1696 /* Poison the route */ 1697 rtchange(rt, rt->rt_state & ~(RS_IF | RS_LOCAL | RS_STATIC), 1698 rt->rt_gate, rt->rt_router, HOPCNT_INFINITY, rt->rt_tag, 1699 0, rt->rt_time, 0); 1700 1701 rtswitch(rt, 0); 1702 } 1703 1704 1705 /* Junk a RS_NET_SYN or RS_LOCAL route, 1706 * unless it is needed by another interface. 1707 */ 1708 void 1709 rtbad_sub(struct rt_entry *rt) 1710 { 1711 struct interface *ifp, *ifp1; 1712 struct intnet *intnetp; 1713 u_int state; 1714 1715 1716 ifp1 = 0; 1717 state = 0; 1718 1719 if (rt->rt_state & RS_LOCAL) { 1720 /* Is this the route through loopback for the interface? 1721 * If so, see if it is used by any other interfaces, such 1722 * as a point-to-point interface with the same local address. 1723 */ 1724 for (ifp = ifnet; ifp != 0; ifp = ifp->int_next) { 1725 /* Retain it if another interface needs it. 1726 */ 1727 if (ifp->int_addr == rt->rt_ifp->int_addr) { 1728 state |= RS_LOCAL; 1729 ifp1 = ifp; 1730 break; 1731 } 1732 } 1733 1734 } 1735 1736 if (!(state & RS_LOCAL)) { 1737 /* Retain RIPv1 logical network route if there is another 1738 * interface that justifies it. 1739 */ 1740 if (rt->rt_state & RS_NET_SYN) { 1741 for (ifp = ifnet; ifp != 0; ifp = ifp->int_next) { 1742 if ((ifp->int_state & IS_NEED_NET_SYN) 1743 && rt->rt_mask == ifp->int_std_mask 1744 && rt->rt_dst == ifp->int_std_addr) { 1745 state |= RS_NET_SYN; 1746 ifp1 = ifp; 1747 break; 1748 } 1749 } 1750 } 1751 1752 /* or if there is an authority route that needs it. */ 1753 for (intnetp = intnets; 1754 intnetp != 0; 1755 intnetp = intnetp->intnet_next) { 1756 if (intnetp->intnet_addr == rt->rt_dst 1757 && intnetp->intnet_mask == rt->rt_mask) { 1758 state |= (RS_NET_SYN | RS_NET_INT); 1759 break; 1760 } 1761 } 1762 } 1763 1764 if (ifp1 != 0 || (state & RS_NET_SYN)) { 1765 rtchange(rt, ((rt->rt_state & ~(RS_NET_SYN | RS_LOCAL)) 1766 | state), 1767 rt->rt_gate, rt->rt_router, rt->rt_metric, 1768 rt->rt_tag, ifp1, rt->rt_time, 0); 1769 } else { 1770 rtbad(rt); 1771 } 1772 } 1773 1774 1775 /* Called while walking the table looking for sick interfaces 1776 * or after a time change. 1777 */ 1778 /* ARGSUSED */ 1779 int 1780 walk_bad(struct radix_node *rn, void *argp) 1781 { 1782 #define RT ((struct rt_entry *)rn) 1783 #if 0 1784 struct walkarg *w = argp; /* not used */ 1785 #endif 1786 struct rt_spare *rts; 1787 int i; 1788 time_t new_time; 1789 1790 1791 /* fix any spare routes through the interface 1792 */ 1793 rts = RT->rt_spares; 1794 for (i = NUM_SPARES; i != 1; i--) { 1795 rts++; 1796 1797 if (rts->rts_ifp != 0 1798 && (rts->rts_ifp->int_state & IS_BROKE)) { 1799 new_time = rts->rts_time; 1800 if (new_time >= now_garbage) 1801 new_time = now_garbage-1; 1802 trace_upslot(RT, rts, rts->rts_gate, 1803 rts->rts_router, 0, 1804 HOPCNT_INFINITY, rts->rts_tag, 1805 new_time); 1806 rts->rts_ifp = 0; 1807 rts->rts_metric = HOPCNT_INFINITY; 1808 rts->rts_time = new_time; 1809 } 1810 } 1811 1812 /* Deal with the main route 1813 */ 1814 /* finished if it has been handled before or if its interface is ok 1815 */ 1816 if (RT->rt_ifp == 0 || !(RT->rt_ifp->int_state & IS_BROKE)) 1817 return 0; 1818 1819 /* Bad routes for other than interfaces are easy. 1820 */ 1821 if (0 == (RT->rt_state & (RS_IF | RS_NET_SYN | RS_LOCAL))) { 1822 rtbad(RT); 1823 return 0; 1824 } 1825 1826 rtbad_sub(RT); 1827 return 0; 1828 #undef RT 1829 } 1830 1831 1832 /* Check the age of an individual route. 1833 */ 1834 /* ARGSUSED */ 1835 static int 1836 walk_age(struct radix_node *rn, void *argp) 1837 { 1838 #define RT ((struct rt_entry *)rn) 1839 #if 0 1840 struct walkarg *w = argp; /* not used */ 1841 #endif 1842 struct interface *ifp; 1843 struct rt_spare *rts; 1844 int i; 1845 1846 1847 /* age all of the spare routes, including the primary route 1848 * currently in use 1849 */ 1850 rts = RT->rt_spares; 1851 for (i = NUM_SPARES; i != 0; i--, rts++) { 1852 1853 ifp = rts->rts_ifp; 1854 if (i == NUM_SPARES) { 1855 if (!AGE_RT(RT, ifp)) { 1856 /* Keep various things from deciding ageless 1857 * routes are stale */ 1858 rts->rts_time = now.tv_sec; 1859 continue; 1860 } 1861 1862 /* forget RIP routes after RIP has been turned off. 1863 */ 1864 if (rip_sock < 0) { 1865 rtdelete(RT); 1866 return 0; 1867 } 1868 } 1869 1870 /* age failing routes 1871 */ 1872 if (age_bad_gate == rts->rts_gate 1873 && rts->rts_time >= now_stale) { 1874 rts->rts_time -= SUPPLY_INTERVAL; 1875 } 1876 1877 /* trash the spare routes when they go bad */ 1878 if (rts->rts_metric < HOPCNT_INFINITY 1879 && now_garbage > rts->rts_time) { 1880 trace_upslot(RT, rts, rts->rts_gate, 1881 rts->rts_router, rts->rts_ifp, 1882 HOPCNT_INFINITY, rts->rts_tag, 1883 rts->rts_time); 1884 rts->rts_metric = HOPCNT_INFINITY; 1885 } 1886 } 1887 1888 1889 /* finished if the active route is still fresh */ 1890 if (now_stale <= RT->rt_time) 1891 return 0; 1892 1893 /* try to switch to an alternative */ 1894 rtswitch(RT, 0); 1895 1896 /* Delete a dead route after it has been publically mourned. */ 1897 if (now_garbage > RT->rt_time) { 1898 rtdelete(RT); 1899 return 0; 1900 } 1901 1902 /* Start poisoning a bad route before deleting it. */ 1903 if (now.tv_sec - RT->rt_time > EXPIRE_TIME) 1904 rtchange(RT, RT->rt_state, RT->rt_gate, RT->rt_router, 1905 HOPCNT_INFINITY, RT->rt_tag, RT->rt_ifp, 1906 RT->rt_time, 0); 1907 return 0; 1908 } 1909 1910 1911 /* Watch for dead routes and interfaces. 1912 */ 1913 void 1914 age(naddr bad_gate) 1915 { 1916 struct interface *ifp; 1917 1918 1919 age_timer.tv_sec = now.tv_sec + (rip_sock < 0 1920 ? NEVER 1921 : SUPPLY_INTERVAL); 1922 1923 for (ifp = ifnet; ifp; ifp = ifp->int_next) { 1924 /* Check for dead IS_REMOTE interfaces by timing their 1925 * transmissions. 1926 */ 1927 if ((ifp->int_state & IS_REMOTE) 1928 && !(ifp->int_state & IS_PASSIVE) 1929 && (ifp->int_state & IS_ACTIVE)) { 1930 LIM_SEC(age_timer, now.tv_sec+SUPPLY_INTERVAL); 1931 1932 if (now.tv_sec - ifp->int_act_time > EXPIRE_TIME 1933 && !(ifp->int_state & IS_BROKE)) { 1934 msglog("remote interface %s to %s timed out" 1935 "--turned off", 1936 ifp->int_name, 1937 naddr_ntoa(ifp->int_addr)); 1938 if_bad(ifp); 1939 } 1940 } 1941 } 1942 1943 /* Age routes. */ 1944 age_bad_gate = bad_gate; 1945 (void)rn_walktree(rhead, walk_age, 0); 1946 1947 /* Update the kernel routing table. */ 1948 fix_kern(); 1949 } 1950