/*	$OpenBSD: kroute.c,v 1.67 2009/01/16 20:50:13 gollo Exp $ */

/*
 * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/route.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "ospfd.h"
#include "log.h"

struct {
    u_int32_t      rtseq;
    pid_t          pid;
    int            fib_sync;
    int            fd;
    struct event   ev;
} kr_state;

struct kroute_node {
    RB_ENTRY(kroute_node)    entry;
    struct kroute            r;
    struct kroute_node      *next;
};

struct kif_node {
    RB_ENTRY(kif_node)       entry;
    TAILQ_HEAD(, kif_addr)   addrs;
    struct kif               k;
};

void    kr_redist_remove(struct kroute_node *, struct kroute_node *);
int     kr_redist_eval(struct kroute *, struct rroute *);
void    kr_redistribute(struct kroute_node *);
int     kroute_compare(struct kroute_node *, struct kroute_node *);
int     kif_compare(struct kif_node *, struct kif_node *);
int     kr_change_fib(struct kroute_node *, struct kroute *, int, int);
int     kr_delete_fib(struct kroute_node *);

struct kroute_node  *kroute_find(in_addr_t, u_int8_t, u_int8_t);
struct kroute_node  *kroute_matchgw(struct kroute_node *, struct in_addr);
int                  kroute_insert(struct kroute_node *);
int                  kroute_remove(struct kroute_node *);
void                 kroute_clear(void);

struct kif_node     *kif_find(u_short);
struct kif_node     *kif_insert(u_short);
int                  kif_remove(struct kif_node *);
void                 kif_clear(void);
struct kif          *kif_update(u_short, int, struct if_data *,
                         struct sockaddr_dl *);
int                  kif_validate(u_short);

struct kroute_node  *kroute_match(in_addr_t);

int        protect_lo(void);
u_int8_t   prefixlen_classful(in_addr_t);
void       get_rtaddrs(int, struct sockaddr *, struct sockaddr **);
void       if_change(u_short, int, struct if_data *, struct sockaddr_dl *);
void       if_newaddr(u_short, struct sockaddr_in *, struct sockaddr_in *,
               struct sockaddr_in *);
void       if_announce(void *);

int        send_rtmsg(int, int, struct kroute *);
int        dispatch_rtmsg(void);
int        fetchtable(void);
int        fetchifs(u_short);

RB_HEAD(kroute_tree, kroute_node)    krt;
RB_PROTOTYPE(kroute_tree, kroute_node, entry, kroute_compare)
RB_GENERATE(kroute_tree, kroute_node, entry, kroute_compare)

RB_HEAD(kif_tree, kif_node)          kit;
RB_PROTOTYPE(kif_tree, kif_node, entry, kif_compare)
RB_GENERATE(kif_tree, kif_node, entry, kif_compare)
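
/*
 * Kernel routes are cached in the krt red-black tree, keyed by prefix,
 * prefix length and routing priority; entries with an equal key
 * (multipath routes) are chained through kroute_node->next.  Known
 * interfaces and their addresses are cached in the kit tree, keyed by
 * interface index.
 */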
int
kif_init(void)
{
    RB_INIT(&kit);
    /* init also krt tree so that we can call kr_shutdown() */
    RB_INIT(&krt);
    kr_state.fib_sync = 0;    /* decoupled */

    if (fetchifs(0) == -1)
        return (-1);

    return (0);
}

int
kr_init(int fs)
{
    int         opt = 0, rcvbuf, default_rcvbuf;
    socklen_t   optlen;

    kr_state.fib_sync = fs;

    if ((kr_state.fd = socket(AF_ROUTE, SOCK_RAW, 0)) == -1) {
        log_warn("kr_init: socket");
        return (-1);
    }

    /* not interested in my own messages */
    if (setsockopt(kr_state.fd, SOL_SOCKET, SO_USELOOPBACK,
        &opt, sizeof(opt)) == -1)
        log_warn("kr_init: setsockopt");    /* not fatal */

    /* grow receive buffer, don't wanna miss messages */
    optlen = sizeof(default_rcvbuf);
    if (getsockopt(kr_state.fd, SOL_SOCKET, SO_RCVBUF,
        &default_rcvbuf, &optlen) == -1)
        log_warn("kr_init getsockopt SOL_SOCKET SO_RCVBUF");
    else
        for (rcvbuf = MAX_RTSOCK_BUF;
            rcvbuf > default_rcvbuf &&
            setsockopt(kr_state.fd, SOL_SOCKET, SO_RCVBUF,
            &rcvbuf, sizeof(rcvbuf)) == -1 && errno == ENOBUFS;
            rcvbuf /= 2)
            ;    /* nothing */

    kr_state.pid = getpid();
    kr_state.rtseq = 1;

    if (fetchtable() == -1)
        return (-1);

    if (protect_lo() == -1)
        return (-1);

    event_set(&kr_state.ev, kr_state.fd, EV_READ | EV_PERSIST,
        kr_dispatch_msg, NULL);
    event_add(&kr_state.ev, NULL);

    return (0);
}
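
/*
 * Install or update a (possibly multipath) OSPF route in the FIB.
 * For RTM_ADD the existing multipath list is first pruned of nexthops
 * that are no longer present in kroute[]; afterwards every remaining
 * new nexthop is sent to the kernel and tracked in the krt tree.
 */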
int
kr_change_fib(struct kroute_node *kr, struct kroute *kroute, int krcount,
    int action)
{
    int                  i;
    struct kroute_node  *kn, *nkn;

    if (action == RTM_ADD) {
        /*
         * First remove all stale multipath routes.
         * This step must be skipped when the action is RTM_CHANGE
         * because it is already a single path route that will be
         * changed.
         */
        for (kn = kr; kn != NULL; kn = nkn) {
            for (i = 0; i < krcount; i++) {
                if (kn->r.nexthop.s_addr ==
                    kroute[i].nexthop.s_addr)
                    break;
            }
            nkn = kn->next;
            if (i == krcount) {
                /* stale route */
                if (kr_delete_fib(kn) == -1)
                    log_warnx("kr_delete_fib failed");
                /*
                 * if head element was removed we need to adjust
                 * the head
                 */
                if (kr == kn)
                    kr = nkn;
            }
        }
    }

    /*
     * now add or change the route
     */
    for (i = 0; i < krcount; i++) {
        /* nexthop within 127/8 -> ignore silently */
        if ((kroute[i].nexthop.s_addr & htonl(IN_CLASSA_NET)) ==
            htonl(INADDR_LOOPBACK & IN_CLASSA_NET))
            continue;

        if (action == RTM_ADD && kr) {
            for (kn = kr; kn != NULL; kn = kn->next) {
                if (kn->r.nexthop.s_addr ==
                    kroute[i].nexthop.s_addr)
                    break;
            }

            if (kn != NULL)
                /* nexthop already present, skip it */
                continue;
        } else
            /* modify first entry */
            kn = kr;

        /* send update */
        if (send_rtmsg(kr_state.fd, action, &kroute[i]) == -1)
            return (-1);

        /* create new entry unless we are changing the first entry */
        if (action == RTM_ADD)
            if ((kn = calloc(1, sizeof(*kn))) == NULL)
                fatal(NULL);

        kn->r.prefix.s_addr = kroute[i].prefix.s_addr;
        kn->r.prefixlen = kroute[i].prefixlen;
        kn->r.nexthop.s_addr = kroute[i].nexthop.s_addr;
        kn->r.flags = kroute[i].flags | F_OSPFD_INSERTED;
        kn->r.priority = RTP_OSPF;
        kn->r.ext_tag = kroute[i].ext_tag;
        rtlabel_unref(kn->r.rtlabel);    /* for RTM_CHANGE */
        kn->r.rtlabel = kroute[i].rtlabel;

        if (action == RTM_ADD)
            if (kroute_insert(kn) == -1) {
                log_debug("kr_change_fib: cannot insert %s",
                    inet_ntoa(kn->r.nexthop));
                free(kn);
            }
        action = RTM_ADD;
    }
    return (0);
}

int
kr_change(struct kroute *kroute, int krcount)
{
    struct kroute_node  *kr;
    int                  action = RTM_ADD;

    kroute->rtlabel = rtlabel_tag2id(kroute->ext_tag);

    kr = kroute_find(kroute->prefix.s_addr, kroute->prefixlen, RTP_OSPF);
    if (kr != NULL && kr->next == NULL && krcount == 1)
        /* single path OSPF route */
        action = RTM_CHANGE;

    return (kr_change_fib(kr, kroute, krcount, action));
}

int
kr_delete_fib(struct kroute_node *kr)
{
    if (kr->r.priority != RTP_OSPF)
        log_warn("kr_delete_fib: %s/%d has wrong priority %d",
            inet_ntoa(kr->r.prefix), kr->r.prefixlen, kr->r.priority);

    if (send_rtmsg(kr_state.fd, RTM_DELETE, &kr->r) == -1)
        return (-1);

    if (kroute_remove(kr) == -1)
        return (-1);

    return (0);
}

int
kr_delete(struct kroute *kroute)
{
    struct kroute_node  *kr, *nkr;

    if ((kr = kroute_find(kroute->prefix.s_addr, kroute->prefixlen,
        RTP_OSPF)) == NULL)
        return (0);

    while (kr != NULL) {
        nkr = kr->next;
        if (kr_delete_fib(kr) == -1)
            return (-1);
        kr = nkr;
    }
    return (0);
}

void
kr_shutdown(void)
{
    kr_fib_decouple();
    kroute_clear();
    kif_clear();
}

void
kr_fib_couple(void)
{
    struct kroute_node  *kr;
    struct kroute_node  *kn;

    if (kr_state.fib_sync == 1)    /* already coupled */
        return;

    kr_state.fib_sync = 1;

    RB_FOREACH(kr, kroute_tree, &krt)
        if (kr->r.priority == RTP_OSPF)
            for (kn = kr; kn != NULL; kn = kn->next)
                send_rtmsg(kr_state.fd, RTM_ADD, &kn->r);

    log_info("kernel routing table coupled");
}
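
/* withdraw all OSPF routes from the FIB but keep them cached in krt */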
void
kr_fib_decouple(void)
{
    struct kroute_node  *kr;
    struct kroute_node  *kn;

    if (kr_state.fib_sync == 0)    /* already decoupled */
        return;

    RB_FOREACH(kr, kroute_tree, &krt)
        if (kr->r.priority == RTP_OSPF)
            for (kn = kr; kn != NULL; kn = kn->next)
                send_rtmsg(kr_state.fd, RTM_DELETE, &kn->r);

    kr_state.fib_sync = 0;

    log_info("kernel routing table decoupled");
}

/* ARGSUSED */
void
kr_dispatch_msg(int fd, short event, void *bula)
{
    /* XXX this is stupid */
    dispatch_rtmsg();
}

void
kr_show_route(struct imsg *imsg)
{
    struct kroute_node  *kr;
    struct kroute_node  *kn;
    int                  flags;
    struct in_addr       addr;

    switch (imsg->hdr.type) {
    case IMSG_CTL_KROUTE:
        if (imsg->hdr.len != IMSG_HEADER_SIZE + sizeof(flags)) {
            log_warnx("kr_show_route: wrong imsg len");
            return;
        }
        memcpy(&flags, imsg->data, sizeof(flags));
        RB_FOREACH(kr, kroute_tree, &krt)
            if (!flags || kr->r.flags & flags) {
                kn = kr;
                do {
                    main_imsg_compose_ospfe(IMSG_CTL_KROUTE,
                        imsg->hdr.pid,
                        &kn->r, sizeof(kn->r));
                } while ((kn = kn->next) != NULL);
            }
        break;
    case IMSG_CTL_KROUTE_ADDR:
        if (imsg->hdr.len != IMSG_HEADER_SIZE +
            sizeof(struct in_addr)) {
            log_warnx("kr_show_route: wrong imsg len");
            return;
        }
        memcpy(&addr, imsg->data, sizeof(addr));
        kr = NULL;
        kr = kroute_match(addr.s_addr);
        if (kr != NULL)
            main_imsg_compose_ospfe(IMSG_CTL_KROUTE, imsg->hdr.pid,
                &kr->r, sizeof(kr->r));
        break;
    default:
        log_debug("kr_show_route: error handling imsg");
        break;
    }

    main_imsg_compose_ospfe(IMSG_CTL_END, imsg->hdr.pid, NULL, 0);
}

void
kr_ifinfo(char *ifname, pid_t pid)
{
    struct kif_node  *kif;

    RB_FOREACH(kif, kif_tree, &kit)
        if (ifname == NULL || !strcmp(ifname, kif->k.ifname)) {
            main_imsg_compose_ospfe(IMSG_CTL_IFINFO,
                pid, &kif->k, sizeof(kif->k));
        }

    main_imsg_compose_ospfe(IMSG_CTL_END, pid, NULL, 0);
}

void
kr_redist_remove(struct kroute_node *kh, struct kroute_node *kn)
{
    struct rroute  rr;

    /* was the route redistributed? */
    if ((kn->r.flags & F_REDISTRIBUTED) == 0)
        return;

    /* remove redistributed flag */
    kn->r.flags &= ~F_REDISTRIBUTED;
    rr.kr = kn->r;
    rr.metric = DEFAULT_REDIST_METRIC;    /* some dummy value */

    /* probably inform the RDE (check if no other path is redistributed) */
    for (kn = kh; kn; kn = kn->next)
        if (kn->r.flags & F_REDISTRIBUTED)
            break;

    if (kn == NULL)
        main_imsg_compose_rde(IMSG_NETWORK_DEL, 0, &rr,
            sizeof(struct rroute));
}
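
/*
 * Evaluate one path of a kernel route for redistribution: paths that
 * are redistributable get F_REDISTRIBUTED set and, if their metric is
 * the best seen so far, are copied into *rr.  Returns 1 when the RDE
 * needs to hear about this prefix (the path is redistributable, or it
 * just lost its F_REDISTRIBUTED flag), 0 otherwise.
 */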
int
kr_redist_eval(struct kroute *kr, struct rroute *rr)
{
    u_int32_t  a, metric = 0;

    /* Only non-ospfd routes are considered for redistribution. */
    if (!(kr->flags & F_KERNEL))
        goto dont_redistribute;

    /* Dynamic routes are not redistributable. */
    if (kr->flags & F_DYNAMIC)
        goto dont_redistribute;

    /* interface is not up and running so don't announce */
    if (kr->flags & F_DOWN)
        goto dont_redistribute;

    /*
     * We consider the loopback net, multicast and experimental addresses
     * as not redistributable.
     */
    a = ntohl(kr->prefix.s_addr);
    if (IN_MULTICAST(a) || IN_BADCLASS(a) ||
        (a >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)
        goto dont_redistribute;
    /*
     * Consider networks with nexthop loopback as not redistributable.
     */
    if (kr->nexthop.s_addr == htonl(INADDR_LOOPBACK))
        goto dont_redistribute;

    /* Should we redistribute this route? */
    if (!ospf_redistribute(kr, &metric))
        goto dont_redistribute;

    /* prefix should be redistributed */
    kr->flags |= F_REDISTRIBUTED;
    /*
     * only one of all multipath routes can be redistributed, so
     * redistribute the best one.
     */
    if (rr->metric > metric) {
        rr->kr = *kr;
        rr->metric = metric;
    }
    return (1);

dont_redistribute:
    /* was the route redistributed? */
    if ((kr->flags & F_REDISTRIBUTED) == 0)
        return (0);

    kr->flags &= ~F_REDISTRIBUTED;
    return (1);
}

void
kr_redistribute(struct kroute_node *kh)
{
    struct kroute_node  *kn;
    struct rroute        rr;
    int                  redistribute = 0;

    /* only the highest prio route can be redistributed */
    if (kroute_find(kh->r.prefix.s_addr, kh->r.prefixlen, RTP_ANY) != kh)
        return;

    bzero(&rr, sizeof(rr));
    rr.metric = UINT_MAX;
    for (kn = kh; kn; kn = kn->next)
        if (kr_redist_eval(&kn->r, &rr))
            redistribute = 1;

    if (!redistribute)
        return;

    if (rr.kr.flags & F_REDISTRIBUTED) {
        main_imsg_compose_rde(IMSG_NETWORK_ADD, 0, &rr,
            sizeof(struct rroute));
    } else {
        rr.metric = DEFAULT_REDIST_METRIC;    /* some dummy value */
        rr.kr = kh->r;
        main_imsg_compose_rde(IMSG_NETWORK_DEL, 0, &rr,
            sizeof(struct rroute));
    }
}
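
/*
 * Re-evaluate all cached kernel routes after a configuration reload;
 * redistribute rules or metrics may have changed.
 */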
void
kr_reload(void)
{
    struct kroute_node  *kr, *kn;
    u_int32_t            dummy;
    int                  r;

    RB_FOREACH(kr, kroute_tree, &krt) {
        for (kn = kr; kn; kn = kn->next) {
            r = ospf_redistribute(&kn->r, &dummy);
            /*
             * if it is redistributed, redistribute again;
             * the metric may have changed.
             */
            if ((kn->r.flags & F_REDISTRIBUTED && !r) || r)
                break;
        }
        if (kn) {
            /*
             * kr_redistribute copes with removes and RDE with
             * duplicates
             */
            kr_redistribute(kr);
        }
    }
}

/* rb-tree compare */
int
kroute_compare(struct kroute_node *a, struct kroute_node *b)
{
    if (ntohl(a->r.prefix.s_addr) < ntohl(b->r.prefix.s_addr))
        return (-1);
    if (ntohl(a->r.prefix.s_addr) > ntohl(b->r.prefix.s_addr))
        return (1);
    if (a->r.prefixlen < b->r.prefixlen)
        return (-1);
    if (a->r.prefixlen > b->r.prefixlen)
        return (1);

    /* if the priority is RTP_ANY finish on the first address hit */
    if (a->r.priority == RTP_ANY || b->r.priority == RTP_ANY)
        return (0);
    if (a->r.priority < b->r.priority)
        return (-1);
    if (a->r.priority > b->r.priority)
        return (1);
    return (0);
}

int
kif_compare(struct kif_node *a, struct kif_node *b)
{
    return (b->k.ifindex - a->k.ifindex);
}

/* tree management */
struct kroute_node *
kroute_find(in_addr_t prefix, u_int8_t prefixlen, u_int8_t prio)
{
    struct kroute_node   s;
    struct kroute_node  *kn, *tmp;

    s.r.prefix.s_addr = prefix;
    s.r.prefixlen = prefixlen;
    s.r.priority = prio;

    kn = RB_FIND(kroute_tree, &krt, &s);
    if (kn && prio == RTP_ANY) {
        tmp = RB_PREV(kroute_tree, &krt, kn);
        while (tmp) {
            if (kroute_compare(&s, tmp) == 0)
                kn = tmp;
            else
                break;
            tmp = RB_PREV(kroute_tree, &krt, kn);
        }
    }
    return (kn);
}

struct kroute_node *
kroute_matchgw(struct kroute_node *kr, struct in_addr nh)
{
    in_addr_t  nexthop;

    nexthop = nh.s_addr;

    while (kr) {
        if (kr->r.nexthop.s_addr == nexthop)
            return (kr);
        kr = kr->next;
    }

    return (NULL);
}
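
/*
 * Insert a route into the krt tree.  If an entry with the same key
 * already exists this is an additional multipath route and is linked
 * to the end of that entry's ->next chain instead.
 */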
int
kroute_insert(struct kroute_node *kr)
{
    struct kroute_node  *krm;

    if ((krm = RB_INSERT(kroute_tree, &krt, kr)) != NULL) {
        /*
         * Multipath route, add at end of list and clone the
         * ospfd inserted flag.
         */
        while (krm->next != NULL)
            krm = krm->next;
        krm->next = kr;
        kr->next = NULL;    /* to be sure */
    } else
        krm = kr;

    if (!(kr->r.flags & F_KERNEL)) {
        /* don't validate or redistribute ospf route */
        kr->r.flags &= ~F_DOWN;
        return (0);
    }

    if (kif_validate(kr->r.ifindex))
        kr->r.flags &= ~F_DOWN;
    else
        kr->r.flags |= F_DOWN;

    kr_redistribute(krm);
    return (0);
}

int
kroute_remove(struct kroute_node *kr)
{
    struct kroute_node  *krm;

    if ((krm = RB_FIND(kroute_tree, &krt, kr)) == NULL) {
        log_warnx("kroute_remove failed to find %s/%u",
            inet_ntoa(kr->r.prefix), kr->r.prefixlen);
        return (-1);
    }

    if (krm == kr) {
        /* head element */
        if (RB_REMOVE(kroute_tree, &krt, kr) == NULL) {
            log_warnx("kroute_remove failed for %s/%u",
                inet_ntoa(kr->r.prefix), kr->r.prefixlen);
            return (-1);
        }
        if (kr->next != NULL) {
            if (RB_INSERT(kroute_tree, &krt, kr->next) != NULL) {
                log_warnx("kroute_remove failed to add %s/%u",
                    inet_ntoa(kr->r.prefix), kr->r.prefixlen);
                return (-1);
            }
        }
    } else {
        /* somewhere in the list */
        while (krm->next != kr && krm->next != NULL)
            krm = krm->next;
        if (krm->next == NULL) {
            log_warnx("kroute_remove multipath list corrupted "
                "for %s/%u", inet_ntoa(kr->r.prefix),
                kr->r.prefixlen);
            return (-1);
        }
        krm->next = kr->next;
    }

    kr_redist_remove(krm, kr);
    rtlabel_unref(kr->r.rtlabel);

    free(kr);
    return (0);
}

void
kroute_clear(void)
{
    struct kroute_node  *kr;

    while ((kr = RB_MIN(kroute_tree, &krt)) != NULL)
        kroute_remove(kr);
}

struct kif_node *
kif_find(u_short ifindex)
{
    struct kif_node  s;

    bzero(&s, sizeof(s));
    s.k.ifindex = ifindex;

    return (RB_FIND(kif_tree, &kit, &s));
}

struct kif *
kif_findname(char *ifname, struct in_addr addr, struct kif_addr **kap)
{
    struct kif_node  *kif;
    struct kif_addr  *ka;

    RB_FOREACH(kif, kif_tree, &kit)
        if (!strcmp(ifname, kif->k.ifname)) {
            ka = TAILQ_FIRST(&kif->addrs);
            if (addr.s_addr != 0) {
                TAILQ_FOREACH(ka, &kif->addrs, entry) {
                    if (addr.s_addr == ka->addr.s_addr)
                        break;
                }
            }
            if (kap != NULL)
                *kap = ka;
            return (&kif->k);
        }

    return (NULL);
}

struct kif_node *
kif_insert(u_short ifindex)
{
    struct kif_node  *kif;

    if ((kif = calloc(1, sizeof(struct kif_node))) == NULL)
        return (NULL);

    kif->k.ifindex = ifindex;
    TAILQ_INIT(&kif->addrs);

    if (RB_INSERT(kif_tree, &kit, kif) != NULL)
        fatalx("kif_insert: RB_INSERT");

    return (kif);
}

int
kif_remove(struct kif_node *kif)
{
    struct kif_addr  *ka;

    if (RB_REMOVE(kif_tree, &kit, kif) == NULL) {
        log_warnx("RB_REMOVE(kif_tree, &kit, kif)");
        return (-1);
    }

    while ((ka = TAILQ_FIRST(&kif->addrs)) != NULL) {
        TAILQ_REMOVE(&kif->addrs, ka, entry);
        free(ka);
    }
    free(kif);
    return (0);
}

void
kif_clear(void)
{
    struct kif_node  *kif;

    while ((kif = RB_MIN(kif_tree, &kit)) != NULL)
        kif_remove(kif);
}
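
/*
 * Create or refresh the cached state for an interface.  nh_reachable
 * is only initialized here for newly created entries; later link
 * state transitions are handled by if_change().
 */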
struct kif *
kif_update(u_short ifindex, int flags, struct if_data *ifd,
    struct sockaddr_dl *sdl)
{
    struct kif_node  *kif;

    if ((kif = kif_find(ifindex)) == NULL) {
        if ((kif = kif_insert(ifindex)) == NULL)
            return (NULL);
        kif->k.nh_reachable = (flags & IFF_UP) &&
            (LINK_STATE_IS_UP(ifd->ifi_link_state) ||
            (ifd->ifi_link_state == LINK_STATE_UNKNOWN &&
            ifd->ifi_type != IFT_CARP));
    }

    kif->k.flags = flags;
    kif->k.link_state = ifd->ifi_link_state;
    kif->k.media_type = ifd->ifi_type;
    kif->k.baudrate = ifd->ifi_baudrate;
    kif->k.mtu = ifd->ifi_mtu;

    if (sdl && sdl->sdl_family == AF_LINK) {
        if (sdl->sdl_nlen >= sizeof(kif->k.ifname))
            memcpy(kif->k.ifname, sdl->sdl_data,
                sizeof(kif->k.ifname) - 1);
        else if (sdl->sdl_nlen > 0)
            memcpy(kif->k.ifname, sdl->sdl_data,
                sdl->sdl_nlen);
        /* string already terminated via calloc() */
    }

    return (&kif->k);
}

int
kif_validate(u_short ifindex)
{
    struct kif_node  *kif;

    if ((kif = kif_find(ifindex)) == NULL) {
        log_warnx("interface with index %u not found", ifindex);
        return (1);
    }

    return (kif->k.nh_reachable);
}

struct kroute_node *
kroute_match(in_addr_t key)
{
    int                  i;
    struct kroute_node  *kr;

    /* we will never match the default route */
    for (i = 32; i > 0; i--)
        if ((kr = kroute_find(key & prefixlen2mask(i), i,
            RTP_ANY)) != NULL)
            return (kr);

    /* if we don't have a match yet, try to find a default route */
    if ((kr = kroute_find(0, 0, RTP_ANY)) != NULL)
        return (kr);

    return (NULL);
}

/* misc */
int
protect_lo(void)
{
    struct kroute_node  *kr;

    /* special protection for 127/8 */
    if ((kr = calloc(1, sizeof(struct kroute_node))) == NULL) {
        log_warn("protect_lo");
        return (-1);
    }
    kr->r.prefix.s_addr = htonl(INADDR_LOOPBACK);
    kr->r.prefixlen = 8;
    kr->r.flags = F_KERNEL|F_CONNECTED;

    if (RB_INSERT(kroute_tree, &krt, kr) != NULL)
        free(kr);    /* kernel route already there, no problem */

    return (0);
}

u_int8_t
prefixlen_classful(in_addr_t ina)
{
    /* it hurt to write this. */

    if (ina >= 0xf0000000U)         /* class E */
        return (32);
    else if (ina >= 0xe0000000U)    /* class D */
        return (4);
    else if (ina >= 0xc0000000U)    /* class C */
        return (24);
    else if (ina >= 0x80000000U)    /* class B */
        return (16);
    else                            /* class A */
        return (8);
}

u_int8_t
mask2prefixlen(in_addr_t ina)
{
    if (ina == 0)
        return (0);
    else
        return (33 - ffs(ntohl(ina)));
}

in_addr_t
prefixlen2mask(u_int8_t prefixlen)
{
    if (prefixlen == 0)
        return (0);

    return (htonl(0xffffffff << (32 - prefixlen)));
}
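
/*
 * Socket addresses in routing messages are padded to a multiple of
 * sizeof(long); ROUNDUP computes that padded length so get_rtaddrs()
 * can step from one sockaddr to the next.
 */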
#define ROUNDUP(a)  \
    (((a) & (sizeof(long) - 1)) ? (1 + ((a) | (sizeof(long) - 1))) : (a))

void
get_rtaddrs(int addrs, struct sockaddr *sa, struct sockaddr **rti_info)
{
    int  i;

    for (i = 0; i < RTAX_MAX; i++) {
        if (addrs & (1 << i)) {
            rti_info[i] = sa;
            sa = (struct sockaddr *)((char *)(sa) +
                ROUNDUP(sa->sa_len));
        } else
            rti_info[i] = NULL;
    }
}

void
if_change(u_short ifindex, int flags, struct if_data *ifd,
    struct sockaddr_dl *sdl)
{
    struct kroute_node  *kr, *tkr;
    struct kif          *kif;
    u_int8_t             reachable;

    if ((kif = kif_update(ifindex, flags, ifd, sdl)) == NULL) {
        log_warn("if_change: kif_update(%u)", ifindex);
        return;
    }

    reachable = (kif->flags & IFF_UP) &&
        (LINK_STATE_IS_UP(kif->link_state) ||
        (kif->link_state == LINK_STATE_UNKNOWN &&
        kif->media_type != IFT_CARP));

    if (reachable == kif->nh_reachable)
        return;    /* nothing changed wrt nexthop validity */

    kif->nh_reachable = reachable;

    /* notify ospfe about interface link state */
    main_imsg_compose_ospfe(IMSG_IFINFO, 0, kif, sizeof(struct kif));

    /* update redistribute list */
    RB_FOREACH(kr, kroute_tree, &krt) {
        for (tkr = kr; tkr != NULL; tkr = tkr->next) {
            if (tkr->r.ifindex == ifindex) {
                if (reachable)
                    tkr->r.flags &= ~F_DOWN;
                else
                    tkr->r.flags |= F_DOWN;
            }
        }
        kr_redistribute(kr);
    }
}

void
if_newaddr(u_short ifindex, struct sockaddr_in *ifa, struct sockaddr_in *mask,
    struct sockaddr_in *brd)
{
    struct kif_node  *kif;
    struct kif_addr  *ka;

    if (ifa == NULL || ifa->sin_family != AF_INET)
        return;
    if ((kif = kif_find(ifindex)) == NULL) {
        log_warnx("if_newaddr: corresponding if %i not found", ifindex);
        return;
    }
    if ((ka = calloc(1, sizeof(struct kif_addr))) == NULL)
        fatal("if_newaddr");
    ka->addr = ifa->sin_addr;
    if (mask)
        ka->mask = mask->sin_addr;
    else
        ka->mask.s_addr = INADDR_NONE;
    if (brd)
        ka->dstbrd = brd->sin_addr;
    else
        ka->dstbrd.s_addr = INADDR_NONE;

    TAILQ_INSERT_TAIL(&kif->addrs, ka, entry);
}

void
if_announce(void *msg)
{
    struct if_announcemsghdr  *ifan;
    struct kif_node           *kif;

    ifan = msg;

    switch (ifan->ifan_what) {
    case IFAN_ARRIVAL:
        kif = kif_insert(ifan->ifan_index);
        strlcpy(kif->k.ifname, ifan->ifan_name, sizeof(kif->k.ifname));
        break;
    case IFAN_DEPARTURE:
        kif = kif_find(ifan->ifan_index);
        kif_remove(kif);
        break;
    }
}

/* rtsock */
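
/*
 * Build a routing message for the given kroute and write it to the
 * routing socket: the rt_msghdr is followed by RTA_DST, an optional
 * RTA_GATEWAY, RTA_NETMASK and an optional RTA_LABEL, gathered in an
 * iovec.  Nothing is sent while the FIB is decoupled.
 */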
int
send_rtmsg(int fd, int action, struct kroute *kroute)
{
    struct iovec             iov[5];
    struct rt_msghdr         hdr;
    struct sockaddr_in       prefix;
    struct sockaddr_in       nexthop;
    struct sockaddr_in       mask;
    struct sockaddr_rtlabel  sa_rl;
    int                      iovcnt = 0;
    const char              *label;

    if (kr_state.fib_sync == 0)
        return (0);

    /* initialize header */
    bzero(&hdr, sizeof(hdr));
    hdr.rtm_version = RTM_VERSION;
    hdr.rtm_type = action;
    hdr.rtm_priority = RTP_OSPF;
    if (action == RTM_CHANGE)
        hdr.rtm_fmask = RTF_REJECT|RTF_BLACKHOLE;
    else
        hdr.rtm_flags = RTF_MPATH;
    hdr.rtm_seq = kr_state.rtseq++;    /* overflow doesn't matter */
    hdr.rtm_msglen = sizeof(hdr);
    /* adjust iovec */
    iov[iovcnt].iov_base = &hdr;
    iov[iovcnt++].iov_len = sizeof(hdr);

    bzero(&prefix, sizeof(prefix));
    prefix.sin_len = sizeof(prefix);
    prefix.sin_family = AF_INET;
    prefix.sin_addr.s_addr = kroute->prefix.s_addr;
    /* adjust header */
    hdr.rtm_addrs |= RTA_DST;
    hdr.rtm_msglen += sizeof(prefix);
    /* adjust iovec */
    iov[iovcnt].iov_base = &prefix;
    iov[iovcnt++].iov_len = sizeof(prefix);

    if (kroute->nexthop.s_addr != 0) {
        bzero(&nexthop, sizeof(nexthop));
        nexthop.sin_len = sizeof(nexthop);
        nexthop.sin_family = AF_INET;
        nexthop.sin_addr.s_addr = kroute->nexthop.s_addr;
        /* adjust header */
        hdr.rtm_flags |= RTF_GATEWAY;
        hdr.rtm_addrs |= RTA_GATEWAY;
        hdr.rtm_msglen += sizeof(nexthop);
        /* adjust iovec */
        iov[iovcnt].iov_base = &nexthop;
        iov[iovcnt++].iov_len = sizeof(nexthop);
    }

    bzero(&mask, sizeof(mask));
    mask.sin_len = sizeof(mask);
    mask.sin_family = AF_INET;
    mask.sin_addr.s_addr = prefixlen2mask(kroute->prefixlen);
    /* adjust header */
    hdr.rtm_addrs |= RTA_NETMASK;
    hdr.rtm_msglen += sizeof(mask);
    /* adjust iovec */
    iov[iovcnt].iov_base = &mask;
    iov[iovcnt++].iov_len = sizeof(mask);

    if (kroute->rtlabel != 0) {
        sa_rl.sr_len = sizeof(sa_rl);
        sa_rl.sr_family = AF_UNSPEC;
        label = rtlabel_id2name(kroute->rtlabel);
        if (strlcpy(sa_rl.sr_label, label,
            sizeof(sa_rl.sr_label)) >= sizeof(sa_rl.sr_label)) {
            log_warnx("send_rtmsg: invalid rtlabel");
            return (-1);
        }
        /* adjust header */
        hdr.rtm_addrs |= RTA_LABEL;
        hdr.rtm_msglen += sizeof(sa_rl);
        /* adjust iovec */
        iov[iovcnt].iov_base = &sa_rl;
        iov[iovcnt++].iov_len = sizeof(sa_rl);
    }

retry:
    if (writev(fd, iov, iovcnt) == -1) {
        switch (errno) {
        case ESRCH:
            if (hdr.rtm_type == RTM_CHANGE) {
                hdr.rtm_type = RTM_ADD;
                goto retry;
            } else if (hdr.rtm_type == RTM_DELETE) {
                log_info("route %s/%u vanished before delete",
                    inet_ntoa(kroute->prefix),
                    kroute->prefixlen);
                return (0);
            } else {
                log_warnx("send_rtmsg: action %u, "
                    "prefix %s/%u: %s", hdr.rtm_type,
                    inet_ntoa(kroute->prefix),
                    kroute->prefixlen, strerror(errno));
                return (0);
            }
            break;
        default:
            log_warnx("send_rtmsg: action %u, prefix %s/%u: %s",
                hdr.rtm_type, inet_ntoa(kroute->prefix),
                kroute->prefixlen, strerror(errno));
            return (0);
        }
    }

    return (0);
}
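
/*
 * Bootstrap the route cache: dump the kernel's IPv4 routing table via
 * the NET_RT_DUMP sysctl, turn every entry into a kroute_node and
 * insert it.  Leftover RTP_OSPF routes from a previous run are deleted
 * from the kernel instead of being cached.
 */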
int
fetchtable(void)
{
    size_t                    len;
    int                       mib[7];
    char                     *buf, *next, *lim;
    struct rt_msghdr         *rtm;
    struct sockaddr          *sa, *rti_info[RTAX_MAX];
    struct sockaddr_in       *sa_in;
    struct sockaddr_rtlabel  *label;
    struct kroute_node       *kr;

    mib[0] = CTL_NET;
    mib[1] = AF_ROUTE;
    mib[2] = 0;
    mib[3] = AF_INET;
    mib[4] = NET_RT_DUMP;
    mib[5] = 0;
    mib[6] = 0;    /* rtableid */

    if (sysctl(mib, 7, NULL, &len, NULL, 0) == -1) {
        log_warn("sysctl");
        return (-1);
    }
    if ((buf = malloc(len)) == NULL) {
        log_warn("fetchtable");
        return (-1);
    }
    if (sysctl(mib, 7, buf, &len, NULL, 0) == -1) {
        log_warn("sysctl");
        free(buf);
        return (-1);
    }

    lim = buf + len;
    for (next = buf; next < lim; next += rtm->rtm_msglen) {
        rtm = (struct rt_msghdr *)next;
        if (rtm->rtm_version != RTM_VERSION)
            continue;
        sa = (struct sockaddr *)(rtm + 1);
        get_rtaddrs(rtm->rtm_addrs, sa, rti_info);

        if ((sa = rti_info[RTAX_DST]) == NULL)
            continue;

        if (rtm->rtm_flags & RTF_LLINFO)    /* arp cache */
            continue;

        if ((kr = calloc(1, sizeof(struct kroute_node))) == NULL) {
            log_warn("fetchtable");
            free(buf);
            return (-1);
        }

        kr->r.flags = F_KERNEL;
        kr->r.priority = rtm->rtm_priority;

        switch (sa->sa_family) {
        case AF_INET:
            kr->r.prefix.s_addr =
                ((struct sockaddr_in *)sa)->sin_addr.s_addr;
            sa_in = (struct sockaddr_in *)rti_info[RTAX_NETMASK];
            if (rtm->rtm_flags & RTF_STATIC)
                kr->r.flags |= F_STATIC;
            if (rtm->rtm_flags & RTF_DYNAMIC)
                kr->r.flags |= F_DYNAMIC;
            if (sa_in != NULL) {
                if (sa_in->sin_len == 0)
                    break;
                kr->r.prefixlen =
                    mask2prefixlen(sa_in->sin_addr.s_addr);
            } else if (rtm->rtm_flags & RTF_HOST)
                kr->r.prefixlen = 32;
            else
                kr->r.prefixlen =
                    prefixlen_classful(kr->r.prefix.s_addr);
            break;
        default:
            free(kr);
            continue;
        }

        kr->r.ifindex = rtm->rtm_index;
        if ((sa = rti_info[RTAX_GATEWAY]) != NULL)
            switch (sa->sa_family) {
            case AF_INET:
                kr->r.nexthop.s_addr =
                    ((struct sockaddr_in *)sa)->sin_addr.s_addr;
                break;
            case AF_LINK:
                kr->r.flags |= F_CONNECTED;
                break;
            }

        if (rtm->rtm_priority == RTP_OSPF) {
            send_rtmsg(kr_state.fd, RTM_DELETE, &kr->r);
            free(kr);
        } else {
            if ((label = (struct sockaddr_rtlabel *)
                rti_info[RTAX_LABEL]) != NULL) {
                kr->r.rtlabel =
                    rtlabel_name2id(label->sr_label);
                kr->r.ext_tag =
                    rtlabel_id2tag(kr->r.rtlabel);
            }
            kroute_insert(kr);
        }
    }
    free(buf);
    return (0);
}

int
fetchifs(u_short ifindex)
{
    size_t              len;
    int                 mib[6];
    char               *buf, *next, *lim;
    struct rt_msghdr   *rtm;
    struct if_msghdr    ifm;
    struct ifa_msghdr  *ifam;
    struct sockaddr    *sa, *rti_info[RTAX_MAX];

    mib[0] = CTL_NET;
    mib[1] = AF_ROUTE;
    mib[2] = 0;
    mib[3] = AF_INET;
    mib[4] = NET_RT_IFLIST;
    mib[5] = ifindex;

    if (sysctl(mib, 6, NULL, &len, NULL, 0) == -1) {
        log_warn("sysctl");
        return (-1);
    }
    if ((buf = malloc(len)) == NULL) {
        log_warn("fetchifs");
        return (-1);
    }
    if (sysctl(mib, 6, buf, &len, NULL, 0) == -1) {
        log_warn("sysctl");
        free(buf);
        return (-1);
    }

    lim = buf + len;
    for (next = buf; next < lim; next += rtm->rtm_msglen) {
        rtm = (struct rt_msghdr *)next;
        if (rtm->rtm_version != RTM_VERSION)
            continue;
        switch (rtm->rtm_type) {
        case RTM_IFINFO:
            memcpy(&ifm, next, sizeof(ifm));
            sa = (struct sockaddr *)(next + rtm->rtm_hdrlen);
            get_rtaddrs(ifm.ifm_addrs, sa, rti_info);
            if_change(ifm.ifm_index, ifm.ifm_flags, &ifm.ifm_data,
                (struct sockaddr_dl *)rti_info[RTAX_IFP]);
            break;
        case RTM_NEWADDR:
            ifam = (struct ifa_msghdr *)rtm;
            if ((ifam->ifam_addrs & (RTA_NETMASK | RTA_IFA |
                RTA_BRD)) == 0)
                break;
            sa = (struct sockaddr *)(ifam + 1);
            get_rtaddrs(ifam->ifam_addrs, sa, rti_info);

            if_newaddr(ifam->ifam_index,
                (struct sockaddr_in *)rti_info[RTAX_IFA],
                (struct sockaddr_in *)rti_info[RTAX_NETMASK],
                (struct sockaddr_in *)rti_info[RTAX_BRD]);
            break;
        }
    }
    free(buf);
    return (0);
}
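
/*
 * Process messages arriving on the routing socket: route additions,
 * changes and deletions update the krt cache (and redistribution),
 * interface and address messages update the kit cache.  Messages
 * caused by ospfd itself or for other routing tables are skipped.
 */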
int
dispatch_rtmsg(void)
{
    char                      buf[RT_BUF_SIZE];
    ssize_t                   n;
    char                     *next, *lim;
    struct rt_msghdr         *rtm;
    struct if_msghdr          ifm;
    struct ifa_msghdr        *ifam;
    struct sockaddr          *sa, *rti_info[RTAX_MAX];
    struct sockaddr_in       *sa_in;
    struct sockaddr_rtlabel  *label;
    struct kroute_node       *kr, *okr;
    struct in_addr            prefix, nexthop;
    u_int8_t                  prefixlen, prio;
    int                       flags, mpath;
    u_short                   ifindex = 0;

    if ((n = read(kr_state.fd, &buf, sizeof(buf))) == -1) {
        log_warn("dispatch_rtmsg: read error");
        return (-1);
    }

    if (n == 0) {
        log_warnx("routing socket closed");
        return (-1);
    }

    lim = buf + n;
    for (next = buf; next < lim; next += rtm->rtm_msglen) {
        rtm = (struct rt_msghdr *)next;
        if (rtm->rtm_version != RTM_VERSION)
            continue;

        prefix.s_addr = 0;
        prefixlen = 0;
        flags = F_KERNEL;
        nexthop.s_addr = 0;
        mpath = 0;
        prio = 0;

        if (rtm->rtm_type == RTM_ADD || rtm->rtm_type == RTM_CHANGE ||
            rtm->rtm_type == RTM_DELETE) {
            sa = (struct sockaddr *)(rtm + 1);
            get_rtaddrs(rtm->rtm_addrs, sa, rti_info);

            if (rtm->rtm_tableid != 0)
                continue;

            if (rtm->rtm_pid == kr_state.pid)    /* caused by us */
                continue;

            if (rtm->rtm_errno)    /* failed attempts... */
                continue;

            if (rtm->rtm_flags & RTF_LLINFO)    /* arp cache */
                continue;

#ifdef RTF_MPATH
            if (rtm->rtm_flags & RTF_MPATH)
                mpath = 1;
#endif
            prio = rtm->rtm_priority;

            switch (sa->sa_family) {
            case AF_INET:
                prefix.s_addr =
                    ((struct sockaddr_in *)sa)->sin_addr.s_addr;
                sa_in = (struct sockaddr_in *)
                    rti_info[RTAX_NETMASK];
                if (sa_in != NULL) {
                    if (sa_in->sin_len != 0)
                        prefixlen = mask2prefixlen(
                            sa_in->sin_addr.s_addr);
                } else if (rtm->rtm_flags & RTF_HOST)
                    prefixlen = 32;
                else
                    prefixlen =
                        prefixlen_classful(prefix.s_addr);
                if (rtm->rtm_flags & RTF_STATIC)
                    flags |= F_STATIC;
                if (rtm->rtm_flags & RTF_DYNAMIC)
                    flags |= F_DYNAMIC;
                break;
            default:
                continue;
            }

            ifindex = rtm->rtm_index;
            if ((sa = rti_info[RTAX_GATEWAY]) != NULL) {
                switch (sa->sa_family) {
                case AF_INET:
                    nexthop.s_addr = ((struct
                        sockaddr_in *)sa)->sin_addr.s_addr;
                    break;
                case AF_LINK:
                    flags |= F_CONNECTED;
                    break;
                }
            }
        }

        switch (rtm->rtm_type) {
        case RTM_ADD:
        case RTM_CHANGE:
            if (nexthop.s_addr == 0 && !(flags & F_CONNECTED)) {
                log_warnx("dispatch_rtmsg no nexthop for %s/%u",
                    inet_ntoa(prefix), prefixlen);
                continue;
            }

            if ((okr = kroute_find(prefix.s_addr, prefixlen, prio))
                != NULL) {
                /* just add new multipath routes */
                if (mpath && rtm->rtm_type == RTM_ADD)
                    goto add;
                /* get the correct route */
                kr = okr;
                if (mpath && (kr = kroute_matchgw(okr,
                    nexthop)) == NULL) {
                    log_warnx("dispatch_rtmsg mpath route"
                        " not found");
                    /* add routes we missed out earlier */
                    goto add;
                }

                if (kr->r.flags & F_REDISTRIBUTED)
                    flags |= F_REDISTRIBUTED;
                kr->r.nexthop.s_addr = nexthop.s_addr;
                kr->r.flags = flags;
                kr->r.ifindex = ifindex;

                rtlabel_unref(kr->r.rtlabel);
                kr->r.rtlabel = 0;
                kr->r.ext_tag = 0;
                if ((label = (struct sockaddr_rtlabel *)
                    rti_info[RTAX_LABEL]) != NULL) {
                    kr->r.rtlabel =
                        rtlabel_name2id(label->sr_label);
                    kr->r.ext_tag =
                        rtlabel_id2tag(kr->r.rtlabel);
                }

                if (kif_validate(kr->r.ifindex))
                    kr->r.flags &= ~F_DOWN;
                else
                    kr->r.flags |= F_DOWN;

                /* just readd, the RDE will care */
                kr_redistribute(okr);
            } else {
add:
                if ((kr = calloc(1,
                    sizeof(struct kroute_node))) == NULL) {
                    log_warn("dispatch_rtmsg");
                    return (-1);
                }
                kr->r.prefix.s_addr = prefix.s_addr;
                kr->r.prefixlen = prefixlen;
                kr->r.nexthop.s_addr = nexthop.s_addr;
                kr->r.flags = flags;
                kr->r.ifindex = ifindex;
                kr->r.priority = prio;

                if ((label = (struct sockaddr_rtlabel *)
                    rti_info[RTAX_LABEL]) != NULL) {
                    kr->r.rtlabel =
                        rtlabel_name2id(label->sr_label);
                    kr->r.ext_tag =
                        rtlabel_id2tag(kr->r.rtlabel);
                }

                kroute_insert(kr);
            }
            break;
        case RTM_DELETE:
            if ((kr = kroute_find(prefix.s_addr, prefixlen, prio))
                == NULL)
                continue;
            if (!(kr->r.flags & F_KERNEL))
                continue;
            /* get the correct route */
            okr = kr;
            if (mpath &&
                (kr = kroute_matchgw(kr, nexthop)) == NULL) {
                log_warnx("dispatch_rtmsg mpath route"
                    " not found");
                return (-1);
            }
            if (kroute_remove(kr) == -1)
                return (-1);
            break;
        case RTM_IFINFO:
            memcpy(&ifm, next, sizeof(ifm));
            sa = (struct sockaddr *)(next + rtm->rtm_hdrlen);
            get_rtaddrs(ifm.ifm_addrs, sa, rti_info);
            if_change(ifm.ifm_index, ifm.ifm_flags, &ifm.ifm_data,
                (struct sockaddr_dl *)rti_info[RTAX_IFP]);
            break;
        case RTM_NEWADDR:
            ifam = (struct ifa_msghdr *)rtm;
            if ((ifam->ifam_addrs & (RTA_NETMASK | RTA_IFA |
                RTA_BRD)) == 0)
                break;
            sa = (struct sockaddr *)(ifam + 1);
            get_rtaddrs(ifam->ifam_addrs, sa, rti_info);

            if_newaddr(ifam->ifam_index,
                (struct sockaddr_in *)rti_info[RTAX_IFA],
                (struct sockaddr_in *)rti_info[RTAX_NETMASK],
                (struct sockaddr_in *)rti_info[RTAX_BRD]);
            break;
        case RTM_IFANNOUNCE:
            if_announce(next);
            break;
        default:
            /* ignore for now */
            break;
        }
    }
    return (0);
}