1 /* $OpenBSD: kroute.c,v 1.16 2016/09/02 16:44:33 renato Exp $ */ 2 3 /* 4 * Copyright (c) 2015 Renato Westphal <renato@openbsd.org> 5 * Copyright (c) 2004 Esben Norby <norby@openbsd.org> 6 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org> 7 * 8 * Permission to use, copy, modify, and distribute this software for any 9 * purpose with or without fee is hereby granted, provided that the above 10 * copyright notice and this permission notice appear in all copies. 11 * 12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
 */

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <netinet/in.h>

#include <arpa/inet.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "eigrpd.h"
#include "log.h"

/*
 * Global state of the kernel-interaction layer: the routing socket, its
 * libevent handle, and bookkeeping for outgoing route messages.
 */
static struct {
	uint32_t		rtseq;		/* seq for outgoing rtsock msgs */
	pid_t			pid;		/* our pid, to match RTM_GET replies */
	int			fib_sync;	/* 1 while coupled to the kernel FIB */
	int			fd;		/* routing socket */
	struct event		ev;		/* read event on the routing socket */
	unsigned int		rdomain;	/* routing domain (table) in use */
} kr_state;

/* One nexthop of a route (leaf of the prefix -> priority -> nexthop tree). */
struct kroute_node {
	TAILQ_ENTRY(kroute_node)	 entry;
	struct kroute_priority		*kprio;		/* back pointer */
	struct kroute			 r;
};

/* All nexthops a prefix has at a given routing priority. */
struct kroute_priority {
	TAILQ_ENTRY(kroute_priority)	 entry;
	struct kroute_prefix		*kp;		/* back pointer */
	uint8_t				 priority;
	TAILQ_HEAD(, kroute_node)	 nexthops;
};

/* One destination prefix; priorities kept sorted, lowest (best) first. */
struct kroute_prefix {
	RB_ENTRY(kroute_prefix)		 entry;
	int				 af;
	union eigrpd_addr		 prefix;
	uint8_t				 prefixlen;
	TAILQ_HEAD(plist, kroute_priority) priorities;
};
RB_HEAD(kroute_tree, kroute_prefix);
RB_PROTOTYPE(kroute_tree, kroute_prefix, entry, kroute_compare)

/* An address configured on a kernel interface. */
struct kif_addr {
	TAILQ_ENTRY(kif_addr)	 entry;
	struct kaddr		 a;
};

/* A kernel interface together with its list of addresses. */
struct kif_node {
	RB_ENTRY(kif_node)	 entry;
	TAILQ_HEAD(, kif_addr)	 addrs;
	struct kif		 k;
};
RB_HEAD(kif_tree, kif_node);
RB_PROTOTYPE(kif_tree, kif_node, entry, kif_compare)

static void	 kr_dispatch_msg(int, short, void *);
static void	 kr_redist_remove(struct kroute *);
static int	 kr_redist_eval(struct kroute *);
static void	 kr_redistribute(struct kroute_prefix *);
static __inline int	 kroute_compare(struct kroute_prefix *,
			    struct kroute_prefix *);
static struct kroute_prefix	*kroute_find_prefix(int, union eigrpd_addr *,
				    uint8_t);
static struct kroute_priority	*kroute_find_prio(struct kroute_prefix *,
				    uint8_t);
static struct kroute_node	*kroute_find_gw(struct kroute_priority *,
				    union eigrpd_addr *);
static struct kroute_node	*kroute_insert(struct kroute *);
static int	 kroute_remove(struct kroute *);
static void	 kroute_clear(void);
static __inline int	 kif_compare(struct kif_node *, struct kif_node *);
static struct kif_node	*kif_find(unsigned short);
static struct kif_node	*kif_insert(unsigned short);
static int	 kif_remove(struct kif_node *);
static struct kif	*kif_update(unsigned short, int, struct if_data *,
			    struct sockaddr_dl *);
static int	 kif_validate(unsigned short);
static void	 protect_lo(void);
static uint8_t	 prefixlen_classful(in_addr_t);
static void	 get_rtaddrs(int, struct sockaddr *, struct sockaddr **);
static void	 if_change(unsigned short, int, struct if_data *,
		    struct sockaddr_dl *);
static void	 if_newaddr(unsigned short, struct sockaddr *,
		    struct sockaddr *, struct sockaddr *);
static void	 if_deladdr(unsigned short, struct sockaddr *,
		    struct sockaddr *, struct sockaddr *);
static void	 if_announce(void *);
static int	 send_rtmsg_v4(int, int, struct kroute *);
static int	 send_rtmsg_v6(int, int, struct kroute *);
static int	 send_rtmsg(int, int, struct kroute *);
static int	 fetchtable(void);
static int	 fetchifs(void);
static int	 dispatch_rtmsg(void);
static int	 rtmsg_process(char *, size_t);
static int	 rtmsg_process_route(struct rt_msghdr *,
		    struct sockaddr *[RTAX_MAX]);

RB_GENERATE(kroute_tree, kroute_prefix, entry, kroute_compare)
RB_GENERATE(kif_tree, kif_node, entry, kif_compare)

static struct kroute_tree	 krt = RB_INITIALIZER(&krt);
static struct kif_tree		 kit = RB_INITIALIZER(&kit);

/*
 * Load the interface list from the kernel.
 * Returns 0 on success, -1 on failure.
 */
int
kif_init(void)
{
	if (fetchifs() == -1)
		return (-1);

	return (0);
}

/*
 * Open and configure the routing socket, load the kernel routing table,
 * and register the socket with libevent.  'fs' is the initial FIB coupling
 * state; 'rdomain' is the routing domain to operate in.
 * Returns 0 on success, -1 on failure.
 */
int
kr_init(int fs, unsigned int rdomain)
{
	int		opt = 0, rcvbuf, default_rcvbuf;
	socklen_t	optlen;

	kr_state.fib_sync = fs;
	kr_state.rdomain = rdomain;

	if ((kr_state.fd = socket(AF_ROUTE,
	    SOCK_RAW | SOCK_CLOEXEC | SOCK_NONBLOCK, 0)) == -1) {
		log_warn("%s: socket", __func__);
		return (-1);
	}

	/* not interested in my own messages */
	if (setsockopt(kr_state.fd, SOL_SOCKET, SO_USELOOPBACK,
	    &opt, sizeof(opt)) == -1)
		log_warn("%s: setsockopt(SO_USELOOPBACK)", __func__);

	/* grow receive buffer, don't wanna miss messages */
	optlen = sizeof(default_rcvbuf);
	if (getsockopt(kr_state.fd, SOL_SOCKET, SO_RCVBUF,
	    &default_rcvbuf, &optlen) == -1)
		log_warn("%s: getsockopt SOL_SOCKET SO_RCVBUF", __func__);
	else
		/* halve until the kernel accepts the size (or we hit default) */
		for (rcvbuf = MAX_RTSOCK_BUF;
		    rcvbuf > default_rcvbuf &&
		    setsockopt(kr_state.fd, SOL_SOCKET, SO_RCVBUF,
		    &rcvbuf, sizeof(rcvbuf)) == -1 && errno == ENOBUFS;
		    rcvbuf /= 2)
			;	/* nothing */

	kr_state.pid = getpid();
	kr_state.rtseq = 1;

	if (fetchtable() == -1)
		return (-1);

	protect_lo();

	event_set(&kr_state.ev, kr_state.fd, EV_READ | EV_PERSIST,
	    kr_dispatch_msg, NULL);
	event_add(&kr_state.ev, NULL);

	return (0);
}

/*
 * Resend the whole interface and address list to the eigrpe process
 * (used after a reconfiguration).
 */
void
kif_redistribute(void)
{
	struct kif_node		*kif;
	struct kif_addr		*ka;

	RB_FOREACH(kif, kif_tree, &kit) {
		main_imsg_compose_eigrpe(IMSG_IFINFO, 0, &kif->k,
		    sizeof(struct kif));
		TAILQ_FOREACH(ka, &kif->addrs, entry) {
			main_imsg_compose_eigrpe(IMSG_NEWADDR, 0, &ka->a,
			    sizeof(ka->a));
		}
	}
}

/*
 * Install or update an EIGRP route in the kernel and in the local cache.
 * Uses RTM_CHANGE when the exact prefix/priority/nexthop already exists,
 * RTM_ADD otherwise.  Returns 0 on success, -1 on failure.
 */
int
kr_change(struct kroute *kr)
{
	struct kroute_prefix	*kp;
	struct kroute_priority	*kprio;
	struct kroute_node	*kn;
	int			 action = RTM_ADD;

	kp = kroute_find_prefix(kr->af, &kr->prefix, kr->prefixlen);
	if (kp == NULL)
		kn = kroute_insert(kr);
	else {
		kprio = kroute_find_prio(kp, kr->priority);
		if (kprio == NULL)
			kn = kroute_insert(kr);
		else {
			kn = kroute_find_gw(kprio, &kr->nexthop);
			if (kn == NULL)
				kn = kroute_insert(kr);
			else
				action = RTM_CHANGE;
		}
	}

	/* send update */
	if (send_rtmsg(kr_state.fd, action, kr) == -1)
		return (-1);

	kn->r.flags |= F_EIGRPD_INSERTED;

	return (0);
}
/*
 * Remove an EIGRP route from the kernel and from the local cache.
 * Unknown routes are silently ignored (return 0); -1 only on real failure.
 */
int
kr_delete(struct kroute *kr)
{
	struct kroute_prefix	*kp;
	struct kroute_priority	*kprio;
	struct kroute_node	*kn;

	kp = kroute_find_prefix(kr->af, &kr->prefix, kr->prefixlen);
	if (kp == NULL)
		return (0);
	kprio = kroute_find_prio(kp, kr->priority);
	if (kprio == NULL)
		return (0);
	kn = kroute_find_gw(kprio, &kr->nexthop);
	if (kn == NULL)
		return (0);

	if (!(kn->r.flags & F_EIGRPD_INSERTED))
		return (0);

	if (send_rtmsg(kr_state.fd, RTM_DELETE, &kn->r) == -1)
		return (-1);

	if (kroute_remove(kr) == -1)
		return (-1);

	return (0);
}

/* Withdraw our routes from the kernel and release all cached state. */
void
kr_shutdown(void)
{
	kr_fib_decouple();
	kroute_clear();
	kif_clear();
}

/* Couple to the kernel FIB: push every route we installed into the kernel. */
void
kr_fib_couple(void)
{
	struct kroute_prefix	*kp;
	struct kroute_priority	*kprio;
	struct kroute_node	*kn;

	if (kr_state.fib_sync == 1)	/* already coupled */
		return;

	kr_state.fib_sync = 1;

	RB_FOREACH(kp, kroute_tree, &krt)
		TAILQ_FOREACH(kprio, &kp->priorities, entry)
			TAILQ_FOREACH(kn, &kprio->nexthops, entry) {
				if (!(kn->r.flags & F_EIGRPD_INSERTED))
					continue;
				send_rtmsg(kr_state.fd, RTM_ADD, &kn->r);
			}

	log_info("kernel routing table coupled");
}

/* Decouple from the kernel FIB: delete every route we installed. */
void
kr_fib_decouple(void)
{
	struct kroute_prefix	*kp;
	struct kroute_priority	*kprio;
	struct kroute_node	*kn;

	if (kr_state.fib_sync == 0)	/* already decoupled */
		return;

	RB_FOREACH(kp, kroute_tree, &krt)
		TAILQ_FOREACH(kprio, &kp->priorities, entry)
			TAILQ_FOREACH(kn, &kprio->nexthops, entry) {
				if (!(kn->r.flags & F_EIGRPD_INSERTED))
					continue;

				send_rtmsg(kr_state.fd, RTM_DELETE, &kn->r);
			}

	kr_state.fib_sync = 0;

	log_info("kernel routing table decoupled");
}

/* ARGSUSED */
/* libevent callback: drain the routing socket; exit the loop on error. */
static void
kr_dispatch_msg(int fd, short event, void *bula)
{
	if (dispatch_rtmsg() == -1)
		event_loopexit(NULL);
}

/*
 * Handle a "show fib" control request: dump the cached routing table to the
 * requesting client, optionally filtered by the flags carried in the imsg.
 */
void
kr_show_route(struct imsg *imsg)
{
	struct kroute_prefix	*kp;
	struct kroute_priority	*kprio;
	struct kroute_node	*kn;
	struct kroute		 kr;
	int			 flags;

	if (imsg->hdr.len != IMSG_HEADER_SIZE + sizeof(flags)) {
		log_warnx("%s: wrong imsg len", __func__);
		return;
	}
	memcpy(&flags, imsg->data, sizeof(flags));
	RB_FOREACH(kp, kroute_tree, &krt)
		TAILQ_FOREACH(kprio, &kp->priorities, entry)
			TAILQ_FOREACH(kn, &kprio->nexthops, entry) {
				if (flags && !(kn->r.flags & flags))
					continue;

				kr = kn->r;
				if (kr.priority ==
				    eigrpd_conf->fib_priority_external)
					kr.flags |= F_CTL_EXTERNAL;
				main_imsg_compose_eigrpe(IMSG_CTL_KROUTE,
				    imsg->hdr.pid, &kr, sizeof(kr));
			}

	main_imsg_compose_eigrpe(IMSG_CTL_END, imsg->hdr.pid, NULL, 0);
}

/* Dump one interface (or all of them, when ifname is NULL) to a client. */
void
kr_ifinfo(char *ifname, pid_t pid)
{
	struct kif_node	*kif;

	RB_FOREACH(kif, kif_tree, &kit)
		if (ifname == NULL || !strcmp(ifname, kif->k.ifname)) {
			main_imsg_compose_eigrpe(IMSG_CTL_IFINFO,
			    pid, &kif->k, sizeof(kif->k));
		}

	main_imsg_compose_eigrpe(IMSG_CTL_END, pid, NULL, 0);
}

/* Stop redistributing a route: clear the flag and tell the rde. */
static void
kr_redist_remove(struct kroute *kr)
{
	/* was the route redistributed? */
	if (!(kr->flags & F_REDISTRIBUTED))
		return;

	/* remove redistributed flag */
	kr->flags &= ~F_REDISTRIBUTED;
	main_imsg_compose_rde(IMSG_NETWORK_DEL, 0, kr, sizeof(*kr));
}

/*
 * Decide whether a kernel route should be redistributed into EIGRP and
 * notify the rde accordingly.  Returns 1 when redistributed, 0 otherwise.
 */
static int
kr_redist_eval(struct kroute *kr)
{
	/* Only non-eigrpd routes are considered for redistribution. */
	if (!(kr->flags & F_KERNEL))
		goto dont_redistribute;

	/* Dynamic routes are not redistributable. */
	if (kr->flags & F_DYNAMIC)
		goto dont_redistribute;

	/* filter-out non-redistributable addresses */
	if (bad_addr(kr->af, &kr->prefix) ||
	    (kr->af == AF_INET6 && IN6_IS_SCOPE_EMBED(&kr->prefix.v6)))
		goto dont_redistribute;

	/* interface is not up and running so don't announce */
	if (kr->flags & F_DOWN)
		goto dont_redistribute;

	/*
	 * Consider networks with nexthop loopback as not redistributable
	 * unless it is a reject or blackhole route.
	 */
	switch (kr->af) {
	case AF_INET:
		if (kr->nexthop.v4.s_addr == htonl(INADDR_LOOPBACK) &&
		    !(kr->flags & (F_BLACKHOLE|F_REJECT)))
			goto dont_redistribute;
		break;
	case AF_INET6:
		if (IN6_IS_ADDR_LOOPBACK(&kr->nexthop.v6) &&
		    !(kr->flags & (F_BLACKHOLE|F_REJECT)))
			goto dont_redistribute;
		break;
	default:
		log_debug("%s: unexpected address-family", __func__);
		break;
	}

	/* prefix should be redistributed */
	kr->flags |= F_REDISTRIBUTED;
	main_imsg_compose_rde(IMSG_NETWORK_ADD, 0, kr, sizeof(*kr));
	return (1);

dont_redistribute:
	kr_redist_remove(kr);
	return (0);
}

/*
 * Re-evaluate redistribution for a prefix: only the best (first) priority
 * may be redistributed; any other priority's routes are withdrawn.
 */
static void
kr_redistribute(struct kroute_prefix *kp)
{
	struct kroute_priority	*kprio;
	struct kroute_node	*kn;

	/* only the highest prio route can be redistributed */
	TAILQ_FOREACH_REVERSE(kprio, &kp->priorities, plist, entry) {
		if (kprio == TAILQ_FIRST(&kp->priorities)) {
			TAILQ_FOREACH(kn, &kprio->nexthops, entry)
				/* pick just one entry in case of multipath */
				if (kr_redist_eval(&kn->r))
					break;
		} else {
			TAILQ_FOREACH(kn, &kprio->nexthops, entry)
				kr_redist_remove(&kn->r);
		}
	}
}

/* RB-tree ordering: by address family, then prefix, then prefix length. */
static __inline int
kroute_compare(struct kroute_prefix *a, struct kroute_prefix *b)
{
	int		 addrcmp;

	if (a->af < b->af)
		return (-1);
	if (a->af > b->af)
		return (1);

	addrcmp = eigrp_addrcmp(a->af, &a->prefix, &b->prefix);
	if (addrcmp != 0)
		return (addrcmp);

	if (a->prefixlen < b->prefixlen)
		return (-1);
	if (a->prefixlen > b->prefixlen)
		return (1);

	return (0);
}
/* tree management */

/* Look up a prefix node by (af, prefix, prefixlen); NULL when absent. */
static struct kroute_prefix *
kroute_find_prefix(int af, union eigrpd_addr *prefix, uint8_t prefixlen)
{
	struct kroute_prefix	 s;

	s.af = af;
	s.prefix = *prefix;
	s.prefixlen = prefixlen;

	return (RB_FIND(kroute_tree, &krt, &s));
}

/*
 * Find a priority node under a prefix.  RTP_ANY returns the first (i.e.
 * lowest, best) priority.  NULL when absent.
 */
static struct kroute_priority *
kroute_find_prio(struct kroute_prefix *kp, uint8_t prio)
{
	struct kroute_priority	*kprio;

	/* RTP_ANY here picks the lowest priority node */
	if (prio == RTP_ANY)
		return (TAILQ_FIRST(&kp->priorities));

	TAILQ_FOREACH(kprio, &kp->priorities, entry)
		if (kprio->priority == prio)
			return (kprio);

	return (NULL);
}

/* Find the nexthop node with the given gateway address; NULL when absent. */
static struct kroute_node *
kroute_find_gw(struct kroute_priority *kprio, union eigrpd_addr *nh)
{
	struct kroute_node	*kn;

	TAILQ_FOREACH(kn, &kprio->nexthops, entry)
		if (eigrp_addrcmp(kprio->kp->af, &kn->r.nexthop, nh) == 0)
			return (kn);

	return (NULL);
}

/*
 * Insert a route into the three-level cache, creating the prefix and
 * priority levels as needed (priorities stay sorted, lowest first).
 * Kernel routes are validated against the interface state and may be
 * redistributed; eigrpd's own routes are not.  Returns the nexthop node.
 */
static struct kroute_node *
kroute_insert(struct kroute *kr)
{
	struct kroute_prefix	*kp;
	struct kroute_priority	*kprio, *tmp;
	struct kroute_node	*kn;

	kp = kroute_find_prefix(kr->af, &kr->prefix, kr->prefixlen);
	if (kp == NULL) {
		kp = calloc(1, sizeof((*kp)));
		if (kp == NULL)
			fatal("kroute_insert");
		kp->af = kr->af;
		kp->prefix = kr->prefix;
		kp->prefixlen = kr->prefixlen;
		TAILQ_INIT(&kp->priorities);
		RB_INSERT(kroute_tree, &krt, kp);
	}

	kprio = kroute_find_prio(kp, kr->priority);
	if (kprio == NULL) {
		kprio = calloc(1, sizeof(*kprio));
		if (kprio == NULL)
			fatal("kroute_insert");
		kprio->kp = kp;
		kprio->priority = kr->priority;
		TAILQ_INIT(&kprio->nexthops);

		/* lower priorities first */
		TAILQ_FOREACH(tmp, &kp->priorities, entry)
			if (tmp->priority > kprio->priority)
				break;
		if (tmp)
			TAILQ_INSERT_BEFORE(tmp, kprio, entry);
		else
			TAILQ_INSERT_TAIL(&kp->priorities, kprio, entry);
	}

	kn = kroute_find_gw(kprio, &kr->nexthop);
	if (kn == NULL) {
		kn = calloc(1, sizeof(*kn));
		if (kn == NULL)
			fatal("kroute_insert");
		kn->kprio = kprio;
		kn->r = *kr;
		TAILQ_INSERT_TAIL(&kprio->nexthops, kn, entry);
	}

	if (!(kr->flags & F_KERNEL)) {
		/* don't validate or redistribute eigrp route */
		kr->flags &= ~F_DOWN;
		return (kn);
	}

	if (kif_validate(kr->ifindex))
		kr->flags &= ~F_DOWN;
	else
		kr->flags |= F_DOWN;

	kr_redistribute(kp);
	return (kn);
}

/*
 * Remove one nexthop from the cache, pruning empty priority and prefix
 * levels and re-evaluating redistribution.  Returns 0 on success, -1 when
 * the route was not found (a warning is logged).
 */
static int
kroute_remove(struct kroute *kr)
{
	struct kroute_prefix	*kp;
	struct kroute_priority	*kprio;
	struct kroute_node	*kn;

	kp = kroute_find_prefix(kr->af, &kr->prefix, kr->prefixlen);
	if (kp == NULL)
		goto notfound;
	kprio = kroute_find_prio(kp, kr->priority);
	if (kprio == NULL)
		goto notfound;
	kn = kroute_find_gw(kprio, &kr->nexthop);
	if (kn == NULL)
		goto notfound;

	kr_redist_remove(&kn->r);

	TAILQ_REMOVE(&kprio->nexthops, kn, entry);
	free(kn);

	if (TAILQ_EMPTY(&kprio->nexthops)) {
		TAILQ_REMOVE(&kp->priorities, kprio, entry);
		free(kprio);
	}

	if (TAILQ_EMPTY(&kp->priorities)) {
		if (RB_REMOVE(kroute_tree, &krt, kp) == NULL) {
			log_warnx("%s failed for %s/%u", __func__,
			    log_addr(kr->af, &kr->prefix), kp->prefixlen);
			return (-1);
		}
		free(kp);
	} else
		kr_redistribute(kp);

	return (0);

notfound:
	log_warnx("%s failed to find %s/%u", __func__,
	    log_addr(kr->af, &kr->prefix), kr->prefixlen);
	return (-1);
}

/* Free the entire route cache. */
static void
kroute_clear(void)
{
	struct kroute_prefix	*kp;
	struct kroute_priority	*kprio;
	struct kroute_node	*kn;

	while ((kp = RB_MIN(kroute_tree, &krt)) != NULL) {
		while ((kprio = TAILQ_FIRST(&kp->priorities)) != NULL) {
			while ((kn = TAILQ_FIRST(&kprio->nexthops)) != NULL) {
				TAILQ_REMOVE(&kprio->nexthops, kn, entry);
				free(kn);
			}
			TAILQ_REMOVE(&kp->priorities, kprio, entry);
			free(kprio);
		}
		RB_REMOVE(kroute_tree, &krt, kp);
		free(kp);
	}
}
/* RB-tree ordering for interfaces: by ifindex (descending comparison). */
static __inline int
kif_compare(struct kif_node *a, struct kif_node *b)
{
	return (b->k.ifindex - a->k.ifindex);
}

/* tree management */

/* Look up an interface node by ifindex; NULL when absent. */
static struct kif_node *
kif_find(unsigned short ifindex)
{
	struct kif_node	 s;

	memset(&s, 0, sizeof(s));
	s.k.ifindex = ifindex;

	return (RB_FIND(kif_tree, &kit, &s));
}

/* Look up an interface by name; NULL when absent. */
struct kif *
kif_findname(char *ifname)
{
	struct kif_node	*kif;

	RB_FOREACH(kif, kif_tree, &kit)
		if (!strcmp(ifname, kif->k.ifname))
			return (&kif->k);

	return (NULL);
}

/*
 * Allocate and insert a new interface node for 'ifindex'.
 * Returns NULL on allocation failure; duplicate insertion is fatal.
 */
static struct kif_node *
kif_insert(unsigned short ifindex)
{
	struct kif_node	*kif;

	if ((kif = calloc(1, sizeof(struct kif_node))) == NULL)
		return (NULL);

	kif->k.ifindex = ifindex;
	TAILQ_INIT(&kif->addrs);

	if (RB_INSERT(kif_tree, &kit, kif) != NULL)
		fatalx("kif_insert: RB_INSERT");

	return (kif);
}

/*
 * Remove an interface node and free its address list.
 * Returns 0 on success, -1 when the node was not in the tree.
 */
static int
kif_remove(struct kif_node *kif)
{
	struct kif_addr	*ka;

	if (RB_REMOVE(kif_tree, &kit, kif) == NULL) {
		/* fixed typo in log message: "inteface" -> "interface" */
		log_warnx("%s failed for interface %s", __func__,
		    kif->k.ifname);
		return (-1);
	}

	while ((ka = TAILQ_FIRST(&kif->addrs)) != NULL) {
		TAILQ_REMOVE(&kif->addrs, ka, entry);
		free(ka);
	}
	free(kif);
	return (0);
}

/* Free the entire interface tree. */
void
kif_clear(void)
{
	struct kif_node	*kif;

	while ((kif = RB_MIN(kif_tree, &kit)) != NULL)
		kif_remove(kif);
}

/*
 * Create or refresh the cached state of an interface from a kernel
 * RTM_IFINFO message.  Returns the kif, or NULL on allocation failure.
 */
static struct kif *
kif_update(unsigned short ifindex, int flags, struct if_data *ifd,
    struct sockaddr_dl *sdl)
{
	struct kif_node	*kif;

	if ((kif = kif_find(ifindex)) == NULL) {
		if ((kif = kif_insert(ifindex)) == NULL)
			return (NULL);
		kif->k.nh_reachable = (flags & IFF_UP) &&
		    LINK_STATE_IS_UP(ifd->ifi_link_state);
	}

	kif->k.flags = flags;
	kif->k.link_state = ifd->ifi_link_state;
	kif->k.if_type = ifd->ifi_type;
	kif->k.baudrate = ifd->ifi_baudrate;
	kif->k.mtu = ifd->ifi_mtu;

	if (sdl && sdl->sdl_family == AF_LINK) {
		/* sdl_data is not NUL-terminated; copy at most ifname-1 bytes */
		if (sdl->sdl_nlen >= sizeof(kif->k.ifname))
			memcpy(kif->k.ifname, sdl->sdl_data,
			    sizeof(kif->k.ifname) - 1);
		else if (sdl->sdl_nlen > 0)
			memcpy(kif->k.ifname, sdl->sdl_data,
			    sdl->sdl_nlen);
		/* string already terminated via calloc() */
	}

	return (&kif->k);
}

/* Is the interface usable as a nexthop?  0 when unknown or down. */
static int
kif_validate(unsigned short ifindex)
{
	struct kif_node	*kif;

	if ((kif = kif_find(ifindex)) == NULL)
		return (0);

	return (kif->k.nh_reachable);
}

/* misc */

/*
 * Pre-insert the loopback prefixes into the cache so that they are never
 * overwritten or redistributed.
 */
static void
protect_lo(void)
{
	struct kroute	 kr4, kr6;

	/* special protection for 127/8 */
	memset(&kr4, 0, sizeof(kr4));
	kr4.af = AF_INET;
	kr4.prefix.v4.s_addr = htonl(INADDR_LOOPBACK & IN_CLASSA_NET);
	kr4.prefixlen = 8;
	kr4.flags = F_KERNEL|F_CONNECTED;
	kroute_insert(&kr4);

	/* special protection for ::1 */
	memset(&kr6, 0, sizeof(kr6));
	kr6.af = AF_INET6;
	kr6.prefix.v6 = in6addr_loopback;
	kr6.prefixlen = 128;
	kr6.flags = F_KERNEL|F_CONNECTED;
	kroute_insert(&kr6);
}

/* misc */

/*
 * Derive a classful prefix length from a host-order IPv4 address (used when
 * the kernel supplies no netmask).
 */
static uint8_t
prefixlen_classful(in_addr_t ina)
{
	/* it hurt to write this. */

	if (ina >= 0xf0000000U)		/* class E */
		return (32);
	else if (ina >= 0xe0000000U)	/* class D */
		return (4);
	else if (ina >= 0xc0000000U)	/* class C */
		return (24);
	else if (ina >= 0x80000000U)	/* class B */
		return (16);
	else				/* class A */
		return (8);
}
/* Round a sockaddr length up to the routing-socket alignment (long). */
#define ROUNDUP(a)	\
    (((a) & (sizeof(long) - 1)) ? (1 + ((a) | (sizeof(long) - 1))) : (a))

/*
 * Split the packed sockaddr list of a routing message into rti_info[],
 * indexed by RTAX_* constant; absent addresses become NULL.
 */
static void
get_rtaddrs(int addrs, struct sockaddr *sa, struct sockaddr **rti_info)
{
	int	 i;

	for (i = 0; i < RTAX_MAX; i++) {
		if (addrs & (1 << i)) {
			rti_info[i] = sa;
			sa = (struct sockaddr *)((char *)(sa) +
			    ROUNDUP(sa->sa_len));
		} else
			rti_info[i] = NULL;
	}
}

/*
 * Handle an RTM_IFINFO message: refresh the cached interface state and, if
 * nexthop reachability changed, notify the other processes and revalidate
 * every route using this interface.
 */
static void
if_change(unsigned short ifindex, int flags, struct if_data *ifd,
    struct sockaddr_dl *sdl)
{
	struct kroute_prefix	*kp;
	struct kroute_priority	*kprio;
	struct kroute_node	*kn;
	struct kif		*kif;
	uint8_t			 reachable;

	if ((kif = kif_update(ifindex, flags, ifd, sdl)) == NULL) {
		log_warn("%s: kif_update(%u)", __func__, ifindex);
		return;
	}

	reachable = (kif->flags & IFF_UP) &&
	    LINK_STATE_IS_UP(kif->link_state);

	if (reachable == kif->nh_reachable)
		return;		/* nothing changed wrt nexthop validity */

	kif->nh_reachable = reachable;

	/* notify eigrpe about link state */
	main_imsg_compose_eigrpe(IMSG_IFINFO, 0, kif, sizeof(struct kif));

	/* notify rde about link going down */
	if (!kif->nh_reachable)
		main_imsg_compose_rde(IMSG_IFDOWN, 0, kif, sizeof(struct kif));

	/* update redistribute list */
	RB_FOREACH(kp, kroute_tree, &krt) {
		TAILQ_FOREACH(kprio, &kp->priorities, entry) {
			TAILQ_FOREACH(kn, &kprio->nexthops, entry) {
				if (kn->r.ifindex != ifindex)
					continue;

				if (reachable)
					kn->r.flags &= ~F_DOWN;
				else
					kn->r.flags |= F_DOWN;
			}
		}
		kr_redistribute(kp);
	}
}

/*
 * Handle an RTM_NEWADDR message: record the new interface address and
 * notify the eigrpe process.  Unwanted address families/addresses are
 * filtered out.
 */
static void
if_newaddr(unsigned short ifindex, struct sockaddr *ifa, struct sockaddr *mask,
    struct sockaddr *brd)
{
	struct kif_node		*kif;
	struct sockaddr_in	*ifa4, *mask4, *brd4;
	struct sockaddr_in6	*ifa6, *mask6, *brd6;
	struct kif_addr		*ka;

	if (ifa == NULL)
		return;
	if ((kif = kif_find(ifindex)) == NULL) {
		log_warnx("%s: corresponding if %d not found", __func__,
		    ifindex);
		return;
	}

	switch (ifa->sa_family) {
	case AF_INET:
		ifa4 = (struct sockaddr_in *) ifa;
		mask4 = (struct sockaddr_in *) mask;
		brd4 = (struct sockaddr_in *) brd;

		/* filter out unwanted addresses */
		if (bad_addr_v4(ifa4->sin_addr))
			return;

		if ((ka = calloc(1, sizeof(struct kif_addr))) == NULL)
			fatal("if_newaddr");
		ka->a.addr.v4 = ifa4->sin_addr;
		if (mask4)
			ka->a.prefixlen =
			    mask2prefixlen(mask4->sin_addr.s_addr);
		if (brd4)
			ka->a.dstbrd.v4 = brd4->sin_addr;
		break;
	case AF_INET6:
		ifa6 = (struct sockaddr_in6 *) ifa;
		mask6 = (struct sockaddr_in6 *) mask;
		brd6 = (struct sockaddr_in6 *) brd;

		/* We only care about link-local and global-scope. */
		if (bad_addr_v6(&ifa6->sin6_addr))
			return;

		clearscope(&ifa6->sin6_addr);

		if ((ka = calloc(1, sizeof(struct kif_addr))) == NULL)
			fatal("if_newaddr");
		ka->a.addr.v6 = ifa6->sin6_addr;
		if (mask6)
			ka->a.prefixlen = mask2prefixlen6(mask6);
		if (brd6)
			ka->a.dstbrd.v6 = brd6->sin6_addr;
		break;
	default:
		return;
	}

	ka->a.ifindex = ifindex;
	ka->a.af = ifa->sa_family;
	TAILQ_INSERT_TAIL(&kif->addrs, ka, entry);

	/* notify eigrpe about new address */
	main_imsg_compose_eigrpe(IMSG_NEWADDR, 0, &ka->a, sizeof(ka->a));
}

/*
 * Handle an RTM_DELADDR message: find the matching cached address, notify
 * the eigrpe process and drop it from the interface's address list.
 */
static void
if_deladdr(unsigned short ifindex, struct sockaddr *ifa, struct sockaddr *mask,
    struct sockaddr *brd)
{
	struct kif_node		*kif;
	struct sockaddr_in	*ifa4, *mask4, *brd4;
	struct sockaddr_in6	*ifa6, *mask6, *brd6;
	struct kaddr		 k;
	struct kif_addr		*ka, *nka;

	if (ifa == NULL)
		return;
	if ((kif = kif_find(ifindex)) == NULL) {
		log_warnx("%s: corresponding if %d not found", __func__,
		    ifindex);
		return;
	}

	memset(&k, 0, sizeof(k));
	k.af = ifa->sa_family;
	switch (ifa->sa_family) {
	case AF_INET:
		ifa4 = (struct sockaddr_in *) ifa;
		mask4 = (struct sockaddr_in *) mask;
		brd4 = (struct sockaddr_in *) brd;

		/* filter out unwanted addresses */
		if (bad_addr_v4(ifa4->sin_addr))
			return;

		k.addr.v4 = ifa4->sin_addr;
		if (mask4)
			k.prefixlen = mask2prefixlen(mask4->sin_addr.s_addr);
		if (brd4)
			k.dstbrd.v4 = brd4->sin_addr;
		break;
	case AF_INET6:
		ifa6 = (struct sockaddr_in6 *) ifa;
		mask6 = (struct sockaddr_in6 *) mask;
		brd6 = (struct sockaddr_in6 *) brd;

		/* We only care about link-local and global-scope. */
		if (bad_addr_v6(&ifa6->sin6_addr))
			return;

		clearscope(&ifa6->sin6_addr);

		k.addr.v6 = ifa6->sin6_addr;
		if (mask6)
			k.prefixlen = mask2prefixlen6(mask6);
		if (brd6)
			k.dstbrd.v6 = brd6->sin6_addr;
		break;
	default:
		return;
	}

	for (ka = TAILQ_FIRST(&kif->addrs); ka != NULL; ka = nka) {
		nka = TAILQ_NEXT(ka, entry);

		if (ka->a.af != k.af ||
		    ka->a.prefixlen != k.prefixlen ||
		    eigrp_addrcmp(ka->a.af, &ka->a.addr, &k.addr) ||
		    eigrp_addrcmp(ka->a.af, &ka->a.dstbrd, &k.dstbrd))
			continue;

		/* notify eigrpe about removed address */
		main_imsg_compose_eigrpe(IMSG_DELADDR, 0, &ka->a,
		    sizeof(ka->a));
		TAILQ_REMOVE(&kif->addrs, ka, entry);
		free(ka);
		return;
	}
}
/*
 * Handle an RTM_IFANNOUNCE message: interfaces arriving or departing the
 * system.
 */
static void
if_announce(void *msg)
{
	struct if_announcemsghdr	*ifan;
	struct kif_node			*kif;

	ifan = msg;

	switch (ifan->ifan_what) {
	case IFAN_ARRIVAL:
		kif = kif_insert(ifan->ifan_index);
		if (kif)
			strlcpy(kif->k.ifname, ifan->ifan_name,
			    sizeof(kif->k.ifname));
		break;
	case IFAN_DEPARTURE:
		kif = kif_find(ifan->ifan_index);
		if (kif)
			kif_remove(kif);
		break;
	}
}

/* rtsock */

/*
 * Build and write an IPv4 route message to the routing socket.  A no-op
 * while decoupled from the FIB.  ESRCH on RTM_CHANGE is retried as RTM_ADD;
 * ESRCH on RTM_DELETE is ignored (route already gone).  Always returns 0.
 */
static int
send_rtmsg_v4(int fd, int action, struct kroute *kr)
{
	struct iovec		 iov[5];
	struct rt_msghdr	 hdr;
	struct sockaddr_in	 prefix;
	struct sockaddr_in	 nexthop;
	struct sockaddr_in	 mask;
	int			 iovcnt = 0;

	if (kr_state.fib_sync == 0)
		return (0);

	/* initialize header */
	memset(&hdr, 0, sizeof(hdr));
	hdr.rtm_version = RTM_VERSION;
	hdr.rtm_type = action;
	hdr.rtm_priority = kr->priority;
	hdr.rtm_tableid = kr_state.rdomain;	/* rtableid */
	if (action == RTM_CHANGE)
		hdr.rtm_fmask = RTF_REJECT|RTF_BLACKHOLE;
	else
		hdr.rtm_flags = RTF_MPATH;
	if (kr->flags & F_BLACKHOLE)
		hdr.rtm_flags |= RTF_BLACKHOLE;
	hdr.rtm_seq = kr_state.rtseq++;	/* overflow doesn't matter */
	hdr.rtm_msglen = sizeof(hdr);
	/* adjust iovec */
	iov[iovcnt].iov_base = &hdr;
	iov[iovcnt++].iov_len = sizeof(hdr);

	memset(&prefix, 0, sizeof(prefix));
	prefix.sin_len = sizeof(prefix);
	prefix.sin_family = AF_INET;
	prefix.sin_addr = kr->prefix.v4;
	/* adjust header */
	hdr.rtm_addrs |= RTA_DST;
	hdr.rtm_msglen += sizeof(prefix);
	/* adjust iovec */
	iov[iovcnt].iov_base = &prefix;
	iov[iovcnt++].iov_len = sizeof(prefix);

	if (kr->nexthop.v4.s_addr != 0) {
		memset(&nexthop, 0, sizeof(nexthop));
		nexthop.sin_len = sizeof(nexthop);
		nexthop.sin_family = AF_INET;
		nexthop.sin_addr = kr->nexthop.v4;
		/* adjust header */
		hdr.rtm_flags |= RTF_GATEWAY;
		hdr.rtm_addrs |= RTA_GATEWAY;
		hdr.rtm_msglen += sizeof(nexthop);
		/* adjust iovec */
		iov[iovcnt].iov_base = &nexthop;
		iov[iovcnt++].iov_len = sizeof(nexthop);
	}

	memset(&mask, 0, sizeof(mask));
	mask.sin_len = sizeof(mask);
	mask.sin_family = AF_INET;
	mask.sin_addr.s_addr = prefixlen2mask(kr->prefixlen);
	/* adjust header */
	hdr.rtm_addrs |= RTA_NETMASK;
	hdr.rtm_msglen += sizeof(mask);
	/* adjust iovec */
	iov[iovcnt].iov_base = &mask;
	iov[iovcnt++].iov_len = sizeof(mask);

retry:
	if (writev(fd, iov, iovcnt) == -1) {
		if (errno == ESRCH) {
			if (hdr.rtm_type == RTM_CHANGE) {
				hdr.rtm_type = RTM_ADD;
				goto retry;
			} else if (hdr.rtm_type == RTM_DELETE) {
				log_info("route %s/%u vanished before delete",
				    inet_ntoa(kr->prefix.v4),
				    kr->prefixlen);
				return (0);
			}
		}
		log_warn("%s: action %u, prefix %s/%u", __func__, hdr.rtm_type,
		    inet_ntoa(kr->prefix.v4), kr->prefixlen);
		return (0);
	}

	return (0);
}

/*
 * IPv6 counterpart of send_rtmsg_v4().  Sockaddrs are padded to the
 * routing-socket alignment; link-local nexthops get their scope embedded.
 * Always returns 0.
 */
static int
send_rtmsg_v6(int fd, int action, struct kroute *kr)
{
	struct iovec		 iov[5];
	struct rt_msghdr	 hdr;
	struct pad {
		struct sockaddr_in6	addr;
		char			pad[sizeof(long)]; /* thank you IPv6 */
	} prefix, nexthop, mask;
	int			 iovcnt = 0;

	if (kr_state.fib_sync == 0)
		return (0);

	/* initialize header */
	memset(&hdr, 0, sizeof(hdr));
	hdr.rtm_version = RTM_VERSION;
	hdr.rtm_type = action;
	hdr.rtm_priority = kr->priority;
	hdr.rtm_tableid = kr_state.rdomain;	/* rtableid */
	if (action == RTM_CHANGE)
		hdr.rtm_fmask = RTF_REJECT|RTF_BLACKHOLE;
	else
		hdr.rtm_flags = RTF_MPATH;
	hdr.rtm_seq = kr_state.rtseq++;	/* overflow doesn't matter */
	hdr.rtm_msglen = sizeof(hdr);
	/* adjust iovec */
	iov[iovcnt].iov_base = &hdr;
	iov[iovcnt++].iov_len = sizeof(hdr);

	memset(&prefix, 0, sizeof(prefix));
	prefix.addr.sin6_len = sizeof(struct sockaddr_in6);
	prefix.addr.sin6_family = AF_INET6;
	prefix.addr.sin6_addr = kr->prefix.v6;
	/* adjust header */
	hdr.rtm_addrs |= RTA_DST;
	hdr.rtm_msglen += ROUNDUP(sizeof(struct sockaddr_in6));
	/* adjust iovec */
	iov[iovcnt].iov_base = &prefix;
	iov[iovcnt++].iov_len = ROUNDUP(sizeof(struct sockaddr_in6));

	if (!IN6_IS_ADDR_UNSPECIFIED(&kr->nexthop.v6)) {
		memset(&nexthop, 0, sizeof(nexthop));
		nexthop.addr.sin6_len = sizeof(struct sockaddr_in6);
		nexthop.addr.sin6_family = AF_INET6;
		nexthop.addr.sin6_addr = kr->nexthop.v6;
		nexthop.addr.sin6_scope_id = kr->ifindex;
		embedscope(&nexthop.addr);

		/* adjust header */
		hdr.rtm_flags |= RTF_GATEWAY;
		hdr.rtm_addrs |= RTA_GATEWAY;
		hdr.rtm_msglen += ROUNDUP(sizeof(struct sockaddr_in6));
		/* adjust iovec */
		iov[iovcnt].iov_base = &nexthop;
		iov[iovcnt++].iov_len = ROUNDUP(sizeof(struct sockaddr_in6));
	}

	memset(&mask, 0, sizeof(mask));
	mask.addr.sin6_len = sizeof(struct sockaddr_in6);
	mask.addr.sin6_family = AF_INET6;
	mask.addr.sin6_addr = *prefixlen2mask6(kr->prefixlen);
	/* adjust header */
	if (kr->prefixlen == 128)
		hdr.rtm_flags |= RTF_HOST;
	hdr.rtm_addrs |= RTA_NETMASK;
	hdr.rtm_msglen += ROUNDUP(sizeof(struct sockaddr_in6));
	/* adjust iovec */
	iov[iovcnt].iov_base = &mask;
	iov[iovcnt++].iov_len = ROUNDUP(sizeof(struct sockaddr_in6));

retry:
	if (writev(fd, iov, iovcnt) == -1) {
		if (errno == ESRCH) {
			if (hdr.rtm_type == RTM_CHANGE) {
				hdr.rtm_type = RTM_ADD;
				goto retry;
			} else if (hdr.rtm_type == RTM_DELETE) {
				log_info("route %s/%u vanished before delete",
				    log_in6addr(&kr->prefix.v6),
				    kr->prefixlen);
				return (0);
			}
		}
		log_warn("%s: action %u, prefix %s/%u", __func__, hdr.rtm_type,
		    log_in6addr(&kr->prefix.v6), kr->prefixlen);
		return (0);
	}

	return (0);
}
/* Dispatch a route message to the per-family sender; -1 on unknown family. */
static int
send_rtmsg(int fd, int action, struct kroute *kr)
{
	switch (kr->af) {
	case AF_INET:
		return (send_rtmsg_v4(fd, action, kr));
	case AF_INET6:
		return (send_rtmsg_v6(fd, action, kr));
	default:
		break;
	}

	return (-1);
}

/*
 * Fetch the kernel routing table of our rdomain via sysctl(NET_RT_DUMP)
 * and feed it to rtmsg_process().  Returns rtmsg_process()'s result, or -1
 * on sysctl/allocation failure.
 */
static int
fetchtable(void)
{
	size_t		 len;
	int		 mib[7];
	char		*buf;
	int		 rv;

	mib[0] = CTL_NET;
	mib[1] = PF_ROUTE;
	mib[2] = 0;
	mib[3] = 0;
	mib[4] = NET_RT_DUMP;
	mib[5] = 0;
	mib[6] = kr_state.rdomain;	/* rtableid */

	if (sysctl(mib, 7, NULL, &len, NULL, 0) == -1) {
		log_warn("sysctl");
		return (-1);
	}
	if ((buf = malloc(len)) == NULL) {
		log_warn("%s", __func__);
		return (-1);
	}
	if (sysctl(mib, 7, buf, &len, NULL, 0) == -1) {
		log_warn("sysctl");
		free(buf);
		return (-1);
	}

	rv = rtmsg_process(buf, len);
	free(buf);

	return (rv);
}

/*
 * Fetch the kernel interface list via sysctl(NET_RT_IFLIST) and feed it to
 * rtmsg_process().  Returns rtmsg_process()'s result, or -1 on failure.
 */
static int
fetchifs(void)
{
	size_t		 len;
	int		 mib[6];
	char		*buf;
	int		 rv;

	mib[0] = CTL_NET;
	mib[1] = PF_ROUTE;
	mib[2] = 0;
	mib[3] = 0;	/* wildcard */
	mib[4] = NET_RT_IFLIST;
	mib[5] = 0;

	if (sysctl(mib, 6, NULL, &len, NULL, 0) == -1) {
		log_warn("sysctl");
		return (-1);
	}
	if ((buf = malloc(len)) == NULL) {
		log_warn("%s", __func__);
		return (-1);
	}
	if (sysctl(mib, 6, buf, &len, NULL, 0) == -1) {
		log_warn("sysctl");
		free(buf);
		return (-1);
	}

	rv = rtmsg_process(buf, len);
	free(buf);

	return (rv);
}

/*
 * Read pending messages off the routing socket and process them.
 * Returns -1 on fatal error (socket closed or read failure), else the
 * result of rtmsg_process().
 */
static int
dispatch_rtmsg(void)
{
	char		 buf[RT_BUF_SIZE];
	ssize_t		 n;

	if ((n = read(kr_state.fd, &buf, sizeof(buf))) == -1) {
		if (errno == EAGAIN || errno == EINTR)
			return (0);
		log_warn("%s: read error", __func__);
		return (-1);
	}

	if (n == 0) {
		log_warnx("routing socket closed");
		return (-1);
	}

	return (rtmsg_process(buf, n));
}

/*
 * Walk a buffer of routing messages and dispatch each one by type
 * (route changes, interface info, address add/delete, announcements).
 * Returns the number of bytes consumed, or -1 when a route message could
 * not be processed.
 */
static int
rtmsg_process(char *buf, size_t len)
{
	struct rt_msghdr	*rtm;
	struct if_msghdr	 ifm;
	struct ifa_msghdr	*ifam;
	struct sockaddr		*sa, *rti_info[RTAX_MAX];
	size_t			 offset;
	char			*next;

	for (offset = 0; offset < len; offset += rtm->rtm_msglen) {
		next = buf + offset;
		rtm = (struct rt_msghdr *)next;
		if (len < offset + sizeof(unsigned short) ||
		    len < offset + rtm->rtm_msglen)
			fatalx("rtmsg_process: partial rtm in buffer");
		if (rtm->rtm_version != RTM_VERSION)
			continue;

		sa = (struct sockaddr *)(next + rtm->rtm_hdrlen);
		get_rtaddrs(rtm->rtm_addrs, sa, rti_info);

		switch (rtm->rtm_type) {
		case RTM_ADD:
		case RTM_GET:
		case RTM_CHANGE:
		case RTM_DELETE:
			if (rtm->rtm_errno)	/* failed attempts... */
				continue;

			if (rtm->rtm_tableid != kr_state.rdomain)
				continue;

			/* RTM_GET replies: only our own queries matter */
			if (rtm->rtm_type == RTM_GET &&
			    rtm->rtm_pid != kr_state.pid)
				continue;

			/* Skip ARP/ND cache and broadcast routes. */
			if (rtm->rtm_flags & (RTF_LLINFO|RTF_BROADCAST))
				continue;

			if (rtmsg_process_route(rtm, rti_info) == -1)
				return (-1);
		}

		switch (rtm->rtm_type) {
		case RTM_IFINFO:
			memcpy(&ifm, next, sizeof(ifm));
			if_change(ifm.ifm_index, ifm.ifm_flags, &ifm.ifm_data,
			    (struct sockaddr_dl *)rti_info[RTAX_IFP]);
			break;
		case RTM_NEWADDR:
			ifam = (struct ifa_msghdr *)rtm;
			if ((ifam->ifam_addrs & (RTA_NETMASK | RTA_IFA |
			    RTA_BRD)) == 0)
				break;

			if_newaddr(ifam->ifam_index,
			    (struct sockaddr *)rti_info[RTAX_IFA],
			    (struct sockaddr *)rti_info[RTAX_NETMASK],
			    (struct sockaddr *)rti_info[RTAX_BRD]);
			break;
		case RTM_DELADDR:
			ifam = (struct ifa_msghdr *)rtm;
			if ((ifam->ifam_addrs & (RTA_NETMASK | RTA_IFA |
			    RTA_BRD)) == 0)
				break;

			if_deladdr(ifam->ifam_index,
			    (struct sockaddr *)rti_info[RTAX_IFA],
			    (struct sockaddr *)rti_info[RTAX_NETMASK],
			    (struct sockaddr *)rti_info[RTAX_BRD]);
			break;
		case RTM_IFANNOUNCE:
			if_announce(next);
			break;
		default:
			/* ignore for now */
			break;
		}
	}

	return (offset);
}
*/ 1352 continue; 1353 1354 if (rtm->rtm_tableid != kr_state.rdomain) 1355 continue; 1356 1357 if (rtm->rtm_type == RTM_GET && 1358 rtm->rtm_pid != kr_state.pid) 1359 continue; 1360 1361 /* Skip ARP/ND cache and broadcast routes. */ 1362 if (rtm->rtm_flags & (RTF_LLINFO|RTF_BROADCAST)) 1363 continue; 1364 1365 if (rtmsg_process_route(rtm, rti_info) == -1) 1366 return (-1); 1367 } 1368 1369 switch (rtm->rtm_type) { 1370 case RTM_IFINFO: 1371 memcpy(&ifm, next, sizeof(ifm)); 1372 if_change(ifm.ifm_index, ifm.ifm_flags, &ifm.ifm_data, 1373 (struct sockaddr_dl *)rti_info[RTAX_IFP]); 1374 break; 1375 case RTM_NEWADDR: 1376 ifam = (struct ifa_msghdr *)rtm; 1377 if ((ifam->ifam_addrs & (RTA_NETMASK | RTA_IFA | 1378 RTA_BRD)) == 0) 1379 break; 1380 1381 if_newaddr(ifam->ifam_index, 1382 (struct sockaddr *)rti_info[RTAX_IFA], 1383 (struct sockaddr *)rti_info[RTAX_NETMASK], 1384 (struct sockaddr *)rti_info[RTAX_BRD]); 1385 break; 1386 case RTM_DELADDR: 1387 ifam = (struct ifa_msghdr *)rtm; 1388 if ((ifam->ifam_addrs & (RTA_NETMASK | RTA_IFA | 1389 RTA_BRD)) == 0) 1390 break; 1391 1392 if_deladdr(ifam->ifam_index, 1393 (struct sockaddr *)rti_info[RTAX_IFA], 1394 (struct sockaddr *)rti_info[RTAX_NETMASK], 1395 (struct sockaddr *)rti_info[RTAX_BRD]); 1396 break; 1397 case RTM_IFANNOUNCE: 1398 if_announce(next); 1399 break; 1400 default: 1401 /* ignore for now */ 1402 break; 1403 } 1404 } 1405 1406 return (offset); 1407 } 1408 1409 static int 1410 rtmsg_process_route(struct rt_msghdr *rtm, struct sockaddr *rti_info[RTAX_MAX]) 1411 { 1412 struct sockaddr *sa; 1413 struct sockaddr_in *sa_in; 1414 struct sockaddr_in6 *sa_in6; 1415 struct kroute kr; 1416 struct kroute_prefix *kp; 1417 struct kroute_priority *kprio; 1418 struct kroute_node *kn; 1419 1420 if ((sa = rti_info[RTAX_DST]) == NULL) 1421 return (-1); 1422 1423 memset(&kr, 0, sizeof(kr)); 1424 kr.af = sa->sa_family; 1425 switch (kr.af) { 1426 case AF_INET: 1427 kr.prefix.v4 = ((struct sockaddr_in *)sa)->sin_addr; 1428 sa_in 
= (struct sockaddr_in *) rti_info[RTAX_NETMASK]; 1429 if (sa_in != NULL && sa_in->sin_len != 0) 1430 kr.prefixlen = mask2prefixlen(sa_in->sin_addr.s_addr); 1431 else if (rtm->rtm_flags & RTF_HOST) 1432 kr.prefixlen = 32; 1433 else if (kr.prefix.v4.s_addr == INADDR_ANY) 1434 kr.prefixlen = 0; 1435 else 1436 kr.prefixlen = prefixlen_classful(kr.prefix.v4.s_addr); 1437 break; 1438 case AF_INET6: 1439 kr.prefix.v6 = ((struct sockaddr_in6 *)sa)->sin6_addr; 1440 sa_in6 = (struct sockaddr_in6 *)rti_info[RTAX_NETMASK]; 1441 if (sa_in6 != NULL && sa_in6->sin6_len != 0) 1442 kr.prefixlen = mask2prefixlen6(sa_in6); 1443 else if (rtm->rtm_flags & RTF_HOST) 1444 kr.prefixlen = 128; 1445 else if (IN6_IS_ADDR_UNSPECIFIED(&kr.prefix.v6)) 1446 kr.prefixlen = 0; 1447 else 1448 fatalx("in6 net addr without netmask"); 1449 break; 1450 default: 1451 return (0); 1452 } 1453 kr.ifindex = rtm->rtm_index; 1454 if ((sa = rti_info[RTAX_GATEWAY]) != NULL) { 1455 switch (sa->sa_family) { 1456 case AF_INET: 1457 kr.nexthop.v4 = ((struct sockaddr_in *)sa)->sin_addr; 1458 break; 1459 case AF_INET6: 1460 sa_in6 = (struct sockaddr_in6 *)sa; 1461 recoverscope(sa_in6); 1462 kr.nexthop.v6 = sa_in6->sin6_addr; 1463 if (sa_in6->sin6_scope_id) 1464 kr.ifindex = sa_in6->sin6_scope_id; 1465 break; 1466 case AF_LINK: 1467 kr.flags |= F_CONNECTED; 1468 break; 1469 } 1470 } 1471 kr.flags |= F_KERNEL; 1472 if (rtm->rtm_flags & RTF_STATIC) 1473 kr.flags |= F_STATIC; 1474 if (rtm->rtm_flags & RTF_BLACKHOLE) 1475 kr.flags |= F_BLACKHOLE; 1476 if (rtm->rtm_flags & RTF_REJECT) 1477 kr.flags |= F_REJECT; 1478 if (rtm->rtm_flags & RTF_DYNAMIC) 1479 kr.flags |= F_DYNAMIC; 1480 if (rtm->rtm_flags & RTF_CONNECTED) 1481 kr.flags |= F_CONNECTED; 1482 kr.priority = rtm->rtm_priority; 1483 1484 if (rtm->rtm_type == RTM_CHANGE) { 1485 /* 1486 * The kernel doesn't allow RTM_CHANGE for multipath routes. 
1487 * If we got this message we know that the route has only one 1488 * nexthop and we should remove it before installing the same 1489 * route with the new nexthop. 1490 */ 1491 kp = kroute_find_prefix(kr.af, &kr.prefix, kr.prefixlen); 1492 if (kp) { 1493 kprio = kroute_find_prio(kp, kr.priority); 1494 if (kprio) { 1495 kn = TAILQ_FIRST(&kprio->nexthops); 1496 if (kn) 1497 kroute_remove(&kn->r); 1498 } 1499 } 1500 } 1501 1502 kn = NULL; 1503 kp = kroute_find_prefix(kr.af, &kr.prefix, kr.prefixlen); 1504 if (kp) { 1505 kprio = kroute_find_prio(kp, kr.priority); 1506 if (kprio) 1507 kn = kroute_find_gw(kprio, &kr.nexthop); 1508 } 1509 1510 if (rtm->rtm_type == RTM_DELETE) { 1511 if (kn == NULL || !(kn->r.flags & F_KERNEL)) 1512 return (0); 1513 return (kroute_remove(&kr)); 1514 } 1515 1516 if (!eigrp_addrisset(kr.af, &kr.nexthop) && !(kr.flags & F_CONNECTED)) { 1517 log_warnx("%s: no nexthop for %s/%u", __func__, 1518 log_addr(kr.af, &kr.prefix), kr.prefixlen); 1519 return (-1); 1520 } 1521 1522 if (kn != NULL) { 1523 /* update route */ 1524 kn->r = kr; 1525 1526 if (kif_validate(kn->r.ifindex)) 1527 kn->r.flags &= ~F_DOWN; 1528 else 1529 kn->r.flags |= F_DOWN; 1530 1531 kr_redistribute(kp); 1532 } else { 1533 if ((rtm->rtm_type == RTM_ADD || rtm->rtm_type == RTM_GET) && 1534 (kr.priority == eigrpd_conf->fib_priority_internal || 1535 kr.priority == eigrpd_conf->fib_priority_external || 1536 kr.priority == eigrpd_conf->fib_priority_summary)) { 1537 log_warnx("alien EIGRP route %s/%d", log_addr(kr.af, 1538 &kr.prefix), kr.prefixlen); 1539 return (send_rtmsg(kr_state.fd, RTM_DELETE, &kr)); 1540 } 1541 1542 kroute_insert(&kr); 1543 } 1544 1545 return (0); 1546 } 1547