/*	$OpenBSD: kroute.c,v 1.44 2014/06/23 03:46:17 guenther Exp $ */

/*
 * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/route.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "ospf6d.h"
#include "ospfe.h"
#include "log.h"

struct {
	u_int32_t		rtseq;
	pid_t			pid;
	int			fib_sync;
	int			fd;
	struct event		ev;
} kr_state;

struct kroute_node {
	RB_ENTRY(kroute_node)	 entry;
	struct kroute		 r;
	struct kroute_node	*next;
};

void	kr_redist_remove(struct kroute_node *, struct kroute_node *);
int	kr_redist_eval(struct kroute *, struct rroute *);
void	kr_redistribute(struct kroute_node *);
int	kroute_compare(struct kroute_node *, struct kroute_node *);

struct kroute_node	*kroute_find(const struct in6_addr *, u_int8_t);
struct kroute_node	*kroute_matchgw(struct kroute_node *,
			    struct in6_addr *, unsigned int);
int			 kroute_insert(struct kroute_node *);
int			 kroute_remove(struct kroute_node *);
void			 kroute_clear(void);

struct iface		*kif_update(u_short, int, struct if_data *,
			    struct sockaddr_dl *);
int			 kif_validate(u_short);

struct kroute_node	*kroute_match(struct in6_addr *);

int		protect_lo(void);
void		get_rtaddrs(int, struct sockaddr *, struct sockaddr **);
void		if_change(u_short, int, struct if_data *);
void		if_newaddr(u_short, struct sockaddr_in6 *,
		    struct sockaddr_in6 *, struct sockaddr_in6 *);
void		if_deladdr(u_short, struct sockaddr_in6 *,
		    struct sockaddr_in6 *, struct sockaddr_in6 *);
void		if_announce(void *);

int		send_rtmsg(int, int, struct kroute *);
int		dispatch_rtmsg(void);
int		fetchtable(void);

RB_HEAD(kroute_tree, kroute_node)	krt;
RB_PROTOTYPE(kroute_tree, kroute_node, entry, kroute_compare)
RB_GENERATE(kroute_tree, kroute_node, entry, kroute_compare)
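
/*
 * kr_init(): open the routing socket, tune it (no loopback of our own
 * messages, a larger receive buffer), load the current kernel routing
 * table and hook the socket into the event loop.  fs controls whether
 * the FIB is kept in sync (kr_state.fib_sync).
 */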
int
kr_init(int fs)
{
	int		opt = 0, rcvbuf, default_rcvbuf;
	socklen_t	optlen;

	kr_state.fib_sync = fs;

	if ((kr_state.fd = socket(AF_ROUTE, SOCK_RAW, 0)) == -1) {
		log_warn("kr_init: socket");
		return (-1);
	}

	/* not interested in my own messages */
	if (setsockopt(kr_state.fd, SOL_SOCKET, SO_USELOOPBACK,
	    &opt, sizeof(opt)) == -1)
		log_warn("kr_init: setsockopt");	/* not fatal */

	/* grow receive buffer, don't wanna miss messages */
	optlen = sizeof(default_rcvbuf);
	if (getsockopt(kr_state.fd, SOL_SOCKET, SO_RCVBUF,
	    &default_rcvbuf, &optlen) == -1)
		log_warn("kr_init getsockopt SOL_SOCKET SO_RCVBUF");
	else
		for (rcvbuf = MAX_RTSOCK_BUF;
		    rcvbuf > default_rcvbuf &&
		    setsockopt(kr_state.fd, SOL_SOCKET, SO_RCVBUF,
		    &rcvbuf, sizeof(rcvbuf)) == -1 && errno == ENOBUFS;
		    rcvbuf /= 2)
			;	/* nothing */

	kr_state.pid = getpid();
	kr_state.rtseq = 1;

	RB_INIT(&krt);

	if (fetchtable() == -1)
		return (-1);

	if (protect_lo() == -1)
		return (-1);

	event_set(&kr_state.ev, kr_state.fd, EV_READ | EV_PERSIST,
	    kr_dispatch_msg, NULL);
	event_add(&kr_state.ev, NULL);

	return (0);
}
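
/*
 * kr_change(): install or update an OSPF route in the kernel FIB.
 * If a kernel route for the same prefix already exists, the OSPF route
 * only takes over when that route was inserted by bgpd (F_BGPD_INSERTED);
 * otherwise the existing entries are merely flagged as also known to
 * ospf6d and left alone.
 */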
int
kr_change(struct kroute *kroute)
{
	struct kroute_node	*kr;
	int			 action = RTM_ADD;

	kroute->rtlabel = rtlabel_tag2id(kroute->ext_tag);

	if ((kr = kroute_find(&kroute->prefix, kroute->prefixlen)) !=
	    NULL) {
		if (!(kr->r.flags & F_KERNEL))
			action = RTM_CHANGE;
		else {	/* a non-ospf route already exists. not a problem */
			if (!(kr->r.flags & F_BGPD_INSERTED)) {
				do {
					kr->r.flags |= F_OSPFD_INSERTED;
					kr = kr->next;
				} while (kr);
				return (0);
			}
			/*
			 * XXX as long as there is no multipath support in
			 * bgpd this is safe else we end up in a bad situation.
			 */
			/*
			 * ospf route has higher pref
			 * - reset flags to the ospf ones
			 * - use RTM_CHANGE
			 * - zero out ifindex (this is no longer relevant)
			 */
			action = RTM_CHANGE;
			kr->r.flags = kroute->flags | F_OSPFD_INSERTED;
			kr->r.ifindex = 0;
			rtlabel_unref(kr->r.rtlabel);
			kr->r.ext_tag = kroute->ext_tag;
			kr->r.rtlabel = kroute->rtlabel;
		}
	}

	/* loopback nexthop -> ignore silently */
	if (kr && IN6_IS_ADDR_LOOPBACK(&kr->r.nexthop))
		return (0);

	/*
	 * Ignore updates that did not change the route.
	 * Currently only the nexthop can change.
	 */
	if (kr && kr->r.scope == kroute->scope &&
	    IN6_ARE_ADDR_EQUAL(&kr->r.nexthop, &kroute->nexthop))
		return (0);

	if (send_rtmsg(kr_state.fd, action, kroute) == -1)
		return (-1);

	if (action == RTM_ADD) {
		if ((kr = calloc(1, sizeof(struct kroute_node))) == NULL) {
			log_warn("kr_change");
			return (-1);
		}
		kr->r.prefix = kroute->prefix;
		kr->r.prefixlen = kroute->prefixlen;
		kr->r.nexthop = kroute->nexthop;
		kr->r.scope = kroute->scope;
		kr->r.flags = kroute->flags | F_OSPFD_INSERTED;
		kr->r.ext_tag = kroute->ext_tag;
		kr->r.rtlabel = kroute->rtlabel;

		if (kroute_insert(kr) == -1)
			free(kr);
	} else if (kr) {
		kr->r.nexthop = kroute->nexthop;
		kr->r.scope = kroute->scope;
	}

	return (0);
}

int
kr_delete(struct kroute *kroute)
{
	struct kroute_node	*kr;

	if ((kr = kroute_find(&kroute->prefix, kroute->prefixlen)) ==
	    NULL)
		return (0);

	if (!(kr->r.flags & F_OSPFD_INSERTED))
		return (0);

	if (kr->r.flags & F_KERNEL) {
		/* remove F_OSPFD_INSERTED flag, route still exists in kernel */
		do {
			kr->r.flags &= ~F_OSPFD_INSERTED;
			kr = kr->next;
		} while (kr);
		return (0);
	}

	if (send_rtmsg(kr_state.fd, RTM_DELETE, kroute) == -1)
		return (-1);

	if (kroute_remove(kr) == -1)
		return (-1);

	return (0);
}

void
kr_shutdown(void)
{
	kr_fib_decouple();
	kroute_clear();
}
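
/*
 * kr_fib_couple()/kr_fib_decouple(): switch between keeping the kernel
 * FIB in sync with the OSPF routes and running in decoupled mode.
 * Coupling re-adds every route that is not a plain kernel route;
 * decoupling deletes them again while keeping the internal tree intact.
 */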
void
kr_fib_couple(void)
{
	struct kroute_node	*kr;

	if (kr_state.fib_sync == 1)	/* already coupled */
		return;

	kr_state.fib_sync = 1;

	RB_FOREACH(kr, kroute_tree, &krt)
		if (!(kr->r.flags & F_KERNEL))
			send_rtmsg(kr_state.fd, RTM_ADD, &kr->r);

	log_info("kernel routing table coupled");
}

void
kr_fib_decouple(void)
{
	struct kroute_node	*kr;

	if (kr_state.fib_sync == 0)	/* already decoupled */
		return;

	RB_FOREACH(kr, kroute_tree, &krt)
		if (!(kr->r.flags & F_KERNEL))
			send_rtmsg(kr_state.fd, RTM_DELETE, &kr->r);

	kr_state.fib_sync = 0;

	log_info("kernel routing table decoupled");
}

/* ARGSUSED */
void
kr_dispatch_msg(int fd, short event, void *bula)
{
	dispatch_rtmsg();
}

void
kr_show_route(struct imsg *imsg)
{
	struct kroute_node	*kr;
	struct kroute_node	*kn;
	int			 flags;
	struct in6_addr		 addr;

	switch (imsg->hdr.type) {
	case IMSG_CTL_KROUTE:
		if (imsg->hdr.len != IMSG_HEADER_SIZE + sizeof(flags)) {
			log_warnx("kr_show_route: wrong imsg len");
			return;
		}
		memcpy(&flags, imsg->data, sizeof(flags));
		RB_FOREACH(kr, kroute_tree, &krt)
			if (!flags || kr->r.flags & flags) {
				kn = kr;
				do {
					main_imsg_compose_ospfe(IMSG_CTL_KROUTE,
					    imsg->hdr.pid,
					    &kn->r, sizeof(kn->r));
				} while ((kn = kn->next) != NULL);
			}
		break;
	case IMSG_CTL_KROUTE_ADDR:
		if (imsg->hdr.len != IMSG_HEADER_SIZE +
		    sizeof(struct in6_addr)) {
			log_warnx("kr_show_route: wrong imsg len");
			return;
		}
		memcpy(&addr, imsg->data, sizeof(addr));
		kr = NULL;
		kr = kroute_match(&addr);
		if (kr != NULL)
			main_imsg_compose_ospfe(IMSG_CTL_KROUTE, imsg->hdr.pid,
			    &kr->r, sizeof(kr->r));
		break;
	default:
		log_debug("kr_show_route: error handling imsg");
		break;
	}

	main_imsg_compose_ospfe(IMSG_CTL_END, imsg->hdr.pid, NULL, 0);
}
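
/*
 * Redistribution helpers: kr_redist_eval() decides per path whether a
 * kernel route may be announced into OSPF and remembers the best
 * (lowest-metric) candidate, kr_redistribute() walks all paths of a
 * prefix and tells the RDE to add or withdraw the network, and
 * kr_redist_remove() withdraws a path that is being deleted.
 */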
void
kr_redist_remove(struct kroute_node *kh, struct kroute_node *kn)
{
	struct rroute	 rr;

	/* was the route redistributed? */
	if ((kn->r.flags & F_REDISTRIBUTED) == 0)
		return;

	/* remove redistributed flag */
	kn->r.flags &= ~F_REDISTRIBUTED;
	rr.kr = kn->r;
	rr.metric = DEFAULT_REDIST_METRIC;	/* some dummy value */

	/* probably inform the RDE (check if no other path is redistributed) */
	for (kn = kh; kn; kn = kn->next)
		if (kn->r.flags & F_REDISTRIBUTED)
			break;

	if (kn == NULL)
		main_imsg_compose_rde(IMSG_NETWORK_DEL, 0, &rr,
		    sizeof(struct rroute));
}

int
kr_redist_eval(struct kroute *kr, struct rroute *rr)
{
	u_int32_t	 metric = 0;

	/* Only non-ospfd routes are considered for redistribution. */
	if (!(kr->flags & F_KERNEL))
		goto dont_redistribute;

	/* Dynamic routes are not redistributable. */
	if (kr->flags & F_DYNAMIC)
		goto dont_redistribute;

	/* interface is not up and running so don't announce */
	if (kr->flags & F_DOWN)
		goto dont_redistribute;

	/*
	 * We consider loopback, multicast, link- and site-local,
	 * IPv4 mapped and IPv4 compatible addresses as not redistributable.
	 */
	if (IN6_IS_ADDR_LOOPBACK(&kr->prefix) ||
	    IN6_IS_ADDR_MULTICAST(&kr->prefix) ||
	    IN6_IS_ADDR_LINKLOCAL(&kr->prefix) ||
	    IN6_IS_ADDR_SITELOCAL(&kr->prefix) ||
	    IN6_IS_ADDR_V4MAPPED(&kr->prefix) ||
	    IN6_IS_ADDR_V4COMPAT(&kr->prefix))
		goto dont_redistribute;
	/*
	 * Consider networks with nexthop loopback as not redistributable
	 * unless it is a reject or blackhole route.
	 */
	if (IN6_IS_ADDR_LOOPBACK(&kr->nexthop) &&
	    !(kr->flags & (F_BLACKHOLE|F_REJECT)))
		goto dont_redistribute;

	/* Should we redistribute this route? */
	if (!ospf_redistribute(kr, &metric))
		goto dont_redistribute;

	/* prefix should be redistributed */
	kr->flags |= F_REDISTRIBUTED;
	/*
	 * only one of all multipath routes can be redistributed so
	 * redistribute the best one.
	 */
	if (rr->metric > metric) {
		rr->kr = *kr;
		rr->metric = metric;
	}

	return (1);

dont_redistribute:
	/* was the route redistributed? */
	if ((kr->flags & F_REDISTRIBUTED) == 0)
		return (0);

	kr->flags &= ~F_REDISTRIBUTED;
	return (1);
}

void
kr_redistribute(struct kroute_node *kh)
{
	struct kroute_node	*kn;
	struct rroute		 rr;
	int			 redistribute = 0;

	bzero(&rr, sizeof(rr));
	rr.metric = UINT_MAX;
	for (kn = kh; kn; kn = kn->next)
		if (kr_redist_eval(&kn->r, &rr))
			redistribute = 1;

	if (!redistribute)
		return;

	if (rr.kr.flags & F_REDISTRIBUTED) {
		main_imsg_compose_rde(IMSG_NETWORK_ADD, 0, &rr,
		    sizeof(struct rroute));
	} else {
		rr.metric = DEFAULT_REDIST_METRIC;	/* some dummy value */
		rr.kr = kh->r;
		main_imsg_compose_rde(IMSG_NETWORK_DEL, 0, &rr,
		    sizeof(struct rroute));
	}
}

void
kr_reload(void)
{
	struct kroute_node	*kr, *kn;
	u_int32_t		 dummy;
	int			 r;

	RB_FOREACH(kr, kroute_tree, &krt) {
		for (kn = kr; kn; kn = kn->next) {
			r = ospf_redistribute(&kn->r, &dummy);
			/*
			 * if it is redistributed, redistribute again as the
			 * metric may have changed.
			 */
			if ((kn->r.flags & F_REDISTRIBUTED && !r) || r)
				break;
		}
		if (kn) {
			/*
			 * kr_redistribute copes with removes and the RDE
			 * with duplicates
			 */
			kr_redistribute(kr);
		}
	}
}
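
/*
 * The kernel routes are kept in an RB tree keyed on prefix and prefix
 * length; multipath routes for the same key are chained off the head
 * node via the ->next pointer, so tree lookups always return the head
 * of such a list.
 */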
/* rb-tree compare */
int
kroute_compare(struct kroute_node *a, struct kroute_node *b)
{
	int	 i;

	/* XXX maybe switch a & b */
	i = memcmp(&a->r.prefix, &b->r.prefix, sizeof(a->r.prefix));
	if (i)
		return (i);
	if (a->r.prefixlen < b->r.prefixlen)
		return (-1);
	if (a->r.prefixlen > b->r.prefixlen)
		return (1);
	return (0);
}

/* tree management */
struct kroute_node *
kroute_find(const struct in6_addr *prefix, u_int8_t prefixlen)
{
	struct kroute_node	 s;

	s.r.prefix = *prefix;
	s.r.prefixlen = prefixlen;

	return (RB_FIND(kroute_tree, &krt, &s));
}

struct kroute_node *
kroute_matchgw(struct kroute_node *kr, struct in6_addr *nh, unsigned int scope)
{
	while (kr) {
		if (scope == kr->r.scope &&
		    IN6_ARE_ADDR_EQUAL(&kr->r.nexthop, nh))
			return (kr);
		kr = kr->next;
	}

	return (NULL);
}

int
kroute_insert(struct kroute_node *kr)
{
	struct kroute_node	*krm, *krh;

	if ((krh = RB_INSERT(kroute_tree, &krt, kr)) != NULL) {
		/*
		 * Multipath route, add at end of list and clone the
		 * ospfd inserted flag.
		 */
		krm = krh;
		kr->r.flags |= krm->r.flags & F_OSPFD_INSERTED;
		while (krm->next != NULL)
			krm = krm->next;
		krm->next = kr;
		kr->next = NULL;	/* to be sure */
	} else
		krh = kr;

	if (!(kr->r.flags & F_KERNEL)) {
		/* don't validate or redistribute ospf route */
		kr->r.flags &= ~F_DOWN;
		return (0);
	}

	if (kif_validate(kr->r.ifindex))
		kr->r.flags &= ~F_DOWN;
	else
		kr->r.flags |= F_DOWN;

	kr_redistribute(krh);
	return (0);
}

int
kroute_remove(struct kroute_node *kr)
{
	struct kroute_node	*krm;

	if ((krm = RB_FIND(kroute_tree, &krt, kr)) == NULL) {
		log_warnx("kroute_remove failed to find %s/%u",
		    log_in6addr(&kr->r.prefix), kr->r.prefixlen);
		return (-1);
	}

	if (krm == kr) {
		/* head element */
		if (RB_REMOVE(kroute_tree, &krt, kr) == NULL) {
			log_warnx("kroute_remove failed for %s/%u",
			    log_in6addr(&kr->r.prefix), kr->r.prefixlen);
			return (-1);
		}
		if (kr->next != NULL) {
			if (RB_INSERT(kroute_tree, &krt, kr->next) != NULL) {
				log_warnx("kroute_remove failed to add %s/%u",
				    log_in6addr(&kr->r.prefix),
				    kr->r.prefixlen);
				return (-1);
			}
		}
	} else {
		/* somewhere in the list */
		while (krm->next != kr && krm->next != NULL)
			krm = krm->next;
		if (krm->next == NULL) {
			log_warnx("kroute_remove multipath list corrupted "
			    "for %s/%u", log_in6addr(&kr->r.prefix),
			    kr->r.prefixlen);
			return (-1);
		}
		krm->next = kr->next;
	}

	kr_redist_remove(krm, kr);
	rtlabel_unref(kr->r.rtlabel);

	free(kr);
	return (0);
}

void
kroute_clear(void)
{
	struct kroute_node	*kr;

	while ((kr = RB_MIN(kroute_tree, &krt)) != NULL)
		kroute_remove(kr);
}
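
/*
 * kif_update()/kif_validate(): keep the local interface list in sync
 * with the kernel.  kif_update() creates the iface on first sight
 * (taking the name from the AF_LINK sockaddr) and refreshes MTU, flags,
 * type, link state and baudrate; kif_validate() reports whether an
 * interface is up and has link.
 */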
struct iface *
kif_update(u_short ifindex, int flags, struct if_data *ifd,
    struct sockaddr_dl *sdl)
{
	struct iface	*iface;
	char		 ifname[IF_NAMESIZE];

	if ((iface = if_find(ifindex)) == NULL) {
		bzero(ifname, sizeof(ifname));
		if (sdl && sdl->sdl_family == AF_LINK) {
			if (sdl->sdl_nlen >= sizeof(ifname))
				memcpy(ifname, sdl->sdl_data,
				    sizeof(ifname) - 1);
			else if (sdl->sdl_nlen > 0)
				memcpy(ifname, sdl->sdl_data, sdl->sdl_nlen);
			else
				return (NULL);
		} else
			return (NULL);
		if ((iface = if_new(ifindex, ifname)) == NULL)
			return (NULL);
		iface->cflags |= F_IFACE_AVAIL;
	}

	if_update(iface, ifd->ifi_mtu, flags, ifd->ifi_type,
	    ifd->ifi_link_state, ifd->ifi_baudrate);

	return (iface);
}

int
kif_validate(u_short ifindex)
{
	struct iface	*iface;

	if ((iface = if_find(ifindex)) == NULL) {
		log_warnx("interface with index %u not found", ifindex);
		return (-1);
	}

	return ((iface->flags & IFF_UP) && LINK_STATE_IS_UP(iface->linkstate));
}

struct kroute_node *
kroute_match(struct in6_addr *key)
{
	int			 i;
	struct kroute_node	*kr;
	struct in6_addr		 ina;

	/* we will never match the default route */
	for (i = 128; i > 0; i--) {
		inet6applymask(&ina, key, i);
		if ((kr = kroute_find(&ina, i)) != NULL)
			return (kr);
	}

	/* if we don't have a match yet, try to find a default route */
	if ((kr = kroute_find(&in6addr_any, 0)) != NULL)
		return (kr);

	return (NULL);
}

/* misc */
int
protect_lo(void)
{
	struct kroute_node	*kr;

	/* special protection for loopback */
	if ((kr = calloc(1, sizeof(struct kroute_node))) == NULL) {
		log_warn("protect_lo");
		return (-1);
	}
	memcpy(&kr->r.prefix, &in6addr_loopback, sizeof(kr->r.prefix));
	kr->r.prefixlen = 128;
	kr->r.flags = F_KERNEL|F_CONNECTED;

	if (RB_INSERT(kroute_tree, &krt, kr) != NULL)
		free(kr);	/* kernel route already there, no problem */

	return (0);
}
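
/*
 * Routing socket messages carry a variable list of sockaddrs directly
 * after the header; which ones are present is encoded in the RTA_*
 * bitmask and each sockaddr is padded to a multiple of sizeof(long).
 * ROUNDUP() computes that padded length and get_rtaddrs() walks the
 * buffer, filling rti_info[] indexed by the RTAX_* constants.
 */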
#define ROUNDUP(a)	\
    ((a) > 0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))

void
get_rtaddrs(int addrs, struct sockaddr *sa, struct sockaddr **rti_info)
{
	int	 i;

	for (i = 0; i < RTAX_MAX; i++) {
		if (addrs & (1 << i)) {
			rti_info[i] = sa;
			sa = (struct sockaddr *)((char *)(sa) +
			    ROUNDUP(sa->sa_len));
		} else
			rti_info[i] = NULL;
	}
}

void
if_change(u_short ifindex, int flags, struct if_data *ifd)
{
	struct kroute_node	*kr, *tkr;
	struct iface		*iface;
	u_int8_t		 wasvalid, isvalid;

	wasvalid = kif_validate(ifindex);

	if ((iface = kif_update(ifindex, flags, ifd, NULL)) == NULL) {
		log_warn("if_change: kif_update(%u)", ifindex);
		return;
	}

	isvalid = (iface->flags & IFF_UP) &&
	    LINK_STATE_IS_UP(iface->linkstate);

	if (wasvalid == isvalid)
		return;		/* nothing changed wrt validity */

	/* inform engine and rde about state change if interface is used */
	if (iface->cflags & F_IFACE_CONFIGURED) {
		main_imsg_compose_ospfe(IMSG_IFINFO, 0, iface,
		    sizeof(struct iface));
		main_imsg_compose_rde(IMSG_IFINFO, 0, iface,
		    sizeof(struct iface));
	}

	/* update redistribute list */
	RB_FOREACH(kr, kroute_tree, &krt) {
		for (tkr = kr; tkr != NULL; tkr = tkr->next) {
			if (tkr->r.ifindex == ifindex) {
				if (isvalid)
					tkr->r.flags &= ~F_DOWN;
				else
					tkr->r.flags |= F_DOWN;
			}
		}
		kr_redistribute(kr);
	}
}
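
/*
 * if_newaddr()/if_deladdr(): track addresses on an interface.  Only
 * link-local and global-scope addresses are kept; the embedded scope
 * is cleared before the address is stored, and the ospfe and the RDE
 * are notified when the interface is part of the configuration.
 */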
void
if_newaddr(u_short ifindex, struct sockaddr_in6 *ifa, struct sockaddr_in6 *mask,
    struct sockaddr_in6 *brd)
{
	struct iface		*iface;
	struct iface_addr	*ia;
	struct ifaddrchange	 ifc;

	if (ifa == NULL || ifa->sin6_family != AF_INET6)
		return;
	if ((iface = if_find(ifindex)) == NULL) {
		log_warnx("if_newaddr: corresponding if %d not found", ifindex);
		return;
	}

	/* We only care about link-local and global-scope. */
	if (IN6_IS_ADDR_UNSPECIFIED(&ifa->sin6_addr) ||
	    IN6_IS_ADDR_LOOPBACK(&ifa->sin6_addr) ||
	    IN6_IS_ADDR_MULTICAST(&ifa->sin6_addr) ||
	    IN6_IS_ADDR_SITELOCAL(&ifa->sin6_addr) ||
	    IN6_IS_ADDR_V4MAPPED(&ifa->sin6_addr) ||
	    IN6_IS_ADDR_V4COMPAT(&ifa->sin6_addr))
		return;

	clearscope(&ifa->sin6_addr);

	if (IN6_IS_ADDR_LINKLOCAL(&ifa->sin6_addr) ||
	    iface->flags & IFF_LOOPBACK)
		iface->addr = ifa->sin6_addr;

	if ((ia = calloc(1, sizeof(struct iface_addr))) == NULL)
		fatal("if_newaddr");

	ia->addr = ifa->sin6_addr;

	if (mask)
		ia->prefixlen = mask2prefixlen(mask);
	else
		ia->prefixlen = 0;
	if (brd && brd->sin6_family == AF_INET6)
		ia->dstbrd = brd->sin6_addr;
	else
		bzero(&ia->dstbrd, sizeof(ia->dstbrd));

	switch (iface->type) {
	case IF_TYPE_BROADCAST:
	case IF_TYPE_NBMA:
		log_debug("if_newaddr: ifindex %u, addr %s/%d",
		    ifindex, log_in6addr(&ia->addr), ia->prefixlen);
		break;
	case IF_TYPE_VIRTUALLINK:	/* FIXME */
		break;
	case IF_TYPE_POINTOPOINT:
	case IF_TYPE_POINTOMULTIPOINT:
		log_debug("if_newaddr: ifindex %u, addr %s/%d, "
		    "dest %s", ifindex, log_in6addr(&ia->addr),
		    ia->prefixlen, log_in6addr(&ia->dstbrd));
		break;
	default:
		fatalx("if_newaddr: unknown interface type");
	}

	TAILQ_INSERT_TAIL(&iface->ifa_list, ia, entry);
	/* inform engine and rde if interface is used */
	if (iface->cflags & F_IFACE_CONFIGURED) {
		ifc.addr = ia->addr;
		ifc.dstbrd = ia->dstbrd;
		ifc.prefixlen = ia->prefixlen;
		ifc.ifindex = ifindex;
		main_imsg_compose_ospfe(IMSG_IFADDRNEW, 0, &ifc, sizeof(ifc));
		main_imsg_compose_rde(IMSG_IFADDRNEW, 0, &ifc, sizeof(ifc));
	}
}
void
if_deladdr(u_short ifindex, struct sockaddr_in6 *ifa, struct sockaddr_in6 *mask,
    struct sockaddr_in6 *brd)
{
	struct iface		*iface;
	struct iface_addr	*ia, *nia;
	struct ifaddrchange	 ifc;

	if (ifa == NULL || ifa->sin6_family != AF_INET6)
		return;
	if ((iface = if_find(ifindex)) == NULL) {
		log_warnx("if_deladdr: corresponding if %d not found", ifindex);
		return;
	}

	/* We only care about link-local and global-scope. */
	if (IN6_IS_ADDR_UNSPECIFIED(&ifa->sin6_addr) ||
	    IN6_IS_ADDR_LOOPBACK(&ifa->sin6_addr) ||
	    IN6_IS_ADDR_MULTICAST(&ifa->sin6_addr) ||
	    IN6_IS_ADDR_SITELOCAL(&ifa->sin6_addr) ||
	    IN6_IS_ADDR_V4MAPPED(&ifa->sin6_addr) ||
	    IN6_IS_ADDR_V4COMPAT(&ifa->sin6_addr))
		return;

	clearscope(&ifa->sin6_addr);

	for (ia = TAILQ_FIRST(&iface->ifa_list); ia != NULL; ia = nia) {
		nia = TAILQ_NEXT(ia, entry);

		if (IN6_ARE_ADDR_EQUAL(&ia->addr, &ifa->sin6_addr)) {
			log_debug("if_deladdr: ifindex %u, addr %s/%d",
			    ifindex, log_in6addr(&ia->addr), ia->prefixlen);
			TAILQ_REMOVE(&iface->ifa_list, ia, entry);
			/* inform engine and rde if interface is used */
			if (iface->cflags & F_IFACE_CONFIGURED) {
				ifc.addr = ia->addr;
				ifc.dstbrd = ia->dstbrd;
				ifc.prefixlen = ia->prefixlen;
				ifc.ifindex = ifindex;
				main_imsg_compose_ospfe(IMSG_IFADDRDEL, 0, &ifc,
				    sizeof(ifc));
				main_imsg_compose_rde(IMSG_IFADDRDEL, 0, &ifc,
				    sizeof(ifc));
			}
			free(ia);
			return;
		}
	}
}

void
if_announce(void *msg)
{
	struct if_announcemsghdr	*ifan;
	struct iface			*iface;

	ifan = msg;

	switch (ifan->ifan_what) {
	case IFAN_ARRIVAL:
		if ((iface = if_new(ifan->ifan_index, ifan->ifan_name)) == NULL)
			fatal("if_announce failed");
		iface->cflags |= F_IFACE_AVAIL;
		break;
	case IFAN_DEPARTURE:
		iface = if_find(ifan->ifan_index);
		if (iface->cflags & F_IFACE_CONFIGURED) {
			main_imsg_compose_rde(IMSG_IFDELETE, 0,
			    &iface->ifindex, sizeof(iface->ifindex));
			main_imsg_compose_ospfe(IMSG_IFDELETE, 0,
			    &iface->ifindex, sizeof(iface->ifindex));
		}
		if_del(iface);
		break;
	}
}
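
/*
 * send_rtmsg(): build a routing socket message as an iovec:
 *
 *	rt_msghdr | RTA_DST sockaddr_in6 | RTA_GATEWAY (sockaddr_in6 or
 *	sockaddr_dl) | RTA_NETMASK sockaddr_in6 | RTA_LABEL (optional)
 *
 * with every sockaddr padded via ROUNDUP(), and hand it to the kernel
 * with writev().  ESRCH on RTM_CHANGE is retried as RTM_ADD; ESRCH on
 * RTM_DELETE means the route already vanished and is ignored.
 */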
/* rtsock */
int
send_rtmsg(int fd, int action, struct kroute *kroute)
{
	struct iovec		iov[5];
	struct rt_msghdr	hdr;
	struct pad {
		struct sockaddr_in6	addr;
		char			pad[sizeof(long)]; /* thank you IPv6 */
	} prefix, nexthop, mask;
	struct {
		struct sockaddr_dl	addr;
		char			pad[sizeof(long)];
	} ifp;
	struct sockaddr_rtlabel	sa_rl;
	int			iovcnt = 0;
	const char		*label;

	if (kr_state.fib_sync == 0)
		return (0);

	/* initialize header */
	bzero(&hdr, sizeof(hdr));
	hdr.rtm_version = RTM_VERSION;
	hdr.rtm_type = action;
	hdr.rtm_flags = RTF_UP;
	hdr.rtm_priority = RTP_OSPF;
	if (action == RTM_CHANGE)
		hdr.rtm_fmask = RTF_REJECT|RTF_BLACKHOLE;
	hdr.rtm_seq = kr_state.rtseq++;	/* overflow doesn't matter */
	hdr.rtm_hdrlen = sizeof(hdr);
	hdr.rtm_msglen = sizeof(hdr);
	/* adjust iovec */
	iov[iovcnt].iov_base = &hdr;
	iov[iovcnt++].iov_len = sizeof(hdr);

	bzero(&prefix, sizeof(prefix));
	prefix.addr.sin6_len = sizeof(struct sockaddr_in6);
	prefix.addr.sin6_family = AF_INET6;
	prefix.addr.sin6_addr = kroute->prefix;
	/* adjust header */
	hdr.rtm_addrs |= RTA_DST;
	hdr.rtm_msglen += ROUNDUP(sizeof(struct sockaddr_in6));
	/* adjust iovec */
	iov[iovcnt].iov_base = &prefix;
	iov[iovcnt++].iov_len = ROUNDUP(sizeof(struct sockaddr_in6));

	if (!IN6_IS_ADDR_UNSPECIFIED(&kroute->nexthop)) {
		bzero(&nexthop, sizeof(nexthop));
		nexthop.addr.sin6_len = sizeof(struct sockaddr_in6);
		nexthop.addr.sin6_family = AF_INET6;
		nexthop.addr.sin6_addr = kroute->nexthop;
		nexthop.addr.sin6_scope_id = kroute->scope;
		/*
		 * XXX we should set the sin6_scope_id but the kernel
		 * XXX does not expect it that way. It must be fiddled
		 * XXX into the sin6_addr. Welcome to the typical
		 * XXX IPv6 insanity and all without wine bottles.
		 */
		embedscope(&nexthop.addr);

		/* adjust header */
		hdr.rtm_flags |= RTF_GATEWAY;
		hdr.rtm_addrs |= RTA_GATEWAY;
		hdr.rtm_msglen += ROUNDUP(sizeof(struct sockaddr_in6));
		/* adjust iovec */
		iov[iovcnt].iov_base = &nexthop;
		iov[iovcnt++].iov_len = ROUNDUP(sizeof(struct sockaddr_in6));
	} else if (kroute->ifindex) {
		/*
		 * We don't have an interface address in that network,
		 * so we install a cloning route. The kernel will then
		 * do neighbor discovery.
		 */
		bzero(&ifp, sizeof(ifp));
		ifp.addr.sdl_len = sizeof(struct sockaddr_dl);
		ifp.addr.sdl_family = AF_LINK;

		ifp.addr.sdl_index = kroute->ifindex;
		/* adjust header */
		hdr.rtm_flags |= RTF_CLONING;
		hdr.rtm_addrs |= RTA_GATEWAY;
		hdr.rtm_msglen += ROUNDUP(sizeof(struct sockaddr_dl));
		/* adjust iovec */
		iov[iovcnt].iov_base = &ifp;
		iov[iovcnt++].iov_len = ROUNDUP(sizeof(struct sockaddr_dl));
	}

	bzero(&mask, sizeof(mask));
	mask.addr.sin6_len = sizeof(struct sockaddr_in6);
	mask.addr.sin6_family = AF_INET6;
	mask.addr.sin6_addr = *prefixlen2mask(kroute->prefixlen);
	/* adjust header */
	if (kroute->prefixlen == 128)
		hdr.rtm_flags |= RTF_HOST;
	hdr.rtm_addrs |= RTA_NETMASK;
	hdr.rtm_msglen += ROUNDUP(sizeof(struct sockaddr_in6));
	/* adjust iovec */
	iov[iovcnt].iov_base = &mask;
	iov[iovcnt++].iov_len = ROUNDUP(sizeof(struct sockaddr_in6));

	if (kroute->rtlabel != 0) {
		sa_rl.sr_len = sizeof(sa_rl);
		sa_rl.sr_family = AF_UNSPEC;
		label = rtlabel_id2name(kroute->rtlabel);
		if (strlcpy(sa_rl.sr_label, label,
		    sizeof(sa_rl.sr_label)) >= sizeof(sa_rl.sr_label)) {
			log_warnx("send_rtmsg: invalid rtlabel");
			return (-1);
		}
		/* adjust header */
		hdr.rtm_addrs |= RTA_LABEL;
		hdr.rtm_msglen += sizeof(sa_rl);
		/* adjust iovec */
		iov[iovcnt].iov_base = &sa_rl;
		iov[iovcnt++].iov_len = sizeof(sa_rl);
	}

retry:
	if (writev(fd, iov, iovcnt) == -1) {
		if (errno == ESRCH) {
			if (hdr.rtm_type == RTM_CHANGE) {
				hdr.rtm_type = RTM_ADD;
				goto retry;
			} else if (hdr.rtm_type == RTM_DELETE) {
				log_info("route %s/%u vanished before delete",
				    log_sockaddr(&prefix), kroute->prefixlen);
				return (0);
			}
		}
		log_warn("send_rtmsg: action %u, prefix %s/%u", hdr.rtm_type,
		    log_sockaddr(&prefix), kroute->prefixlen);
		return (0);
	}

	return (0);
}
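
/*
 * fetchtable(): bootstrap the kroute tree with a one-shot dump of the
 * kernel's IPv6 routing table via the CTL_NET/PF_ROUTE/NET_RT_DUMP
 * sysctl, skipping ND cache entries and deleting routes flagged with
 * RTF_PROTO2 (presumably leftovers from an earlier ospf6d run).
 */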
int
fetchtable(void)
{
	size_t			 len;
	int			 mib[7];
	char			*buf, *next, *lim;
	struct rt_msghdr	*rtm;
	struct sockaddr		*sa, *rti_info[RTAX_MAX];
	struct sockaddr_in6	*sa_in6;
	struct sockaddr_rtlabel	*label;
	struct kroute_node	*kr;

	mib[0] = CTL_NET;
	mib[1] = PF_ROUTE;
	mib[2] = 0;
	mib[3] = AF_INET6;
	mib[4] = NET_RT_DUMP;
	mib[5] = 0;
	mib[6] = 0;	/* rtableid */

	if (sysctl(mib, 7, NULL, &len, NULL, 0) == -1) {
		log_warn("sysctl");
		return (-1);
	}
	if ((buf = malloc(len)) == NULL) {
		log_warn("fetchtable");
		return (-1);
	}
	if (sysctl(mib, 7, buf, &len, NULL, 0) == -1) {
		log_warn("sysctl");
		free(buf);
		return (-1);
	}

	lim = buf + len;
	for (next = buf; next < lim; next += rtm->rtm_msglen) {
		rtm = (struct rt_msghdr *)next;
		if (rtm->rtm_version != RTM_VERSION)
			continue;
		sa = (struct sockaddr *)(next + rtm->rtm_hdrlen);
		get_rtaddrs(rtm->rtm_addrs, sa, rti_info);

		if ((sa = rti_info[RTAX_DST]) == NULL)
			continue;

		if (rtm->rtm_flags & RTF_LLINFO)	/* arp cache */
			continue;

		if ((kr = calloc(1, sizeof(struct kroute_node))) == NULL) {
			log_warn("fetchtable");
			free(buf);
			return (-1);
		}

		kr->r.flags = F_KERNEL;

		switch (sa->sa_family) {
		case AF_INET6:
			kr->r.prefix =
			    ((struct sockaddr_in6 *)sa)->sin6_addr;
			sa_in6 = (struct sockaddr_in6 *)rti_info[RTAX_NETMASK];
			if (rtm->rtm_flags & RTF_STATIC)
				kr->r.flags |= F_STATIC;
			if (rtm->rtm_flags & RTF_BLACKHOLE)
				kr->r.flags |= F_BLACKHOLE;
			if (rtm->rtm_flags & RTF_REJECT)
				kr->r.flags |= F_REJECT;
			if (rtm->rtm_flags & RTF_DYNAMIC)
				kr->r.flags |= F_DYNAMIC;
			if (rtm->rtm_flags & RTF_PROTO1)
				kr->r.flags |= F_BGPD_INSERTED;
			if (sa_in6 != NULL) {
				if (sa_in6->sin6_len == 0)
					break;
				kr->r.prefixlen =
				    mask2prefixlen(sa_in6);
			} else if (rtm->rtm_flags & RTF_HOST)
				kr->r.prefixlen = 128;
			else
				fatalx("classful IPv6 route?!!");
			break;
		default:
			free(kr);
			continue;
		}

		kr->r.ifindex = rtm->rtm_index;
		if ((sa = rti_info[RTAX_GATEWAY]) != NULL)
			switch (sa->sa_family) {
			case AF_INET6:
				sa_in6 = (struct sockaddr_in6 *)sa;
				/*
				 * XXX The kernel provides the scope via the
				 * XXX kame hack instead of the scope_id field.
				 */
				recoverscope(sa_in6);
				kr->r.nexthop = sa_in6->sin6_addr;
				kr->r.scope = sa_in6->sin6_scope_id;
				break;
			case AF_LINK:
				kr->r.flags |= F_CONNECTED;
				break;
			}

		if (rtm->rtm_flags & RTF_PROTO2) {
			send_rtmsg(kr_state.fd, RTM_DELETE, &kr->r);
			free(kr);
		} else {
			if ((label = (struct sockaddr_rtlabel *)
			    rti_info[RTAX_LABEL]) != NULL) {
				kr->r.rtlabel =
				    rtlabel_name2id(label->sr_label);
				kr->r.ext_tag =
				    rtlabel_id2tag(kr->r.rtlabel);
			}
			kroute_insert(kr);
		}
	}
	free(buf);
	return (0);
}
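
/*
 * fetchifs(): dump the interface list (and the addresses attached to
 * it) with the NET_RT_IFLIST sysctl; RTM_IFINFO entries feed
 * kif_update() and RTM_NEWADDR entries feed if_newaddr().
 */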
int
fetchifs(u_short ifindex)
{
	size_t			 len;
	int			 mib[6];
	char			*buf, *next, *lim;
	struct rt_msghdr	*rtm;
	struct if_msghdr	 ifm;
	struct ifa_msghdr	*ifam;
	struct iface		*iface;
	struct sockaddr		*sa, *rti_info[RTAX_MAX];

	mib[0] = CTL_NET;
	mib[1] = PF_ROUTE;
	mib[2] = 0;
	mib[3] = AF_INET6;
	mib[4] = NET_RT_IFLIST;
	mib[5] = ifindex;

	if (sysctl(mib, 6, NULL, &len, NULL, 0) == -1) {
		log_warn("sysctl");
		return (-1);
	}
	if ((buf = malloc(len)) == NULL) {
		log_warn("fetchifs");
		return (-1);
	}
	if (sysctl(mib, 6, buf, &len, NULL, 0) == -1) {
		log_warn("sysctl");
		free(buf);
		return (-1);
	}

	lim = buf + len;
	for (next = buf; next < lim; next += rtm->rtm_msglen) {
		rtm = (struct rt_msghdr *)next;
		if (rtm->rtm_version != RTM_VERSION)
			continue;
		switch (rtm->rtm_type) {
		case RTM_IFINFO:
			bcopy(rtm, &ifm, sizeof ifm);
			sa = (struct sockaddr *)(next + sizeof(ifm));
			get_rtaddrs(ifm.ifm_addrs, sa, rti_info);

			if ((iface = kif_update(ifm.ifm_index,
			    ifm.ifm_flags, &ifm.ifm_data,
			    (struct sockaddr_dl *)rti_info[RTAX_IFP])) == NULL)
				break;
		case RTM_NEWADDR:
			ifam = (struct ifa_msghdr *)rtm;
			if ((ifam->ifam_addrs & (RTA_NETMASK | RTA_IFA |
			    RTA_BRD)) == 0)
				break;
			sa = (struct sockaddr *)(ifam + 1);
			get_rtaddrs(ifam->ifam_addrs, sa, rti_info);

			if_newaddr(ifam->ifam_index,
			    (struct sockaddr_in6 *)rti_info[RTAX_IFA],
			    (struct sockaddr_in6 *)rti_info[RTAX_NETMASK],
			    (struct sockaddr_in6 *)rti_info[RTAX_BRD]);
			break;
		}
	}
	free(buf);
	return (0);
}
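
/*
 * dispatch_rtmsg(): read pending messages from the routing socket and
 * update the local view: RTM_ADD/RTM_CHANGE/RTM_DELETE adjust the
 * kroute tree (ignoring our own messages and other routing tables),
 * while RTM_IFINFO, RTM_NEWADDR, RTM_DELADDR and RTM_IFANNOUNCE update
 * the interface state.
 */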
int
dispatch_rtmsg(void)
{
	char			 buf[RT_BUF_SIZE];
	ssize_t			 n;
	char			*next, *lim;
	struct rt_msghdr	*rtm;
	struct if_msghdr	 ifm;
	struct ifa_msghdr	*ifam;
	struct sockaddr		*sa, *rti_info[RTAX_MAX];
	struct sockaddr_in6	*sa_in6;
	struct sockaddr_rtlabel	*label;
	struct kroute_node	*kr, *okr;
	struct in6_addr		 prefix, nexthop;
	u_int8_t		 prefixlen;
	int			 flags, mpath;
	unsigned int		 scope;
	u_short			 ifindex = 0;

	if ((n = read(kr_state.fd, &buf, sizeof(buf))) == -1) {
		log_warn("dispatch_rtmsg: read error");
		return (-1);
	}

	if (n == 0) {
		log_warnx("routing socket closed");
		return (-1);
	}

	lim = buf + n;
	for (next = buf; next < lim; next += rtm->rtm_msglen) {
		rtm = (struct rt_msghdr *)next;
		if (rtm->rtm_version != RTM_VERSION)
			continue;

		bzero(&prefix, sizeof(prefix));
		bzero(&nexthop, sizeof(nexthop));
		scope = 0;
		prefixlen = 0;
		flags = F_KERNEL;
		mpath = 0;

		if (rtm->rtm_type == RTM_ADD || rtm->rtm_type == RTM_CHANGE ||
		    rtm->rtm_type == RTM_DELETE) {
			sa = (struct sockaddr *)(next + rtm->rtm_hdrlen);
			get_rtaddrs(rtm->rtm_addrs, sa, rti_info);

			if (rtm->rtm_tableid != 0)
				continue;

			if (rtm->rtm_pid == kr_state.pid) /* caused by us */
				continue;

			if (rtm->rtm_errno)		/* failed attempts... */
				continue;

			if (rtm->rtm_flags & RTF_LLINFO) /* arp cache */
				continue;

#ifdef RTF_MPATH
			if (rtm->rtm_flags & RTF_MPATH)
				mpath = 1;
#endif
			switch (sa->sa_family) {
			case AF_INET6:
				prefix =
				    ((struct sockaddr_in6 *)sa)->sin6_addr;
				sa_in6 = (struct sockaddr_in6 *)
				    rti_info[RTAX_NETMASK];
				if (sa_in6 != NULL) {
					if (sa_in6->sin6_len != 0)
						prefixlen = mask2prefixlen(
						    sa_in6);
				} else if (rtm->rtm_flags & RTF_HOST)
					prefixlen = 128;
				else
					fatalx("classful IPv6 address?!!");
				if (rtm->rtm_flags & RTF_STATIC)
					flags |= F_STATIC;
				if (rtm->rtm_flags & RTF_BLACKHOLE)
					flags |= F_BLACKHOLE;
				if (rtm->rtm_flags & RTF_REJECT)
					flags |= F_REJECT;
				if (rtm->rtm_flags & RTF_DYNAMIC)
					flags |= F_DYNAMIC;
				if (rtm->rtm_flags & RTF_PROTO1)
					flags |= F_BGPD_INSERTED;
				break;
			default:
				continue;
			}

			ifindex = rtm->rtm_index;
			if ((sa = rti_info[RTAX_GATEWAY]) != NULL) {
				switch (sa->sa_family) {
				case AF_INET6:
					sa_in6 = (struct sockaddr_in6 *)sa;
					/*
					 * XXX The kernel provides the scope
					 * XXX via the kame hack instead of
					 * XXX the scope_id field.
					 */
					recoverscope(sa_in6);
					nexthop = sa_in6->sin6_addr;
					scope = sa_in6->sin6_scope_id;
					break;
				case AF_LINK:
					flags |= F_CONNECTED;
					break;
				}
			}
		}

		switch (rtm->rtm_type) {
		case RTM_ADD:
		case RTM_CHANGE:
			if (IN6_IS_ADDR_UNSPECIFIED(&nexthop) &&
			    !(flags & F_CONNECTED)) {
				log_warnx("dispatch_rtmsg no nexthop for %s/%u",
				    log_in6addr(&prefix), prefixlen);
				continue;
			}

			if ((okr = kroute_find(&prefix, prefixlen)) !=
			    NULL) {
				/* just add new multipath routes */
				if (mpath && rtm->rtm_type == RTM_ADD)
					goto add;
				/* get the correct route */
				kr = okr;
				if (mpath && (kr = kroute_matchgw(okr,
				    &nexthop, scope)) == NULL) {
					log_warnx("dispatch_rtmsg mpath route"
					    " not found");
					/* add routes we missed out earlier */
					goto add;
				}

				/*
				 * ospf route overridden by kernel. Preference
				 * of the route is not checked because this is
				 * forced -- most probably by a user.
				 */
				if (kr->r.flags & F_OSPFD_INSERTED)
					flags |= F_OSPFD_INSERTED;
				if (kr->r.flags & F_REDISTRIBUTED)
					flags |= F_REDISTRIBUTED;
				kr->r.nexthop = nexthop;
				kr->r.scope = scope;
				kr->r.flags = flags;
				kr->r.ifindex = ifindex;

				rtlabel_unref(kr->r.rtlabel);
				kr->r.rtlabel = 0;
				kr->r.ext_tag = 0;
				if ((label = (struct sockaddr_rtlabel *)
				    rti_info[RTAX_LABEL]) != NULL) {
					kr->r.rtlabel =
					    rtlabel_name2id(label->sr_label);
					kr->r.ext_tag =
					    rtlabel_id2tag(kr->r.rtlabel);
				}

				if (kif_validate(kr->r.ifindex))
					kr->r.flags &= ~F_DOWN;
				else
					kr->r.flags |= F_DOWN;

				/* just readd, the RDE will care */
				kr_redistribute(okr);
			} else {
add:
				if ((kr = calloc(1,
				    sizeof(struct kroute_node))) == NULL) {
					log_warn("dispatch_rtmsg");
					return (-1);
				}
				kr->r.prefix = prefix;
				kr->r.prefixlen = prefixlen;
				kr->r.nexthop = nexthop;
				kr->r.scope = scope;
				kr->r.flags = flags;
				kr->r.ifindex = ifindex;

				if ((label = (struct sockaddr_rtlabel *)
				    rti_info[RTAX_LABEL]) != NULL) {
					kr->r.rtlabel =
					    rtlabel_name2id(label->sr_label);
					kr->r.ext_tag =
					    rtlabel_id2tag(kr->r.rtlabel);
				}

				kroute_insert(kr);
			}
			break;
		case RTM_DELETE:
			if ((kr = kroute_find(&prefix, prefixlen)) ==
			    NULL)
				continue;
			if (!(kr->r.flags & F_KERNEL))
				continue;
			/* get the correct route */
			okr = kr;
			if (mpath && (kr = kroute_matchgw(kr, &nexthop,
			    scope)) == NULL) {
				log_warnx("dispatch_rtmsg mpath route"
				    " not found");
				return (-1);
			}
			/*
			 * last route is getting removed; request the
			 * ospf route from the RDE to insert instead
			 */
			if (okr == kr && kr->next == NULL &&
			    kr->r.flags & F_OSPFD_INSERTED)
				main_imsg_compose_rde(IMSG_KROUTE_GET, 0,
				    &kr->r, sizeof(struct kroute));
			if (kroute_remove(kr) == -1)
				return (-1);
			break;
		case RTM_IFINFO:
			memcpy(&ifm, next, sizeof(ifm));
			if_change(ifm.ifm_index, ifm.ifm_flags,
			    &ifm.ifm_data);
			break;
		case RTM_NEWADDR:
			ifam = (struct ifa_msghdr *)rtm;
			if ((ifam->ifam_addrs & (RTA_NETMASK | RTA_IFA |
			    RTA_BRD)) == 0)
				break;
			sa = (struct sockaddr *)(ifam + 1);
			get_rtaddrs(ifam->ifam_addrs, sa, rti_info);

			if_newaddr(ifam->ifam_index,
			    (struct sockaddr_in6 *)rti_info[RTAX_IFA],
			    (struct sockaddr_in6 *)rti_info[RTAX_NETMASK],
			    (struct sockaddr_in6 *)rti_info[RTAX_BRD]);
			break;
		case RTM_DELADDR:
			ifam = (struct ifa_msghdr *)rtm;
			if ((ifam->ifam_addrs & (RTA_NETMASK | RTA_IFA |
			    RTA_BRD)) == 0)
				break;
			sa = (struct sockaddr *)(ifam + 1);
			get_rtaddrs(ifam->ifam_addrs, sa, rti_info);

			if_deladdr(ifam->ifam_index,
			    (struct sockaddr_in6 *)rti_info[RTAX_IFA],
			    (struct sockaddr_in6 *)rti_info[RTAX_NETMASK],
			    (struct sockaddr_in6 *)rti_info[RTAX_BRD]);
			break;
		case RTM_IFANNOUNCE:
			if_announce(next);
			break;
		default:
			/* ignore for now */
			break;
		}
	}
	return (0);
}