/*	$OpenBSD: rde.c,v 1.32 2024/11/21 13:38:14 claudio Exp $ */

/*
 * Copyright (c) 2015 Renato Westphal <renato@openbsd.org>
 * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
 * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/socket.h>
#include <net/route.h>

#include <errno.h>
#include <pwd.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "eigrpd.h"
#include "eigrpe.h"
#include "rde.h"
#include "log.h"

static void		 rde_sig_handler(int sig, short, void *);
static __dead void	 rde_shutdown(void);
static void		 rde_dispatch_imsg(int, short, void *);
static void		 rde_dispatch_parent(int, short, void *);
static struct redistribute *eigrp_redistribute(struct eigrp *, struct kroute *);
static void		 rt_redist_set(struct kroute *, int);
static void		 rt_snap(struct rde_nbr *);
static struct ctl_rt	*rt_to_ctl(struct rt_node *, struct eigrp_route *);
static void		 rt_dump(struct ctl_show_topology_req *, pid_t);

struct eigrpd_conf	*rdeconf;

static struct imsgev	*iev_eigrpe;
static struct imsgev	*iev_main;

static void
rde_sig_handler(int sig, short event, void *arg)
{
	/*
	 * signal handler rules don't apply, libevent decouples for us
	 */

	switch (sig) {
	case SIGINT:
	case SIGTERM:
		rde_shutdown();
		/* NOTREACHED */
	default:
		fatalx("unexpected signal");
	}
}

/* route decision engine */
void
rde(int debug, int verbose)
{
	struct event		 ev_sigint, ev_sigterm;
	struct timeval		 now;
	struct passwd		*pw;

	rdeconf = config_new_empty();

	log_init(debug);
	log_verbose(verbose);

	if ((pw = getpwnam(EIGRPD_USER)) == NULL)
		fatal("getpwnam");

	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	setproctitle("route decision engine");
	log_procname = "rde";

	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

	if (pledge("stdio recvfd", NULL) == -1)
		fatal("pledge");

	event_init();

	/* setup signal handler */
	signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
	signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
	signal_add(&ev_sigint, NULL);
	signal_add(&ev_sigterm, NULL);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);

	/* setup pipe and event handler to the parent process */
	if ((iev_main = malloc(sizeof(struct imsgev))) == NULL)
		fatal(NULL);
	if (imsgbuf_init(&iev_main->ibuf, 3) == -1)
		fatal(NULL);
	imsgbuf_allow_fdpass(&iev_main->ibuf);
	iev_main->handler = rde_dispatch_parent;
	iev_main->events = EV_READ;
	event_set(&iev_main->ev, iev_main->ibuf.fd, iev_main->events,
	    iev_main->handler, iev_main);
	event_add(&iev_main->ev, NULL);

	gettimeofday(&now, NULL);
	global.uptime = now.tv_sec;

	event_dispatch();

	rde_shutdown();
}

static __dead void
rde_shutdown(void)
{
	/* close pipes */
	imsgbuf_clear(&iev_eigrpe->ibuf);
	close(iev_eigrpe->ibuf.fd);
	imsgbuf_clear(&iev_main->ibuf);
	close(iev_main->ibuf.fd);

	config_clear(rdeconf, PROC_RDE_ENGINE);

	free(iev_eigrpe);
	free(iev_main);

	log_info("route decision engine exiting");
	exit(0);
}

int
rde_imsg_compose_parent(int type, pid_t pid, void *data, uint16_t datalen)
{
	return (imsg_compose_event(iev_main, type, 0, pid, -1,
	    data, datalen));
}

int
rde_imsg_compose_eigrpe(int type, uint32_t peerid, pid_t pid, void *data,
    uint16_t datalen)
{
	return (imsg_compose_event(iev_eigrpe, type, peerid, pid, -1,
	    data, datalen));
}

static void
rde_dispatch_imsg(int fd, short event, void *bula)
{
	struct imsgev		*iev = bula;
	struct imsgbuf		*ibuf;
	struct imsg		 imsg;
	struct rde_nbr		*nbr;
	struct rde_nbr		 new;
	struct rinfo		 rinfo;
	ssize_t			 n;
	int			 shut = 0, verbose;

	ibuf = &iev->ibuf;

	if (event & EV_READ) {
		if ((n = imsgbuf_read(ibuf)) == -1)
			fatal("imsgbuf_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
	}
	if (event & EV_WRITE) {
		if (imsgbuf_write(ibuf) == -1) {
			if (errno == EPIPE)	/* connection closed */
				shut = 1;
			else
				fatal("imsgbuf_write");
		}
	}

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_imsg: imsg_get error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_NEIGHBOR_UP:
			if (imsg.hdr.len - IMSG_HEADER_SIZE !=
			    sizeof(struct rde_nbr))
				fatalx("invalid size of neighbor request");
			memcpy(&new, imsg.data, sizeof(new));

			if (rde_nbr_find(imsg.hdr.peerid))
				fatalx("rde_dispatch_imsg: "
				    "neighbor already exists");
			rde_nbr_new(imsg.hdr.peerid, &new);
			break;
		case IMSG_NEIGHBOR_DOWN:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL) {
				log_debug("%s: cannot find rde neighbor",
				    __func__);
				break;
			}

			rde_check_link_down_nbr(nbr);
			rde_flush_queries();
			rde_nbr_del(rde_nbr_find(imsg.hdr.peerid), 0);
			break;
		case IMSG_RECV_UPDATE_INIT:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL) {
				log_debug("%s: cannot find rde neighbor",
				    __func__);
				break;
			}

			rt_snap(nbr);
			break;
		case IMSG_RECV_UPDATE:
		case IMSG_RECV_QUERY:
		case IMSG_RECV_REPLY:
		case IMSG_RECV_SIAQUERY:
		case IMSG_RECV_SIAREPLY:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL) {
				log_debug("%s: cannot find rde neighbor",
				    __func__);
				break;
			}

			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rinfo))
				fatalx("invalid size of rinfo");
			memcpy(&rinfo, imsg.data, sizeof(rinfo));

			switch (imsg.hdr.type) {
			case IMSG_RECV_UPDATE:
				rde_check_update(nbr, &rinfo);
				break;
			case IMSG_RECV_QUERY:
				rde_check_query(nbr, &rinfo, 0);
				break;
			case IMSG_RECV_REPLY:
				rde_check_reply(nbr, &rinfo, 0);
				break;
			case IMSG_RECV_SIAQUERY:
				rde_check_query(nbr, &rinfo, 1);
				break;
			case IMSG_RECV_SIAREPLY:
				rde_check_reply(nbr, &rinfo, 1);
				break;
			}
			break;
		case IMSG_CTL_SHOW_TOPOLOGY:
			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct ctl_show_topology_req)) {
				log_warnx("%s: wrong imsg len", __func__);
				break;
			}

			rt_dump(imsg.data, imsg.hdr.pid);
			rde_imsg_compose_eigrpe(IMSG_CTL_END, 0, imsg.hdr.pid,
			    NULL, 0);
			break;
		case IMSG_CTL_LOG_VERBOSE:
			/* already checked by eigrpe */
			memcpy(&verbose, imsg.data, sizeof(verbose));
			log_verbose(verbose);
			break;
		default:
			log_debug("rde_dispatch_imsg: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(iev);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&iev->ev);
		event_loopexit(NULL);
	}
}

static void
rde_dispatch_parent(int fd, short event, void *bula)
{
	static struct eigrpd_conf *nconf;
	static struct iface	*niface;
	static struct eigrp	*neigrp;
	struct eigrp_iface	*nei;
	struct imsg		 imsg;
	struct imsgev		*iev = bula;
	struct imsgbuf		*ibuf;
	struct kif		*kif;
	ssize_t			 n;
	int			 shut = 0;

	ibuf = &iev->ibuf;

	if (event & EV_READ) {
		if ((n = imsgbuf_read(ibuf)) == -1)
			fatal("imsgbuf_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
	}
	if (event & EV_WRITE) {
		if (imsgbuf_write(ibuf) == -1) {
			if (errno == EPIPE)	/* connection closed */
				shut = 1;
			else
				fatal("imsgbuf_write");
		}
	}

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_parent: imsg_get error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_IFDOWN:
			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct kif))
				fatalx("IFDOWN imsg with wrong len");
			kif = imsg.data;
			rde_check_link_down(kif->ifindex);
			break;
		case IMSG_NETWORK_ADD:
			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct kroute))
				fatalx("IMSG_NETWORK_ADD imsg with wrong len");
			rt_redist_set(imsg.data, 0);
			break;
		case IMSG_NETWORK_DEL:
			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct kroute))
				fatalx("IMSG_NETWORK_DEL imsg with wrong len");
			rt_redist_set(imsg.data, 1);
			break;
		case IMSG_SOCKET_IPC:
			if (iev_eigrpe) {
				log_warnx("%s: received unexpected imsg fd "
				    "to eigrpe", __func__);
				break;
			}
			if ((fd = imsg_get_fd(&imsg)) == -1) {
				log_warnx("%s: expected to receive imsg fd to "
				    "eigrpe but didn't receive any", __func__);
				break;
			}

			iev_eigrpe = malloc(sizeof(struct imsgev));
			if (iev_eigrpe == NULL)
				fatal(NULL);
			if (imsgbuf_init(&iev_eigrpe->ibuf, fd) == -1)
				fatal(NULL);
			iev_eigrpe->handler = rde_dispatch_imsg;
			iev_eigrpe->events = EV_READ;
			event_set(&iev_eigrpe->ev, iev_eigrpe->ibuf.fd,
			    iev_eigrpe->events, iev_eigrpe->handler,
			    iev_eigrpe);
			event_add(&iev_eigrpe->ev, NULL);
			break;
		case IMSG_RECONF_CONF:
			if ((nconf = malloc(sizeof(struct eigrpd_conf))) ==
			    NULL)
				fatal(NULL);
			memcpy(nconf, imsg.data, sizeof(struct eigrpd_conf));

			TAILQ_INIT(&nconf->iface_list);
			TAILQ_INIT(&nconf->instances);
			break;
		case IMSG_RECONF_INSTANCE:
			if ((neigrp = malloc(sizeof(struct eigrp))) == NULL)
				fatal(NULL);
			memcpy(neigrp, imsg.data, sizeof(struct eigrp));

			SIMPLEQ_INIT(&neigrp->redist_list);
			TAILQ_INIT(&neigrp->ei_list);
			RB_INIT(&neigrp->nbrs);
			RB_INIT(&neigrp->topology);
			TAILQ_INSERT_TAIL(&nconf->instances, neigrp, entry);
			break;
		case IMSG_RECONF_IFACE:
			niface = imsg.data;
			niface = if_lookup(nconf, niface->ifindex);
			if (niface)
				break;

			if ((niface = malloc(sizeof(struct iface))) == NULL)
				fatal(NULL);
			memcpy(niface, imsg.data, sizeof(struct iface));

			TAILQ_INIT(&niface->ei_list);
			TAILQ_INIT(&niface->addr_list);
			TAILQ_INSERT_TAIL(&nconf->iface_list, niface, entry);
			break;
		case IMSG_RECONF_EIGRP_IFACE:
			if (niface == NULL)
				break;
			if ((nei = malloc(sizeof(struct eigrp_iface))) == NULL)
				fatal(NULL);
			memcpy(nei, imsg.data, sizeof(struct eigrp_iface));

			nei->iface = niface;
			nei->eigrp = neigrp;
			TAILQ_INIT(&nei->nbr_list);
			TAILQ_INIT(&nei->update_list);
			TAILQ_INIT(&nei->query_list);
			TAILQ_INIT(&nei->summary_list);
			TAILQ_INSERT_TAIL(&niface->ei_list, nei, i_entry);
			TAILQ_INSERT_TAIL(&neigrp->ei_list, nei, e_entry);
			if (RB_INSERT(iface_id_head, &ifaces_by_id, nei) !=
			    NULL)
				fatalx("rde_dispatch_parent: "
				    "RB_INSERT(ifaces_by_id) failed");
			break;
		case IMSG_RECONF_END:
			merge_config(rdeconf, nconf, PROC_RDE_ENGINE);
			nconf = NULL;
			break;
		default:
			log_debug("%s: unexpected imsg %d", __func__,
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(iev);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&iev->ev);
		event_loopexit(NULL);
	}
}

void
rde_instance_init(struct eigrp *eigrp)
{
	struct rde_nbr		 nbr;

	memset(&nbr, 0, sizeof(nbr));
	nbr.flags = F_RDE_NBR_SELF | F_RDE_NBR_REDIST;
	eigrp->rnbr_redist = rde_nbr_new(NBR_IDSELF, &nbr);
	eigrp->rnbr_redist->eigrp = eigrp;
	nbr.flags = F_RDE_NBR_SELF | F_RDE_NBR_SUMMARY;
	eigrp->rnbr_summary = rde_nbr_new(NBR_IDSELF, &nbr);
	eigrp->rnbr_summary->eigrp = eigrp;
}

void
rde_instance_del(struct eigrp *eigrp)
{
	struct rde_nbr		*nbr, *safe;
	struct rt_node		*rn;

	/* clear topology */
	while((rn = RB_MIN(rt_tree, &eigrp->topology)) != NULL)
		rt_del(rn);

	/* clear nbrs */
	RB_FOREACH_SAFE(nbr, rde_nbr_head, &rde_nbrs, safe)
		if (nbr->eigrp == eigrp)
			rde_nbr_del(nbr, 0);
	rde_nbr_del(eigrp->rnbr_redist, 0);
	rde_nbr_del(eigrp->rnbr_summary, 0);

	free(eigrp);
}

void
rde_send_change_kroute(struct rt_node *rn, struct eigrp_route *route)
{
	struct eigrp		*eigrp = route->nbr->eigrp;
	struct kroute		 kr;
	struct in6_addr		 lo6 = IN6ADDR_LOOPBACK_INIT;

	log_debug("%s: %s nbr %s", __func__, log_prefix(rn),
	    log_addr(eigrp->af, &route->nbr->addr));

	memset(&kr, 0, sizeof(kr));
	kr.af = eigrp->af;
	kr.prefix = rn->prefix;
	kr.prefixlen = rn->prefixlen;
	if (route->nbr->ei) {
		kr.nexthop = route->nexthop;
		kr.ifindex = route->nbr->ei->iface->ifindex;
	} else {
		switch (eigrp->af) {
		case AF_INET:
			kr.nexthop.v4.s_addr = htonl(INADDR_LOOPBACK);
			break;
		case AF_INET6:
			kr.nexthop.v6 = lo6;
			break;
		default:
			fatalx("rde_send_change_kroute: unknown af");
			break;
		}
		kr.flags = F_BLACKHOLE;
	}
	if (route->type == EIGRP_ROUTE_EXTERNAL)
		kr.priority = rdeconf->fib_priority_external;
	else {
		if (route->nbr->flags & F_RDE_NBR_SUMMARY)
			kr.priority = rdeconf->fib_priority_summary;
		else
			kr.priority = rdeconf->fib_priority_internal;
	}

	rde_imsg_compose_parent(IMSG_KROUTE_CHANGE, 0, &kr, sizeof(kr));

	route->flags |= F_EIGRP_ROUTE_INSTALLED;
}

void
rde_send_delete_kroute(struct rt_node *rn, struct eigrp_route *route)
{
	struct eigrp		*eigrp = route->nbr->eigrp;
	struct kroute		 kr;
	struct in6_addr		 lo6 = IN6ADDR_LOOPBACK_INIT;

	log_debug("%s: %s nbr %s", __func__, log_prefix(rn),
	    log_addr(eigrp->af, &route->nbr->addr));

	memset(&kr, 0, sizeof(kr));
	kr.af = eigrp->af;
	kr.prefix = rn->prefix;
	kr.prefixlen = rn->prefixlen;
	if (route->nbr->ei) {
		kr.nexthop = route->nexthop;
		kr.ifindex = route->nbr->ei->iface->ifindex;
	} else {
		switch (eigrp->af) {
		case AF_INET:
			kr.nexthop.v4.s_addr = htonl(INADDR_LOOPBACK);
			break;
		case AF_INET6:
			kr.nexthop.v6 = lo6;
			break;
		default:
			fatalx("rde_send_delete_kroute: unknown af");
			break;
		}
		kr.flags = F_BLACKHOLE;
	}
	if (route->type == EIGRP_ROUTE_EXTERNAL)
		kr.priority = rdeconf->fib_priority_external;
	else {
		if (route->nbr->flags & F_RDE_NBR_SUMMARY)
			kr.priority = rdeconf->fib_priority_summary;
		else
			kr.priority = rdeconf->fib_priority_internal;
	}

	rde_imsg_compose_parent(IMSG_KROUTE_DELETE, 0, &kr, sizeof(kr));

	route->flags &= ~F_EIGRP_ROUTE_INSTALLED;
}

/* check if a kernel route matches a configured redistribute statement */
static struct redistribute *
eigrp_redistribute(struct eigrp *eigrp, struct kroute *kr)
{
	struct redistribute	*r;
	uint8_t			 is_default = 0;
	union eigrpd_addr	 addr;

	/* only allow the default route via REDIST_DEFAULT */
	if (!eigrp_addrisset(kr->af, &kr->prefix) && kr->prefixlen == 0)
		is_default = 1;

	SIMPLEQ_FOREACH(r, &eigrp->redist_list, entry) {
		switch (r->type & ~REDIST_NO) {
		case REDIST_STATIC:
			if (is_default)
				continue;
			if (kr->flags & F_STATIC)
				return (r->type & REDIST_NO ? NULL : r);
			break;
		case REDIST_RIP:
			if (is_default)
				continue;
			if (kr->priority == RTP_RIP)
				return (r->type & REDIST_NO ? NULL : r);
			break;
		case REDIST_OSPF:
			if (is_default)
				continue;
			if (kr->priority == RTP_OSPF)
				return (r->type & REDIST_NO ? NULL : r);
			break;
		case REDIST_CONNECTED:
			if (is_default)
				continue;
			if (kr->flags & F_CONNECTED)
				return (r->type & REDIST_NO ? NULL : r);
			break;
		case REDIST_ADDR:
			if (eigrp_addrisset(r->af, &r->addr) &&
			    r->prefixlen == 0) {
				if (is_default)
					return (r->type & REDIST_NO ? NULL : r);
				else
					return (0);
			}

			eigrp_applymask(kr->af, &addr, &kr->prefix,
			    r->prefixlen);
			if (eigrp_addrcmp(kr->af, &addr, &r->addr) == 0 &&
			    kr->prefixlen >= r->prefixlen)
				return (r->type & REDIST_NO ? NULL : r);
			break;
		case REDIST_DEFAULT:
			if (is_default)
				return (r->type & REDIST_NO ?
				    NULL : r);
			break;
		}
	}

	return (NULL);
}

/* announce or withdraw an external route for a redistributed kernel route */
static void
rt_redist_set(struct kroute *kr, int withdraw)
{
	struct eigrp		*eigrp;
	struct redistribute	*r;
	struct redist_metric	*rmetric;
	struct rinfo		 ri;

	TAILQ_FOREACH(eigrp, &rdeconf->instances, entry) {
		if (eigrp->af != kr->af)
			continue;

		r = eigrp_redistribute(eigrp, kr);
		if (r == NULL)
			continue;

		if (r->metric)
			rmetric = r->metric;
		else if (eigrp->dflt_metric)
			rmetric = eigrp->dflt_metric;
		else
			continue;

		memset(&ri, 0, sizeof(ri));
		ri.af = kr->af;
		ri.type = EIGRP_ROUTE_EXTERNAL;
		ri.prefix = kr->prefix;
		ri.prefixlen = kr->prefixlen;

		/* metric */
		if (withdraw)
			ri.metric.delay = EIGRP_INFINITE_METRIC;
		else
			ri.metric.delay = eigrp_composite_delay(rmetric->delay);
		ri.metric.bandwidth =
		    eigrp_composite_bandwidth(rmetric->bandwidth);
		metric_encode_mtu(ri.metric.mtu, rmetric->mtu);
		ri.metric.hop_count = 0;
		ri.metric.reliability = rmetric->reliability;
		ri.metric.load = rmetric->load;
		ri.metric.tag = 0;
		ri.metric.flags = 0;

		/* external metric */
		ri.emetric.routerid = htonl(rdeconf->rtr_id.s_addr);
		ri.emetric.as = r->emetric.as;
		ri.emetric.tag = r->emetric.tag;
		ri.emetric.metric = r->emetric.metric;
		if (kr->priority == rdeconf->fib_priority_internal)
			ri.emetric.protocol = EIGRP_EXT_PROTO_EIGRP;
		else if (kr->priority == RTP_STATIC)
			ri.emetric.protocol = EIGRP_EXT_PROTO_STATIC;
		else if (kr->priority == RTP_RIP)
			ri.emetric.protocol = EIGRP_EXT_PROTO_RIP;
		else if (kr->priority == RTP_OSPF)
			ri.emetric.protocol = EIGRP_EXT_PROTO_OSPF;
		else
			ri.emetric.protocol = EIGRP_EXT_PROTO_CONN;
		ri.emetric.flags = 0;

		rde_check_update(eigrp->rnbr_redist, &ri);
	}
}

void
rt_summary_set(struct eigrp *eigrp, struct summary_addr *summary,
    struct classic_metric *metric)
{
	struct rinfo		 ri;

	memset(&ri, 0, sizeof(ri));
	ri.af = eigrp->af;
	ri.type = EIGRP_ROUTE_INTERNAL;
	ri.prefix = summary->prefix;
	ri.prefixlen = summary->prefixlen;
	ri.metric = *metric;

	rde_check_update(eigrp->rnbr_summary, &ri);
}

/* send all known routing information to new neighbor */
static void
rt_snap(struct rde_nbr *nbr)
{
	struct eigrp		*eigrp = nbr->eigrp;
	struct rt_node		*rn;
	struct rinfo		 ri;

	RB_FOREACH(rn, rt_tree, &eigrp->topology)
		if (rn->state == DUAL_STA_PASSIVE &&
		    !rde_summary_check(nbr->ei, &rn->prefix, rn->prefixlen)) {
			rinfo_fill_successor(rn, &ri);
			rde_imsg_compose_eigrpe(IMSG_SEND_UPDATE,
			    nbr->peerid, 0, &ri, sizeof(ri));
		}

	rde_imsg_compose_eigrpe(IMSG_SEND_UPDATE_END, nbr->peerid, 0,
	    NULL, 0);
}

static struct ctl_rt *
rt_to_ctl(struct rt_node *rn, struct eigrp_route *route)
{
	static struct ctl_rt	 rtctl;

	memset(&rtctl, 0, sizeof(rtctl));
	rtctl.af = route->nbr->eigrp->af;
	rtctl.as = route->nbr->eigrp->as;
	rtctl.prefix = rn->prefix;
	rtctl.prefixlen = rn->prefixlen;
	rtctl.type = route->type;
	rtctl.nexthop = route->nexthop;
	if (route->nbr->flags & F_RDE_NBR_REDIST)
		strlcpy(rtctl.ifname, "redistribute", sizeof(rtctl.ifname));
	else if (route->nbr->flags & F_RDE_NBR_SUMMARY)
		strlcpy(rtctl.ifname, "summary", sizeof(rtctl.ifname));
	else
		memcpy(rtctl.ifname, route->nbr->ei->iface->name,
		    sizeof(rtctl.ifname));
	rtctl.distance = route->distance;
	rtctl.rdistance = route->rdistance;
	rtctl.fdistance = rn->successor.fdistance;
	rtctl.state = rn->state;
	/* metric */
	rtctl.metric.delay = eigrp_real_delay(route->metric.delay);
	/* translate to microseconds */
	rtctl.metric.delay *= 10;
	rtctl.metric.bandwidth = eigrp_real_bandwidth(route->metric.bandwidth);
	rtctl.metric.mtu = metric_decode_mtu(route->metric.mtu);
	rtctl.metric.hop_count = route->metric.hop_count;
	rtctl.metric.reliability = route->metric.reliability;
	rtctl.metric.load = route->metric.load;
	/* external metric */
	rtctl.emetric = route->emetric;

	if (route->nbr == rn->successor.nbr)
		rtctl.flags |= F_CTL_RT_SUCCESSOR;
	else if (route->rdistance < rn->successor.fdistance)
		rtctl.flags |= F_CTL_RT_FSUCCESSOR;

	return (&rtctl);
}

static void
rt_dump(struct ctl_show_topology_req *treq, pid_t pid)
{
	struct eigrp		*eigrp;
	struct rt_node		*rn;
	struct eigrp_route	*route;
	struct ctl_rt		*rtctl;
	int			 first = 1;

	TAILQ_FOREACH(eigrp, &rdeconf->instances, entry) {
		RB_FOREACH(rn, rt_tree, &eigrp->topology) {
			if (eigrp_addrisset(treq->af, &treq->prefix) &&
			    eigrp_addrcmp(treq->af, &treq->prefix,
			    &rn->prefix))
				continue;

			if (treq->prefixlen &&
			    (treq->prefixlen != rn->prefixlen))
				continue;

			first = 1;
			TAILQ_FOREACH(route, &rn->routes, entry) {
				if (treq->flags & F_CTL_ACTIVE &&
				    !(rn->state & DUAL_STA_ACTIVE_ALL))
					continue;
				if (!(treq->flags & F_CTL_ALLLINKS) &&
				    route->rdistance >= rn->successor.fdistance)
					continue;

				rtctl = rt_to_ctl(rn, route);
				if (first) {
					rtctl->flags |= F_CTL_RT_FIRST;
					first = 0;
				}
				rde_imsg_compose_eigrpe(IMSG_CTL_SHOW_TOPOLOGY,
				    0, pid, rtctl, sizeof(*rtctl));
			}
		}
	}
}