/*	$OpenBSD: rde.c,v 1.118 2024/11/21 13:38:14 claudio Exp $ */

/*
 * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
 * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <err.h>
#include <errno.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <pwd.h>
#include <unistd.h>
#include <event.h>

#include "ospf.h"
#include "ospfd.h"
#include "ospfe.h"
#include "log.h"
#include "rde.h"

void		 rde_sig_handler(int sig, short, void *);
__dead void	 rde_shutdown(void);
void		 rde_dispatch_imsg(int, short, void *);
void		 rde_dispatch_parent(int, short, void *);
void		 rde_dump_area(struct area *, int, pid_t);

void		 rde_send_summary(pid_t);
void		 rde_send_summary_area(struct area *, pid_t);
void		 rde_nbr_init(u_int32_t);
void		 rde_nbr_free(void);
struct rde_nbr	*rde_nbr_find(u_int32_t);
struct rde_nbr	*rde_nbr_new(u_int32_t, struct rde_nbr *);
void		 rde_nbr_del(struct rde_nbr *);

void		 rde_req_list_add(struct rde_nbr *, struct lsa_hdr *);
int		 rde_req_list_exists(struct rde_nbr *, struct lsa_hdr *);
void		 rde_req_list_del(struct rde_nbr *, struct lsa_hdr *);
void		 rde_req_list_free(struct rde_nbr *);

struct iface	*rde_asext_lookup(u_int32_t, int);
void		 rde_asext_get(struct kroute *);
void		 rde_asext_put(struct kroute *);
void		 rde_asext_free(void);
struct lsa	*orig_asext_lsa(struct kroute *, u_int32_t, u_int16_t);
struct lsa	*orig_sum_lsa(struct rt_node *, struct area *, u_int8_t, int);

struct ospfd_conf	*rdeconf = NULL, *nconf = NULL;
static struct imsgev	*iev_ospfe;
static struct imsgev	*iev_main;
struct rde_nbr		*nbrself;
struct lsa_tree		 asext_tree;

void
rde_sig_handler(int sig, short event, void *arg)
{
	/*
	 * signal handler rules don't apply, libevent decouples for us
	 */

	switch (sig) {
	case SIGINT:
	case SIGTERM:
		rde_shutdown();
		/* NOTREACHED */
	default:
		fatalx("unexpected signal");
	}
}

/* route decision engine */
pid_t
rde(struct ospfd_conf *xconf, int pipe_parent2rde[2], int pipe_ospfe2rde[2],
    int pipe_parent2ospfe[2])
{
	struct event		 ev_sigint, ev_sigterm;
	struct timeval		 now;
	struct area		*area;
	struct iface		*iface;
	struct passwd		*pw;
	pid_t			 pid;

	switch (pid = fork()) {
	case -1:
		fatal("cannot fork");
		/* NOTREACHED */
	case 0:
		break;
	default:
		return (pid);
	}

	/* cleanup a bit */
	kif_clear();

	rdeconf = xconf;
	if ((pw = getpwnam(OSPFD_USER)) == NULL)
		fatal("getpwnam");

	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	setproctitle("route decision engine");
	/*
	 * XXX needed with fork+exec
	 * log_init(debug, LOG_DAEMON);
	 * log_setverbose(verbose);
	 */

	ospfd_process = PROC_RDE_ENGINE;
	log_procinit(log_procnames[ospfd_process]);

	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

	if (pledge("stdio", NULL) == -1)
		fatal("pledge");

	event_init();
	rde_nbr_init(NBR_HASHSIZE);
	lsa_init(&asext_tree);

	/* setup signal handler */
	signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
	signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
	signal_add(&ev_sigint, NULL);
	signal_add(&ev_sigterm, NULL);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);

	/* setup pipes */
	close(pipe_ospfe2rde[0]);
	close(pipe_parent2rde[0]);
	close(pipe_parent2ospfe[0]);
	close(pipe_parent2ospfe[1]);

	if ((iev_ospfe = malloc(sizeof(struct imsgev))) == NULL ||
	    (iev_main = malloc(sizeof(struct imsgev))) == NULL)
		fatal(NULL);
	if (imsgbuf_init(&iev_ospfe->ibuf, pipe_ospfe2rde[1]) == -1)
		fatal(NULL);
	iev_ospfe->handler = rde_dispatch_imsg;
	if (imsgbuf_init(&iev_main->ibuf, pipe_parent2rde[1]) == -1)
		fatal(NULL);
	iev_main->handler = rde_dispatch_parent;

	/* setup event handler */
	iev_ospfe->events = EV_READ;
	event_set(&iev_ospfe->ev, iev_ospfe->ibuf.fd, iev_ospfe->events,
	    iev_ospfe->handler, iev_ospfe);
	event_add(&iev_ospfe->ev, NULL);

	iev_main->events = EV_READ;
	event_set(&iev_main->ev, iev_main->ibuf.fd, iev_main->events,
	    iev_main->handler, iev_main);
	event_add(&iev_main->ev, NULL);

	evtimer_set(&rdeconf->ev, spf_timer, rdeconf);
	cand_list_init();
	rt_init();

	/* remove unneeded stuff from config */
	LIST_FOREACH(area, &rdeconf->area_list, entry)
		LIST_FOREACH(iface, &area->iface_list, entry)
			md_list_clr(&iface->auth_md_list);

	conf_clear_redist_list(&rdeconf->redist_list);

	gettimeofday(&now, NULL);
	rdeconf->uptime = now.tv_sec;

	event_dispatch();

	rde_shutdown();
	/* NOTREACHED */

	return (0);
}

__dead void
rde_shutdown(void)
{
	struct area	*a;
	struct vertex	*v, *nv;

	/* close pipes */
	imsgbuf_clear(&iev_ospfe->ibuf);
	close(iev_ospfe->ibuf.fd);
	imsgbuf_clear(&iev_main->ibuf);
	close(iev_main->ibuf.fd);

	stop_spf_timer(rdeconf);
	cand_list_clr();
	rt_clear();

	while ((a = LIST_FIRST(&rdeconf->area_list)) != NULL) {
		LIST_REMOVE(a, entry);
		area_del(a);
	}
	for (v = RB_MIN(lsa_tree, &asext_tree); v != NULL; v = nv) {
		nv = RB_NEXT(lsa_tree, &asext_tree, v);
		vertex_free(v);
	}
	rde_asext_free();
	rde_nbr_free();

	free(iev_ospfe);
	free(iev_main);
	free(rdeconf);

	log_info("route decision engine exiting");
	_exit(0);
}

int
rde_imsg_compose_ospfe(int type, u_int32_t peerid, pid_t pid, void *data,
    u_int16_t datalen)
{
	return (imsg_compose_event(iev_ospfe, type, peerid, pid, -1,
	    data, datalen));
}
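
/*
 * Handle imsgs from the ospf engine process: neighbor events, database
 * exchange and ctl requests.
 */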
void
rde_dispatch_imsg(int fd, short event, void *bula)
{
	struct imsgev		*iev = bula;
	struct imsgbuf		*ibuf;
	struct imsg		 imsg;
	struct in_addr		 aid;
	struct ls_req_hdr	 req_hdr;
	struct lsa_hdr		 lsa_hdr, *db_hdr;
	struct rde_nbr		 rn, *nbr;
	struct timespec		 tp;
	struct lsa		*lsa;
	struct area		*area;
	struct in_addr		 addr;
	struct vertex		*v;
	char			*buf;
	ssize_t			 n;
	time_t			 now;
	int			 r, state, self, error, shut = 0, verbose;
	u_int16_t		 l;

	ibuf = &iev->ibuf;

	if (event & EV_READ) {
		if ((n = imsgbuf_read(ibuf)) == -1)
			fatal("imsgbuf_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
	}
	if (event & EV_WRITE) {
		if (imsgbuf_write(ibuf) == -1) {
			if (errno == EPIPE)	/* connection closed */
				shut = 1;
			else
				fatal("imsgbuf_write");
		}
	}

	clock_gettime(CLOCK_MONOTONIC, &tp);
	now = tp.tv_sec;

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_imsg: imsg_get error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_NEIGHBOR_UP:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rn))
				fatalx("invalid size of OE request");
			memcpy(&rn, imsg.data, sizeof(rn));

			if (rde_nbr_new(imsg.hdr.peerid, &rn) == NULL)
				fatalx("rde_dispatch_imsg: "
				    "neighbor already exists");
			break;
		case IMSG_NEIGHBOR_DOWN:
			rde_nbr_del(rde_nbr_find(imsg.hdr.peerid));
			break;
		case IMSG_NEIGHBOR_ADDR:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(addr))
				fatalx("invalid size of OE request");
			memcpy(&addr, imsg.data, sizeof(addr));

			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			nbr->addr.s_addr = addr.s_addr;
			break;
		case IMSG_NEIGHBOR_CHANGE:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state))
				fatalx("invalid size of OE request");
			memcpy(&state, imsg.data, sizeof(state));

			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			nbr->state = state;
			if (nbr->state & NBR_STA_FULL)
				rde_req_list_free(nbr);
			break;
		case IMSG_NEIGHBOR_CAPA:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(u_int8_t))
				fatalx("invalid size of OE request");
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;
			nbr->capa_options = *(u_int8_t *)imsg.data;
			break;
		case IMSG_AREA_CHANGE:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state))
				fatalx("invalid size of OE request");

			LIST_FOREACH(area, &rdeconf->area_list, entry) {
				if (area->id.s_addr == imsg.hdr.peerid)
					break;
			}
			if (area == NULL)
				break;
			memcpy(&state, imsg.data, sizeof(state));
			area->active = state;
			break;
		case IMSG_DB_SNAPSHOT:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			lsa_snap(nbr);

			imsg_compose_event(iev_ospfe, IMSG_DB_END,
			    imsg.hdr.peerid, 0, -1, NULL, 0);
			break;
		case IMSG_DD:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			buf = imsg.data;
			error = 0;
			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
			    l >= sizeof(lsa_hdr); l -= sizeof(lsa_hdr)) {
				memcpy(&lsa_hdr, buf, sizeof(lsa_hdr));
				buf += sizeof(lsa_hdr);

				if (lsa_hdr.type == LSA_TYPE_EXTERNAL &&
				    nbr->area->stub) {
					error = 1;
					break;
				}
				v = lsa_find(nbr->iface, lsa_hdr.type,
				    lsa_hdr.ls_id, lsa_hdr.adv_rtr);
				if (v == NULL)
					db_hdr = NULL;
				else
					db_hdr = &v->lsa->hdr;

				if (lsa_newer(&lsa_hdr, db_hdr) > 0) {
					/*
					 * only request LSAs that are
					 * newer or missing
					 */
					rde_req_list_add(nbr, &lsa_hdr);
					imsg_compose_event(iev_ospfe, IMSG_DD,
					    imsg.hdr.peerid, 0, -1, &lsa_hdr,
					    sizeof(lsa_hdr));
				}
			}
			if (l != 0 && !error)
				log_warnx("rde_dispatch_imsg: peerid %u, "
				    "trailing garbage in Database Description "
				    "packet", imsg.hdr.peerid);

			if (!error)
				imsg_compose_event(iev_ospfe, IMSG_DD_END,
				    imsg.hdr.peerid, 0, -1, NULL, 0);
			else
				imsg_compose_event(iev_ospfe, IMSG_DD_BADLSA,
				    imsg.hdr.peerid, 0, -1, NULL, 0);
			break;
		case IMSG_LS_REQ:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			buf = imsg.data;
			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
			    l >= sizeof(req_hdr); l -= sizeof(req_hdr)) {
				memcpy(&req_hdr, buf, sizeof(req_hdr));
				buf += sizeof(req_hdr);

				if ((v = lsa_find(nbr->iface,
				    ntohl(req_hdr.type), req_hdr.ls_id,
				    req_hdr.adv_rtr)) == NULL) {
					log_debug("rde_dispatch_imsg: "
					    "requested LSA not found");
					imsg_compose_event(iev_ospfe,
					    IMSG_LS_BADREQ, imsg.hdr.peerid,
					    0, -1, NULL, 0);
					continue;
				}
				imsg_compose_event(iev_ospfe, IMSG_LS_UPD,
				    imsg.hdr.peerid, 0, -1, v->lsa,
				    ntohs(v->lsa->hdr.len));
			}
			if (l != 0)
				log_warnx("rde_dispatch_imsg: peerid %u, "
				    "trailing garbage in LS Request "
				    "packet", imsg.hdr.peerid);
			break;
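		/*
		 * A received LS Update: self-originated LSAs are merged,
		 * newer LSAs are added to the DB and flooded, older ones
		 * are answered with the current DB copy, equal ones are
		 * acked directly.
		 */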
		case IMSG_LS_UPD:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			lsa = malloc(imsg.hdr.len - IMSG_HEADER_SIZE);
			if (lsa == NULL)
				fatal(NULL);
			memcpy(lsa, imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);

			if (!lsa_check(nbr, lsa,
			    imsg.hdr.len - IMSG_HEADER_SIZE)) {
				free(lsa);
				break;
			}

			v = lsa_find(nbr->iface, lsa->hdr.type, lsa->hdr.ls_id,
			    lsa->hdr.adv_rtr);
			if (v == NULL)
				db_hdr = NULL;
			else
				db_hdr = &v->lsa->hdr;

			if (nbr->self) {
				lsa_merge(nbr, lsa, v);
				/* lsa_merge frees the right lsa */
				break;
			}

			r = lsa_newer(&lsa->hdr, db_hdr);
			if (r > 0) {
				/* new LSA newer than DB */
				if (v && v->flooded &&
				    v->changed + MIN_LS_ARRIVAL >= now) {
					free(lsa);
					break;
				}

				rde_req_list_del(nbr, &lsa->hdr);

				if (!(self = lsa_self(nbr, lsa, v)))
					if (lsa_add(nbr, lsa))
						/* delayed lsa */
						break;

				/* flood and perhaps ack LSA */
				imsg_compose_event(iev_ospfe, IMSG_LS_FLOOD,
				    imsg.hdr.peerid, 0, -1, lsa,
				    ntohs(lsa->hdr.len));

				/* reflood self originated LSA */
				if (self && v)
					imsg_compose_event(iev_ospfe,
					    IMSG_LS_FLOOD, v->peerid, 0, -1,
					    v->lsa, ntohs(v->lsa->hdr.len));
				/* new LSA was not added so free it */
				if (self)
					free(lsa);
			} else if (r < 0) {
				/*
				 * point 6 of "The Flooding Procedure"
				 * We are violating the RFC here because
				 * it does not make sense to reset a session
				 * because an equal LSA is already in the table.
				 * Only if the LSA sent is older than the one
				 * in the table should we reset the session.
				 */
				if (rde_req_list_exists(nbr, &lsa->hdr)) {
					imsg_compose_event(iev_ospfe,
					    IMSG_LS_BADREQ, imsg.hdr.peerid,
					    0, -1, NULL, 0);
					free(lsa);
					break;
				}

				/* lsa no longer needed */
				free(lsa);

				/* new LSA older than DB */
				if (ntohl(db_hdr->seq_num) == MAX_SEQ_NUM &&
				    ntohs(db_hdr->age) == MAX_AGE)
					/* seq-num wrap */
					break;

				if (v->changed + MIN_LS_ARRIVAL >= now)
					break;

				/* directly send current LSA, no ack */
				imsg_compose_event(iev_ospfe, IMSG_LS_UPD,
				    imsg.hdr.peerid, 0, -1, v->lsa,
				    ntohs(v->lsa->hdr.len));
			} else {
				/* LSA equal send direct ack */
				imsg_compose_event(iev_ospfe, IMSG_LS_ACK,
				    imsg.hdr.peerid, 0, -1, &lsa->hdr,
				    sizeof(lsa->hdr));
				free(lsa);
			}
			break;
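		/*
		 * Flush a MaxAge LSA from the database, unless a neighbor
		 * is still exchanging or loading, or the database copy is
		 * newer than the flushed one.
		 */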
		case IMSG_LS_MAXAGE:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct lsa_hdr))
				fatalx("invalid size of OE request");
			memcpy(&lsa_hdr, imsg.data, sizeof(lsa_hdr));

			if (rde_nbr_loading(nbr->area))
				break;

			v = lsa_find(nbr->iface, lsa_hdr.type, lsa_hdr.ls_id,
			    lsa_hdr.adv_rtr);
			if (v == NULL)
				db_hdr = NULL;
			else
				db_hdr = &v->lsa->hdr;

			/*
			 * only delete LSA if the one in the db is not newer
			 */
			if (lsa_newer(db_hdr, &lsa_hdr) <= 0)
				lsa_del(nbr, &lsa_hdr);
			break;
		case IMSG_CTL_SHOW_DATABASE:
		case IMSG_CTL_SHOW_DB_EXT:
		case IMSG_CTL_SHOW_DB_NET:
		case IMSG_CTL_SHOW_DB_RTR:
		case IMSG_CTL_SHOW_DB_SELF:
		case IMSG_CTL_SHOW_DB_SUM:
		case IMSG_CTL_SHOW_DB_ASBR:
		case IMSG_CTL_SHOW_DB_OPAQ:
			if (imsg.hdr.len != IMSG_HEADER_SIZE &&
			    imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(aid)) {
				log_warnx("rde_dispatch_imsg: wrong imsg len");
				break;
			}
			if (imsg.hdr.len == IMSG_HEADER_SIZE) {
				LIST_FOREACH(area, &rdeconf->area_list, entry) {
					rde_dump_area(area, imsg.hdr.type,
					    imsg.hdr.pid);
				}
				lsa_dump(&asext_tree, imsg.hdr.type,
				    imsg.hdr.pid);
			} else {
				memcpy(&aid, imsg.data, sizeof(aid));
				if ((area = area_find(rdeconf, aid)) != NULL) {
					rde_dump_area(area, imsg.hdr.type,
					    imsg.hdr.pid);
					if (!area->stub)
						lsa_dump(&asext_tree,
						    imsg.hdr.type,
						    imsg.hdr.pid);
				}
			}
			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
			    imsg.hdr.pid, -1, NULL, 0);
			break;
		case IMSG_CTL_SHOW_RIB:
			LIST_FOREACH(area, &rdeconf->area_list, entry) {
				imsg_compose_event(iev_ospfe, IMSG_CTL_AREA,
				    0, imsg.hdr.pid, -1, area, sizeof(*area));

				rt_dump(area->id, imsg.hdr.pid, RIB_RTR);
				rt_dump(area->id, imsg.hdr.pid, RIB_NET);
			}
			aid.s_addr = 0;
			rt_dump(aid, imsg.hdr.pid, RIB_EXT);

			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
			    imsg.hdr.pid, -1, NULL, 0);
			break;
		case IMSG_CTL_SHOW_SUM:
			rde_send_summary(imsg.hdr.pid);
			LIST_FOREACH(area, &rdeconf->area_list, entry)
				rde_send_summary_area(area, imsg.hdr.pid);
			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
			    imsg.hdr.pid, -1, NULL, 0);
			break;
		case IMSG_CTL_LOG_VERBOSE:
			/* already checked by ospfe */
			memcpy(&verbose, imsg.data, sizeof(verbose));
			log_setverbose(verbose);
			break;
		default:
			log_debug("rde_dispatch_imsg: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(iev);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&iev->ev);
		event_loopexit(NULL);
	}
}
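
/*
 * Handle imsgs from the parent process: kernel route updates and
 * config reloads.
 */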
void
rde_dispatch_parent(int fd, short event, void *bula)
{
	static struct area	*narea;
	struct iface		*niface;
	struct imsg		 imsg;
	struct kroute		 rr;
	struct imsgev		*iev = bula;
	struct imsgbuf		*ibuf;
	struct redistribute	*nred;
	ssize_t			 n;
	int			 shut = 0;

	ibuf = &iev->ibuf;

	if (event & EV_READ) {
		if ((n = imsgbuf_read(ibuf)) == -1)
			fatal("imsgbuf_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
	}
	if (event & EV_WRITE) {
		if (imsgbuf_write(ibuf) == -1) {
			if (errno == EPIPE)	/* connection closed */
				shut = 1;
			else
				fatal("imsgbuf_write");
		}
	}

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_parent: imsg_get error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_NETWORK_ADD:
			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
				log_warnx("rde_dispatch_parent: "
				    "wrong imsg len");
				break;
			}
			memcpy(&rr, imsg.data, sizeof(rr));
			rde_asext_get(&rr);
			break;
		case IMSG_NETWORK_DEL:
			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
				log_warnx("rde_dispatch_parent: "
				    "wrong imsg len");
				break;
			}
			memcpy(&rr, imsg.data, sizeof(rr));
			rde_asext_put(&rr);
			break;
		case IMSG_RECONF_CONF:
			if ((nconf = malloc(sizeof(struct ospfd_conf))) ==
			    NULL)
				fatal(NULL);
			memcpy(nconf, imsg.data, sizeof(struct ospfd_conf));

			LIST_INIT(&nconf->area_list);
			LIST_INIT(&nconf->cand_list);
			break;
		case IMSG_RECONF_AREA:
			if ((narea = area_new()) == NULL)
				fatal(NULL);
			memcpy(narea, imsg.data, sizeof(struct area));

			LIST_INIT(&narea->iface_list);
			LIST_INIT(&narea->nbr_list);
			RB_INIT(&narea->lsa_tree);
			SIMPLEQ_INIT(&narea->redist_list);

			LIST_INSERT_HEAD(&nconf->area_list, narea, entry);
			break;
		case IMSG_RECONF_REDIST:
			if ((nred = malloc(sizeof(struct redistribute))) ==
			    NULL)
				fatal(NULL);
			memcpy(nred, imsg.data, sizeof(struct redistribute));

			SIMPLEQ_INSERT_TAIL(&narea->redist_list, nred, entry);
			break;
		case IMSG_RECONF_IFACE:
			if ((niface = malloc(sizeof(struct iface))) == NULL)
				fatal(NULL);
			memcpy(niface, imsg.data, sizeof(struct iface));

			LIST_INIT(&niface->nbr_list);
			TAILQ_INIT(&niface->ls_ack_list);
			TAILQ_INIT(&niface->auth_md_list);
			RB_INIT(&niface->lsa_tree);

			niface->area = narea;
			LIST_INSERT_HEAD(&narea->iface_list, niface, entry);

			break;
		case IMSG_RECONF_END:
			merge_config(rdeconf, nconf);
			nconf = NULL;
			break;
		default:
			log_debug("rde_dispatch_parent: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(iev);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&iev->ev);
		event_loopexit(NULL);
	}
}

void
rde_dump_area(struct area *area, int imsg_type, pid_t pid)
{
	struct iface	*iface;

	/* dump header */
	imsg_compose_event(iev_ospfe, IMSG_CTL_AREA, 0, pid, -1,
	    area, sizeof(*area));

	/* dump link local lsa */
	LIST_FOREACH(iface, &area->iface_list, entry) {
		imsg_compose_event(iev_ospfe, IMSG_CTL_IFACE,
		    0, pid, -1, iface, sizeof(*iface));
		lsa_dump(&iface->lsa_tree, imsg_type, pid);
	}

	/* dump area lsa */
	lsa_dump(&area->lsa_tree, imsg_type, pid);
}

u_int32_t
rde_router_id(void)
{
	return (rdeconf->rtr_id.s_addr);
}

struct area *
rde_backbone_area(void)
{
	struct in_addr	id;

	id.s_addr = INADDR_ANY;

	return (area_find(rdeconf, id));
}
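
/*
 * Batch all valid nexthops of a route into a single IMSG_KROUTE_CHANGE
 * message for the parent process.
 */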
void
rde_send_change_kroute(struct rt_node *r)
{
	int			 krcount = 0;
	struct kroute		 kr;
	struct rt_nexthop	*rn;
	struct ibuf		*wbuf;

	if ((wbuf = imsg_create(&iev_main->ibuf, IMSG_KROUTE_CHANGE, 0, 0,
	    sizeof(kr))) == NULL) {
		return;
	}

	TAILQ_FOREACH(rn, &r->nexthop, entry) {
		if (rn->invalid)
			continue;
		if (rn->connected)
			/* skip self-originated routes */
			continue;
		krcount++;

		bzero(&kr, sizeof(kr));
		kr.prefix.s_addr = r->prefix.s_addr;
		kr.nexthop.s_addr = rn->nexthop.s_addr;
		kr.prefixlen = r->prefixlen;
		kr.ext_tag = r->ext_tag;
		imsg_add(wbuf, &kr, sizeof(kr));
	}
	if (krcount == 0) {
		/* no valid nexthop or self originated, so remove */
		ibuf_free(wbuf);
		rde_send_delete_kroute(r);
		return;
	}
	imsg_close(&iev_main->ibuf, wbuf);
	imsg_event_add(iev_main);
}

void
rde_send_delete_kroute(struct rt_node *r)
{
	struct kroute	 kr;

	bzero(&kr, sizeof(kr));
	kr.prefix.s_addr = r->prefix.s_addr;
	kr.prefixlen = r->prefixlen;

	imsg_compose_event(iev_main, IMSG_KROUTE_DELETE, 0, 0, -1,
	    &kr, sizeof(kr));
}

void
rde_send_summary(pid_t pid)
{
	static struct ctl_sum	 sumctl;
	struct timeval		 now;
	struct area		*area;
	struct vertex		*v;

	bzero(&sumctl, sizeof(struct ctl_sum));

	sumctl.rtr_id.s_addr = rde_router_id();
	sumctl.spf_delay = rdeconf->spf_delay;
	sumctl.spf_hold_time = rdeconf->spf_hold_time;

	LIST_FOREACH(area, &rdeconf->area_list, entry)
		sumctl.num_area++;

	RB_FOREACH(v, lsa_tree, &asext_tree) {
		sumctl.num_ext_lsa++;
		sumctl.ext_lsa_cksum += ntohs(v->lsa->hdr.ls_chksum);
	}

	gettimeofday(&now, NULL);
	if (rdeconf->uptime < now.tv_sec)
		sumctl.uptime = now.tv_sec - rdeconf->uptime;
	else
		sumctl.uptime = 0;

	sumctl.rfc1583compat = rdeconf->rfc1583compat;

	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM, 0, pid, &sumctl,
	    sizeof(sumctl));
}

void
rde_send_summary_area(struct area *area, pid_t pid)
{
	static struct ctl_sum_area	 sumareactl;
	struct iface			*iface;
	struct rde_nbr			*nbr;
	struct lsa_tree			*tree = &area->lsa_tree;
	struct vertex			*v;

	bzero(&sumareactl, sizeof(struct ctl_sum_area));

	sumareactl.area.s_addr = area->id.s_addr;
	sumareactl.num_spf_calc = area->num_spf_calc;

	LIST_FOREACH(iface, &area->iface_list, entry)
		sumareactl.num_iface++;

	LIST_FOREACH(nbr, &area->nbr_list, entry)
		if (nbr->state == NBR_STA_FULL && !nbr->self)
			sumareactl.num_adj_nbr++;

	RB_FOREACH(v, lsa_tree, tree) {
		sumareactl.num_lsa++;
		sumareactl.lsa_cksum += ntohs(v->lsa->hdr.ls_chksum);
	}

	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM_AREA, 0, pid, &sumareactl,
	    sizeof(sumareactl));
}

LIST_HEAD(rde_nbr_head, rde_nbr);

struct nbr_table {
	struct rde_nbr_head	*hashtbl;
	u_int32_t		 hashmask;
} rdenbrtable;

#define RDE_NBR_HASH(x)		\
	&rdenbrtable.hashtbl[(x) & rdenbrtable.hashmask]
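
/*
 * The hash size is rounded up to the next power of two so a bucket
 * can be selected with a simple mask in RDE_NBR_HASH().
 */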
void
rde_nbr_init(u_int32_t hashsize)
{
	struct rde_nbr_head	*head;
	u_int32_t		 hs, i;

	for (hs = 1; hs < hashsize; hs <<= 1)
		;
	rdenbrtable.hashtbl = calloc(hs, sizeof(struct rde_nbr_head));
	if (rdenbrtable.hashtbl == NULL)
		fatal("rde_nbr_init");

	for (i = 0; i < hs; i++)
		LIST_INIT(&rdenbrtable.hashtbl[i]);

	rdenbrtable.hashmask = hs - 1;

	if ((nbrself = calloc(1, sizeof(*nbrself))) == NULL)
		fatal("rde_nbr_init");

	nbrself->id.s_addr = rde_router_id();
	nbrself->peerid = NBR_IDSELF;
	nbrself->state = NBR_STA_DOWN;
	nbrself->self = 1;
	head = RDE_NBR_HASH(NBR_IDSELF);
	LIST_INSERT_HEAD(head, nbrself, hash);
}

void
rde_nbr_free(void)
{
	free(nbrself);
	free(rdenbrtable.hashtbl);
}

struct rde_nbr *
rde_nbr_find(u_int32_t peerid)
{
	struct rde_nbr_head	*head;
	struct rde_nbr		*nbr;

	head = RDE_NBR_HASH(peerid);

	LIST_FOREACH(nbr, head, hash) {
		if (nbr->peerid == peerid)
			return (nbr);
	}

	return (NULL);
}

struct rde_nbr *
rde_nbr_new(u_int32_t peerid, struct rde_nbr *new)
{
	struct rde_nbr_head	*head;
	struct rde_nbr		*nbr;
	struct area		*area;
	struct iface		*iface;

	if (rde_nbr_find(peerid))
		return (NULL);
	if ((area = area_find(rdeconf, new->area_id)) == NULL)
		fatalx("rde_nbr_new: unknown area");

	LIST_FOREACH(iface, &area->iface_list, entry) {
		if (iface->ifindex == new->ifindex)
			break;
	}
	if (iface == NULL)
		fatalx("rde_nbr_new: unknown interface");

	if ((nbr = calloc(1, sizeof(*nbr))) == NULL)
		fatal("rde_nbr_new");

	memcpy(nbr, new, sizeof(*nbr));
	nbr->peerid = peerid;
	nbr->area = area;
	nbr->iface = iface;

	TAILQ_INIT(&nbr->req_list);

	head = RDE_NBR_HASH(peerid);
	LIST_INSERT_HEAD(head, nbr, hash);
	LIST_INSERT_HEAD(&area->nbr_list, nbr, entry);

	return (nbr);
}

void
rde_nbr_iface_del(struct iface *iface)
{
	struct rde_nbr_head	*head;
	struct rde_nbr		*nbr, *xnbr;
	u_int32_t		 i;

	for (i = 0; i <= rdenbrtable.hashmask; i++) {
		head = &rdenbrtable.hashtbl[i];
		LIST_FOREACH_SAFE(nbr, head, hash, xnbr) {
			if (nbr->iface == iface)
				rde_nbr_del(nbr);
		}
	}
}

void
rde_nbr_del(struct rde_nbr *nbr)
{
	if (nbr == NULL)
		return;

	rde_req_list_free(nbr);

	LIST_REMOVE(nbr, entry);
	LIST_REMOVE(nbr, hash);

	free(nbr);
}

int
rde_nbr_loading(struct area *area)
{
	struct rde_nbr	*nbr;
	int		 checkall = 0;

	if (area == NULL) {
		area = LIST_FIRST(&rdeconf->area_list);
		checkall = 1;
	}

	while (area != NULL) {
		LIST_FOREACH(nbr, &area->nbr_list, entry) {
			if (nbr->self)
				continue;
			if (nbr->state & NBR_STA_XCHNG ||
			    nbr->state & NBR_STA_LOAD)
				return (1);
		}
		if (!checkall)
			break;
		area = LIST_NEXT(area, entry);
	}

	return (0);
}

struct rde_nbr *
rde_nbr_self(struct area *area)
{
	struct rde_nbr	*nbr;

	LIST_FOREACH(nbr, &area->nbr_list, entry)
		if (nbr->self)
			return (nbr);

	/* this should never happen */
	fatalx("rde_nbr_self: area without self");
	return (NULL);
}

/*
 * LSA req list
 */
void
rde_req_list_add(struct rde_nbr *nbr, struct lsa_hdr *lsa)
{
	struct rde_req_entry	*le;

	if ((le = calloc(1, sizeof(*le))) == NULL)
		fatal("rde_req_list_add");

	TAILQ_INSERT_TAIL(&nbr->req_list, le, entry);
	le->type = lsa->type;
	le->ls_id = lsa->ls_id;
	le->adv_rtr = lsa->adv_rtr;
}

int
rde_req_list_exists(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
{
	struct rde_req_entry	*le;

	TAILQ_FOREACH(le, &nbr->req_list, entry) {
		if ((lsa_hdr->type == le->type) &&
		    (lsa_hdr->ls_id == le->ls_id) &&
		    (lsa_hdr->adv_rtr == le->adv_rtr))
			return (1);
	}
	return (0);
}

void
rde_req_list_del(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
{
	struct rde_req_entry	*le;

	TAILQ_FOREACH(le, &nbr->req_list, entry) {
		if ((lsa_hdr->type == le->type) &&
		    (lsa_hdr->ls_id == le->ls_id) &&
		    (lsa_hdr->adv_rtr == le->adv_rtr)) {
			TAILQ_REMOVE(&nbr->req_list, le, entry);
			free(le);
			return;
		}
	}
}

void
rde_req_list_free(struct rde_nbr *nbr)
{
	struct rde_req_entry	*le;

	while ((le = TAILQ_FIRST(&nbr->req_list)) != NULL) {
		TAILQ_REMOVE(&nbr->req_list, le, entry);
		free(le);
	}
}

/*
 * as-external LSA handling
 */
struct asext_node {
	RB_ENTRY(asext_node)	entry;
	struct kroute		r;
	u_int32_t		ls_id;
};

static __inline int	asext_compare(struct asext_node *, struct asext_node *);
struct asext_node	*asext_find(u_int32_t, u_int8_t);

RB_HEAD(asext_tree, asext_node)		ast;
RB_PROTOTYPE(asext_tree, asext_node, entry, asext_compare)
RB_GENERATE(asext_tree, asext_node, entry, asext_compare)
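
/* order as-ext nodes by prefix (in host byte order), then prefix length */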
static __inline int
asext_compare(struct asext_node *a, struct asext_node *b)
{
	if (ntohl(a->r.prefix.s_addr) < ntohl(b->r.prefix.s_addr))
		return (-1);
	if (ntohl(a->r.prefix.s_addr) > ntohl(b->r.prefix.s_addr))
		return (1);
	if (a->r.prefixlen < b->r.prefixlen)
		return (-1);
	if (a->r.prefixlen > b->r.prefixlen)
		return (1);
	return (0);
}

struct asext_node *
asext_find(u_int32_t addr, u_int8_t prefixlen)
{
	struct asext_node	a;

	a.r.prefix.s_addr = addr;
	a.r.prefixlen = prefixlen;

	return (RB_FIND(asext_tree, &ast, &a));
}
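
/*
 * Return the interface whose connected network covers the given prefix;
 * a prefix length of -1 matches any prefix length.
 */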
struct iface *
rde_asext_lookup(u_int32_t prefix, int plen)
{
	struct area	*area;
	struct iface	*iface;

	LIST_FOREACH(area, &rdeconf->area_list, entry) {
		LIST_FOREACH(iface, &area->iface_list, entry) {
			if ((iface->addr.s_addr & iface->mask.s_addr) ==
			    (prefix & iface->mask.s_addr) && (plen == -1 ||
			    iface->mask.s_addr == prefixlen2mask(plen)))
				return (iface);
		}
	}
	return (NULL);
}

void
rde_asext_get(struct kroute *kr)
{
	struct asext_node	*an, *oan;
	struct vertex		*v;
	struct lsa		*lsa;
	u_int32_t		 mask;

	if (rde_asext_lookup(kr->prefix.s_addr, kr->prefixlen)) {
		/* already announced as (stub) net LSA */
		log_debug("rde_asext_get: %s/%d is net LSA",
		    inet_ntoa(kr->prefix), kr->prefixlen);
		return;
	}

	an = asext_find(kr->prefix.s_addr, kr->prefixlen);
	if (an == NULL) {
		if ((an = calloc(1, sizeof(*an))) == NULL)
			fatal("rde_asext_get");
		bcopy(kr, &an->r, sizeof(*kr));
		an->ls_id = kr->prefix.s_addr;
		RB_INSERT(asext_tree, &ast, an);
	} else {
		/* the bcopy does not change the lookup key so it is safe */
		bcopy(kr, &an->r, sizeof(*kr));
	}

	/*
	 * ls_id must be unique; for overlapping routes this may
	 * not be true. In this case a unique ls_id needs to be found.
	 * The algorithm will change the ls_id of the less specific
	 * route. E.g. in the case of 10.0.0.0/16 and 10.0.0.0/24
	 * 10.0.0.0/24 will get the 10.0.0.0 ls_id and 10.0.0.0/16
	 * will change the ls_id to 10.0.255.255 and see if that is unique.
	 */
	oan = an;
	mask = prefixlen2mask(oan->r.prefixlen);
	v = lsa_find(NULL, LSA_TYPE_EXTERNAL, oan->ls_id,
	    rdeconf->rtr_id.s_addr);
	while (v && v->lsa->data.asext.mask != mask) {
		/* conflict needs to be resolved. change less specific lsa */
		if (ntohl(v->lsa->data.asext.mask) < ntohl(mask)) {
			/* lsa to insert is more specific, fix other lsa */
			mask = v->lsa->data.asext.mask;
			oan = asext_find(v->lsa->hdr.ls_id & mask,
			    mask2prefixlen(mask));
			if (oan == NULL)
				fatalx("as-ext LSA DB corrupted");
		}
		/* oan is less specific and needs a new ls_id */
		if (oan->ls_id == oan->r.prefix.s_addr)
			oan->ls_id |= ~mask;
		else {
			u_int32_t	tmp = ntohl(oan->ls_id);

			oan->ls_id = htonl(tmp - 1);
			if (oan->ls_id == oan->r.prefix.s_addr) {
				log_warnx("prefix %s/%d cannot be "
				    "redistributed, no unique ls_id found.",
				    inet_ntoa(kr->prefix), kr->prefixlen);
				RB_REMOVE(asext_tree, &ast, an);
				free(an);
				return;
			}
		}
		mask = prefixlen2mask(oan->r.prefixlen);
		v = lsa_find(NULL, LSA_TYPE_EXTERNAL, oan->ls_id,
		    rdeconf->rtr_id.s_addr);
	}

	v = lsa_find(NULL, LSA_TYPE_EXTERNAL, an->ls_id,
	    rdeconf->rtr_id.s_addr);
	lsa = orig_asext_lsa(kr, an->ls_id, DEFAULT_AGE);
	lsa_merge(nbrself, lsa, v);

	if (oan != an) {
		v = lsa_find(NULL, LSA_TYPE_EXTERNAL, oan->ls_id,
		    rdeconf->rtr_id.s_addr);
		lsa = orig_asext_lsa(&oan->r, oan->ls_id, DEFAULT_AGE);
		lsa_merge(nbrself, lsa, v);
	}
}
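
/*
 * Withdraw the as-ext LSA for a kernel route that is no longer
 * redistributed.
 */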
void
rde_asext_put(struct kroute *kr)
{
	struct asext_node	*an;
	struct vertex		*v;
	struct lsa		*lsa;

	/*
	 * just try to remove the LSA. If the prefix is announced as
	 * stub net LSA asext_find() will fail and nothing will happen.
	 */
	an = asext_find(kr->prefix.s_addr, kr->prefixlen);
	if (an == NULL) {
		log_debug("rde_asext_put: NO SUCH LSA %s/%d",
		    inet_ntoa(kr->prefix), kr->prefixlen);
		return;
	}

	/*
	 * inherit metric and ext_tag from the current LSA,
	 * some routers don't like to get withdraws that are
	 * different from what they have in their table.
	 */
	v = lsa_find(NULL, LSA_TYPE_EXTERNAL, an->ls_id,
	    rdeconf->rtr_id.s_addr);
	if (v != NULL) {
		kr->metric = ntohl(v->lsa->data.asext.metric);
		kr->ext_tag = ntohl(v->lsa->data.asext.ext_tag);
	}

	/* remove by reflooding with MAX_AGE */
	lsa = orig_asext_lsa(kr, an->ls_id, MAX_AGE);
	lsa_merge(nbrself, lsa, v);

	RB_REMOVE(asext_tree, &ast, an);
	free(an);
}

void
rde_asext_free(void)
{
	struct asext_node	*an, *nan;

	for (an = RB_MIN(asext_tree, &ast); an != NULL; an = nan) {
		nan = RB_NEXT(asext_tree, &ast, an);
		RB_REMOVE(asext_tree, &ast, an);
		free(an);
	}
}
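
/* originate an as-external LSA for the given kernel route */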
struct lsa *
orig_asext_lsa(struct kroute *kr, u_int32_t ls_id, u_int16_t age)
{
	struct lsa	*lsa;
	struct iface	*iface;
	u_int16_t	 len;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_asext);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_asext_lsa");

	log_debug("orig_asext_lsa: %s/%d age %d",
	    inet_ntoa(kr->prefix), kr->prefixlen, age);

	/* LSA header */
	lsa->hdr.age = htons(age);
	lsa->hdr.opts = area_ospf_options(NULL);
	lsa->hdr.type = LSA_TYPE_EXTERNAL;
	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
	/* update of seqnum is done by lsa_merge */
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);

	/* prefix and mask */
	lsa->hdr.ls_id = ls_id;
	lsa->data.asext.mask = prefixlen2mask(kr->prefixlen);

	/*
	 * nexthop -- on connected routes we are the nexthop;
	 * in other cases we may announce the true nexthop if the
	 * nexthop is reachable via an OSPF enabled interface, but only
	 * broadcast & NBMA interfaces are considered in that case.
	 * It does not make sense to announce the nexthop of a point-to-point
	 * link since the traffic has to go through this box anyway.
	 * Some implementations actually check that there are multiple
	 * neighbors on the particular segment; we skip that check.
	 */
	iface = rde_asext_lookup(kr->nexthop.s_addr, -1);
	if (kr->flags & F_CONNECTED)
		lsa->data.asext.fw_addr = 0;
	else if (iface && (iface->type == IF_TYPE_BROADCAST ||
	    iface->type == IF_TYPE_NBMA))
		lsa->data.asext.fw_addr = kr->nexthop.s_addr;
	else
		lsa->data.asext.fw_addr = 0;

	lsa->data.asext.metric = htonl(kr->metric);
	lsa->data.asext.ext_tag = htonl(kr->ext_tag);

	lsa->hdr.ls_chksum = 0;
	lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return (lsa);
}

/*
 * summary LSA stuff
 */
void
rde_summary_update(struct rt_node *rte, struct area *area)
{
	struct rt_nexthop	*rn;
	struct rt_node		*nr;
	struct vertex		*v = NULL;
	struct lsa		*lsa;
	u_int8_t		 type = 0;

	/* first check if we actually need to announce this route */
	if (!(rte->d_type == DT_NET || rte->flags & OSPF_RTR_E))
		return;
	/* route is invalid, lsa_remove_invalid_sums() will do the cleanup */
	if (rte->cost >= LS_INFINITY)
		return;
	/* never create summaries for as-ext LSA */
	if (rte->p_type == PT_TYPE1_EXT || rte->p_type == PT_TYPE2_EXT)
		return;
	/* no need for summary LSA in the originating area */
	if (rte->area.s_addr == area->id.s_addr)
		return;
	/* no need to originate inter-area routes to the backbone */
	if (rte->p_type == PT_INTER_AREA && area->id.s_addr == INADDR_ANY)
		return;
	/* nexthop check, nexthop part of area -> no summary */
	TAILQ_FOREACH(rn, &rte->nexthop, entry) {
		if (rn->invalid)
			continue;
		nr = rt_lookup(DT_NET, rn->nexthop.s_addr);
		if (nr && nr->area.s_addr == area->id.s_addr)
			continue;
		break;
	}
	if (rn == NULL)
		/* all nexthops belong to this area or are invalid */
		return;

	/* TODO AS border router specific checks */
	/* TODO inter-area network route stuff */
	/* TODO intra-area stuff -- condense LSA ??? */

	if (rte->d_type == DT_NET) {
		type = LSA_TYPE_SUM_NETWORK;
	} else if (rte->d_type == DT_RTR) {
		if (area->stub)
			/* do not redistribute type 4 LSA into stub areas */
			return;
		type = LSA_TYPE_SUM_ROUTER;
	} else
		fatalx("rde_summary_update: unknown route type");

	/* update lsa but only if it was changed */
	v = lsa_find_area(area, type, rte->prefix.s_addr, rde_router_id());
	lsa = orig_sum_lsa(rte, area, type, rte->invalid);
	lsa_merge(rde_nbr_self(area), lsa, v);

	if (v == NULL)
		v = lsa_find_area(area, type, rte->prefix.s_addr,
		    rde_router_id());

	/* suppressed/deleted routes are not found in the second lsa_find */
	if (v)
		v->cost = rte->cost;
}
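
/* originate a summary network (type 3) or summary router (type 4) LSA */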
struct lsa *
orig_sum_lsa(struct rt_node *rte, struct area *area, u_int8_t type, int invalid)
{
	struct lsa	*lsa;
	u_int16_t	 len;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_sum);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_sum_lsa");

	/* LSA header */
	lsa->hdr.age = htons(invalid ? MAX_AGE : DEFAULT_AGE);
	lsa->hdr.opts = area_ospf_options(area);
	lsa->hdr.type = type;
	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);

	/* prefix and mask */
	/*
	 * TODO ls_id must be unique, for overlapping routes this may
	 * not be true. In this case a hack needs to be done to
	 * make the ls_id unique.
	 */
	lsa->hdr.ls_id = rte->prefix.s_addr;
	if (type == LSA_TYPE_SUM_NETWORK)
		lsa->data.sum.mask = prefixlen2mask(rte->prefixlen);
	else
		lsa->data.sum.mask = 0;		/* must be zero per RFC */

	lsa->data.sum.metric = htonl(rte->cost & LSA_METRIC_MASK);

	lsa->hdr.ls_chksum = 0;
	lsa->hdr.ls_chksum =
	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return (lsa);
}