1 /* $OpenBSD: rde.c,v 1.94 2011/05/09 12:24:41 claudio Exp $ */ 2 3 /* 4 * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org> 5 * Copyright (c) 2004 Esben Norby <norby@openbsd.org> 6 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org> 7 * 8 * Permission to use, copy, modify, and distribute this software for any 9 * purpose with or without fee is hereby granted, provided that the above 10 * copyright notice and this permission notice appear in all copies. 11 * 12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 19 */ 20 21 #include <sys/types.h> 22 #include <sys/socket.h> 23 #include <sys/queue.h> 24 #include <netinet/in.h> 25 #include <arpa/inet.h> 26 #include <err.h> 27 #include <errno.h> 28 #include <stdlib.h> 29 #include <signal.h> 30 #include <string.h> 31 #include <pwd.h> 32 #include <unistd.h> 33 #include <event.h> 34 35 #include "ospf.h" 36 #include "ospfd.h" 37 #include "ospfe.h" 38 #include "log.h" 39 #include "rde.h" 40 41 void rde_sig_handler(int sig, short, void *); 42 void rde_shutdown(void); 43 void rde_dispatch_imsg(int, short, void *); 44 void rde_dispatch_parent(int, short, void *); 45 void rde_dump_area(struct area *, int, pid_t); 46 47 void rde_send_summary(pid_t); 48 void rde_send_summary_area(struct area *, pid_t); 49 void rde_nbr_init(u_int32_t); 50 void rde_nbr_free(void); 51 struct rde_nbr *rde_nbr_find(u_int32_t); 52 struct rde_nbr *rde_nbr_new(u_int32_t, struct rde_nbr *); 53 void rde_nbr_del(struct rde_nbr *); 54 55 void rde_req_list_add(struct rde_nbr *, struct lsa_hdr *); 56 int rde_req_list_exists(struct rde_nbr *, struct lsa_hdr *); 57 void rde_req_list_del(struct rde_nbr *, struct lsa_hdr *); 58 void rde_req_list_free(struct rde_nbr *); 59 60 struct iface *rde_asext_lookup(u_int32_t, int); 61 void rde_asext_get(struct kroute *); 62 void rde_asext_put(struct kroute *); 63 void rde_asext_free(void); 64 struct lsa *orig_asext_lsa(struct kroute *, u_int32_t, u_int16_t); 65 struct lsa *orig_sum_lsa(struct rt_node *, struct area *, u_int8_t, int); 66 67 struct ospfd_conf *rdeconf = NULL, *nconf = NULL; 68 struct imsgev *iev_ospfe; 69 struct imsgev *iev_main; 70 struct rde_nbr *nbrself; 71 struct lsa_tree asext_tree; 72 73 /* ARGSUSED */ 74 void 75 rde_sig_handler(int sig, short event, void *arg) 76 { 77 /* 78 * signal handler rules don't apply, libevent decouples for us 79 */ 80 81 switch (sig) { 82 case SIGINT: 83 case SIGTERM: 84 rde_shutdown(); 85 /* NOTREACHED */ 86 default: 87 fatalx("unexpected signal"); 88 } 89 } 90 91 /* route decision engine */ 92 pid_t 93 rde(struct ospfd_conf *xconf, int pipe_parent2rde[2], int pipe_ospfe2rde[2], 94 int pipe_parent2ospfe[2]) 95 { 96 struct event ev_sigint, ev_sigterm; 97 struct timeval now; 98 struct area *area; 99 struct iface *iface; 100 struct passwd *pw; 101 struct redistribute *r; 102 pid_t pid; 103 104 switch (pid = fork()) { 105 case -1: 106 fatal("cannot fork"); 107 /* NOTREACHED */ 108 case 0: 109 break; 110 default: 111 return (pid); 112 } 113 114 rdeconf = xconf; 115 116 if ((pw = getpwnam(OSPFD_USER)) == NULL) 117 fatal("getpwnam"); 118 119 if (chroot(pw->pw_dir) == -1) 120 fatal("chroot"); 121 if (chdir("/") == -1) 122 fatal("chdir(\"/\")"); 123 124 setproctitle("route decision engine"); 125 ospfd_process = PROC_RDE_ENGINE; 126 127 if (setgroups(1, &pw->pw_gid) || 128 setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) || 129 setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid)) 130 fatal("can't drop privileges"); 131 132 event_init(); 133 rde_nbr_init(NBR_HASHSIZE); 134 lsa_init(&asext_tree); 135 136 /* setup signal handler */ 137 signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL); 138 signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL); 139 signal_add(&ev_sigint, NULL); 140 signal_add(&ev_sigterm, NULL); 141 signal(SIGPIPE, SIG_IGN); 142 signal(SIGHUP, SIG_IGN); 143 144 /* setup pipes */ 145 close(pipe_ospfe2rde[0]); 146 close(pipe_parent2rde[0]); 147 close(pipe_parent2ospfe[0]); 148 close(pipe_parent2ospfe[1]); 149 150 if ((iev_ospfe = malloc(sizeof(struct imsgev))) == NULL || 151 (iev_main = malloc(sizeof(struct imsgev))) == NULL) 152 fatal(NULL); 153 imsg_init(&iev_ospfe->ibuf, pipe_ospfe2rde[1]); 154 iev_ospfe->handler = rde_dispatch_imsg; 155 imsg_init(&iev_main->ibuf, pipe_parent2rde[1]); 156 iev_main->handler = rde_dispatch_parent; 157 158 /* setup event handler */ 159 iev_ospfe->events = EV_READ; 160 event_set(&iev_ospfe->ev, iev_ospfe->ibuf.fd, iev_ospfe->events, 161 iev_ospfe->handler, iev_ospfe); 162 event_add(&iev_ospfe->ev, NULL); 163 164 iev_main->events = EV_READ; 165 event_set(&iev_main->ev, iev_main->ibuf.fd, iev_main->events, 166 iev_main->handler, iev_main); 167 event_add(&iev_main->ev, NULL); 168 169 evtimer_set(&rdeconf->ev, spf_timer, rdeconf); 170 cand_list_init(); 171 rt_init(); 172 173 /* remove unneded stuff from config */ 174 LIST_FOREACH(area, &rdeconf->area_list, entry) 175 LIST_FOREACH(iface, &area->iface_list, entry) 176 md_list_clr(&iface->auth_md_list); 177 178 while ((r = SIMPLEQ_FIRST(&rdeconf->redist_list)) != NULL) { 179 SIMPLEQ_REMOVE_HEAD(&rdeconf->redist_list, entry); 180 free(r); 181 } 182 183 gettimeofday(&now, NULL); 184 rdeconf->uptime = now.tv_sec; 185 186 event_dispatch(); 187 188 rde_shutdown(); 189 /* NOTREACHED */ 190 191 return (0); 192 } 193 194 void 195 rde_shutdown(void) 196 { 197 struct area *a; 198 struct vertex *v, *nv; 199 200 stop_spf_timer(rdeconf); 201 cand_list_clr(); 202 rt_clear(); 203 204 while ((a = LIST_FIRST(&rdeconf->area_list)) != NULL) { 205 LIST_REMOVE(a, entry); 206 area_del(a); 207 } 208 for (v = RB_MIN(lsa_tree, &asext_tree); v != NULL; v = nv) { 209 nv = RB_NEXT(lsa_tree, &asext_tree, v); 210 vertex_free(v); 211 } 212 rde_asext_free(); 213 rde_nbr_free(); 214 kr_shutdown(); 215 216 msgbuf_clear(&iev_ospfe->ibuf.w); 217 free(iev_ospfe); 218 msgbuf_clear(&iev_main->ibuf.w); 219 free(iev_main); 220 free(rdeconf); 221 222 log_info("route decision engine exiting"); 223 _exit(0); 224 } 225 226 int 227 rde_imsg_compose_ospfe(int type, u_int32_t peerid, pid_t pid, void *data, 228 u_int16_t datalen) 229 { 230 return (imsg_compose_event(iev_ospfe, type, peerid, pid, -1, 231 data, datalen)); 232 } 233 234 /* ARGSUSED */ 235 void 236 rde_dispatch_imsg(int fd, short event, void *bula) 237 { 238 struct imsgev *iev = bula; 239 struct imsgbuf *ibuf; 240 struct imsg imsg; 241 struct in_addr aid; 242 struct ls_req_hdr req_hdr; 243 struct lsa_hdr lsa_hdr, *db_hdr; 244 struct rde_nbr rn, *nbr; 245 struct timespec tp; 246 struct lsa *lsa; 247 struct area *area; 248 struct vertex *v; 249 char *buf; 250 ssize_t n; 251 time_t now; 252 int r, state, self, error, shut = 0, verbose; 253 u_int16_t l; 254 255 ibuf = &iev->ibuf; 256 257 if (event & EV_READ) { 258 if ((n = imsg_read(ibuf)) == -1) 259 fatal("imsg_read error"); 260 if (n == 0) /* connection closed */ 261 shut = 1; 262 } 263 if (event & EV_WRITE) { 264 if (msgbuf_write(&ibuf->w) == -1) 265 fatal("msgbuf_write"); 266 } 267 268 clock_gettime(CLOCK_MONOTONIC, &tp); 269 now = tp.tv_sec; 270 271 for (;;) { 272 if ((n = imsg_get(ibuf, &imsg)) == -1) 273 fatal("rde_dispatch_imsg: imsg_read error"); 274 if (n == 0) 275 break; 276 277 switch (imsg.hdr.type) { 278 case IMSG_NEIGHBOR_UP: 279 if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rn)) 280 fatalx("invalid size of OE request"); 281 memcpy(&rn, imsg.data, sizeof(rn)); 282 283 if (rde_nbr_new(imsg.hdr.peerid, &rn) == NULL) 284 fatalx("rde_dispatch_imsg: " 285 "neighbor already exists"); 286 break; 287 case IMSG_NEIGHBOR_DOWN: 288 rde_nbr_del(rde_nbr_find(imsg.hdr.peerid)); 289 break; 290 case IMSG_NEIGHBOR_CHANGE: 291 if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state)) 292 fatalx("invalid size of OE request"); 293 memcpy(&state, imsg.data, sizeof(state)); 294 295 nbr = rde_nbr_find(imsg.hdr.peerid); 296 if (nbr == NULL) 297 break; 298 299 if (state != nbr->state && 300 (nbr->state & NBR_STA_FULL || 301 state & NBR_STA_FULL)) 302 area_track(nbr->area, state); 303 304 nbr->state = state; 305 if (nbr->state & NBR_STA_FULL) 306 rde_req_list_free(nbr); 307 break; 308 case IMSG_NEIGHBOR_CAPA: 309 if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(u_int8_t)) 310 fatalx("invalid size of OE request"); 311 nbr = rde_nbr_find(imsg.hdr.peerid); 312 if (nbr == NULL) 313 break; 314 nbr->capa_options = *(u_int8_t *)imsg.data; 315 break; 316 case IMSG_DB_SNAPSHOT: 317 nbr = rde_nbr_find(imsg.hdr.peerid); 318 if (nbr == NULL) 319 break; 320 321 lsa_snap(nbr); 322 323 imsg_compose_event(iev_ospfe, IMSG_DB_END, imsg.hdr.peerid, 324 0, -1, NULL, 0); 325 break; 326 case IMSG_DD: 327 nbr = rde_nbr_find(imsg.hdr.peerid); 328 if (nbr == NULL) 329 break; 330 331 buf = imsg.data; 332 error = 0; 333 for (l = imsg.hdr.len - IMSG_HEADER_SIZE; 334 l >= sizeof(lsa_hdr); l -= sizeof(lsa_hdr)) { 335 memcpy(&lsa_hdr, buf, sizeof(lsa_hdr)); 336 buf += sizeof(lsa_hdr); 337 338 if (lsa_hdr.type == LSA_TYPE_EXTERNAL && 339 nbr->area->stub) { 340 error = 1; 341 break; 342 } 343 v = lsa_find(nbr->iface, lsa_hdr.type, 344 lsa_hdr.ls_id, lsa_hdr.adv_rtr); 345 if (v == NULL) 346 db_hdr = NULL; 347 else 348 db_hdr = &v->lsa->hdr; 349 350 if (lsa_newer(&lsa_hdr, db_hdr) > 0) { 351 /* 352 * only request LSAs that are 353 * newer or missing 354 */ 355 rde_req_list_add(nbr, &lsa_hdr); 356 imsg_compose_event(iev_ospfe, IMSG_DD, 357 imsg.hdr.peerid, 0, -1, &lsa_hdr, 358 sizeof(lsa_hdr)); 359 } 360 } 361 if (l != 0 && !error) 362 log_warnx("rde_dispatch_imsg: peerid %lu, " 363 "trailing garbage in Database Description " 364 "packet", imsg.hdr.peerid); 365 366 if (!error) 367 imsg_compose_event(iev_ospfe, IMSG_DD_END, 368 imsg.hdr.peerid, 0, -1, NULL, 0); 369 else 370 imsg_compose_event(iev_ospfe, IMSG_DD_BADLSA, 371 imsg.hdr.peerid, 0, -1, NULL, 0); 372 break; 373 case IMSG_LS_REQ: 374 nbr = rde_nbr_find(imsg.hdr.peerid); 375 if (nbr == NULL) 376 break; 377 378 buf = imsg.data; 379 for (l = imsg.hdr.len - IMSG_HEADER_SIZE; 380 l >= sizeof(req_hdr); l -= sizeof(req_hdr)) { 381 memcpy(&req_hdr, buf, sizeof(req_hdr)); 382 buf += sizeof(req_hdr); 383 384 if ((v = lsa_find(nbr->iface, 385 ntohl(req_hdr.type), req_hdr.ls_id, 386 req_hdr.adv_rtr)) == NULL) { 387 log_debug("rde_dispatch_imsg: " 388 "requested LSA not found"); 389 imsg_compose_event(iev_ospfe, 390 IMSG_LS_BADREQ, imsg.hdr.peerid, 391 0, -1, NULL, 0); 392 continue; 393 } 394 imsg_compose_event(iev_ospfe, IMSG_LS_UPD, 395 imsg.hdr.peerid, 0, -1, v->lsa, 396 ntohs(v->lsa->hdr.len)); 397 } 398 if (l != 0) 399 log_warnx("rde_dispatch_imsg: peerid %lu, " 400 "trailing garbage in LS Request " 401 "packet", imsg.hdr.peerid); 402 break; 403 case IMSG_LS_UPD: 404 nbr = rde_nbr_find(imsg.hdr.peerid); 405 if (nbr == NULL) 406 break; 407 408 lsa = malloc(imsg.hdr.len - IMSG_HEADER_SIZE); 409 if (lsa == NULL) 410 fatal(NULL); 411 memcpy(lsa, imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE); 412 413 if (!lsa_check(nbr, lsa, 414 imsg.hdr.len - IMSG_HEADER_SIZE)) { 415 free(lsa); 416 break; 417 } 418 419 v = lsa_find(nbr->iface, lsa->hdr.type, lsa->hdr.ls_id, 420 lsa->hdr.adv_rtr); 421 if (v == NULL) 422 db_hdr = NULL; 423 else 424 db_hdr = &v->lsa->hdr; 425 426 if (nbr->self) { 427 lsa_merge(nbr, lsa, v); 428 /* lsa_merge frees the right lsa */ 429 break; 430 } 431 432 r = lsa_newer(&lsa->hdr, db_hdr); 433 if (r > 0) { 434 /* new LSA newer than DB */ 435 if (v && v->flooded && 436 v->changed + MIN_LS_ARRIVAL >= now) { 437 free(lsa); 438 break; 439 } 440 441 rde_req_list_del(nbr, &lsa->hdr); 442 443 if (!(self = lsa_self(nbr, lsa, v))) 444 if (lsa_add(nbr, lsa)) 445 /* delayed lsa */ 446 break; 447 448 /* flood and perhaps ack LSA */ 449 imsg_compose_event(iev_ospfe, IMSG_LS_FLOOD, 450 imsg.hdr.peerid, 0, -1, lsa, 451 ntohs(lsa->hdr.len)); 452 453 /* reflood self originated LSA */ 454 if (self && v) 455 imsg_compose_event(iev_ospfe, 456 IMSG_LS_FLOOD, v->peerid, 0, -1, 457 v->lsa, ntohs(v->lsa->hdr.len)); 458 /* new LSA was not added so free it */ 459 if (self) 460 free(lsa); 461 } else if (r < 0) { 462 /* 463 * point 6 of "The Flooding Procedure" 464 * We are violating the RFC here because 465 * it does not make sense to reset a session 466 * because an equal LSA is already in the table. 467 * Only if the LSA sent is older than the one 468 * in the table we should reset the session. 469 */ 470 if (rde_req_list_exists(nbr, &lsa->hdr)) { 471 imsg_compose_event(iev_ospfe, 472 IMSG_LS_BADREQ, imsg.hdr.peerid, 473 0, -1, NULL, 0); 474 free(lsa); 475 break; 476 } 477 478 /* lsa no longer needed */ 479 free(lsa); 480 481 /* new LSA older than DB */ 482 if (ntohl(db_hdr->seq_num) == MAX_SEQ_NUM && 483 ntohs(db_hdr->age) == MAX_AGE) 484 /* seq-num wrap */ 485 break; 486 487 if (v->changed + MIN_LS_ARRIVAL >= now) 488 break; 489 490 /* directly send current LSA, no ack */ 491 imsg_compose_event(iev_ospfe, IMSG_LS_UPD, 492 imsg.hdr.peerid, 0, -1, v->lsa, 493 ntohs(v->lsa->hdr.len)); 494 } else { 495 /* LSA equal send direct ack */ 496 imsg_compose_event(iev_ospfe, IMSG_LS_ACK, 497 imsg.hdr.peerid, 0, -1, &lsa->hdr, 498 sizeof(lsa->hdr)); 499 free(lsa); 500 } 501 break; 502 case IMSG_LS_MAXAGE: 503 nbr = rde_nbr_find(imsg.hdr.peerid); 504 if (nbr == NULL) 505 break; 506 507 if (imsg.hdr.len != IMSG_HEADER_SIZE + 508 sizeof(struct lsa_hdr)) 509 fatalx("invalid size of OE request"); 510 memcpy(&lsa_hdr, imsg.data, sizeof(lsa_hdr)); 511 512 if (rde_nbr_loading(nbr->area)) 513 break; 514 515 v = lsa_find(nbr->iface, lsa_hdr.type, lsa_hdr.ls_id, 516 lsa_hdr.adv_rtr); 517 if (v == NULL) 518 db_hdr = NULL; 519 else 520 db_hdr = &v->lsa->hdr; 521 522 /* 523 * only delete LSA if the one in the db is not newer 524 */ 525 if (lsa_newer(db_hdr, &lsa_hdr) <= 0) 526 lsa_del(nbr, &lsa_hdr); 527 break; 528 case IMSG_CTL_SHOW_DATABASE: 529 case IMSG_CTL_SHOW_DB_EXT: 530 case IMSG_CTL_SHOW_DB_NET: 531 case IMSG_CTL_SHOW_DB_RTR: 532 case IMSG_CTL_SHOW_DB_SELF: 533 case IMSG_CTL_SHOW_DB_SUM: 534 case IMSG_CTL_SHOW_DB_ASBR: 535 case IMSG_CTL_SHOW_DB_OPAQ: 536 if (imsg.hdr.len != IMSG_HEADER_SIZE && 537 imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(aid)) { 538 log_warnx("rde_dispatch_imsg: wrong imsg len"); 539 break; 540 } 541 if (imsg.hdr.len == IMSG_HEADER_SIZE) { 542 LIST_FOREACH(area, &rdeconf->area_list, entry) { 543 rde_dump_area(area, imsg.hdr.type, 544 imsg.hdr.pid); 545 } 546 lsa_dump(&asext_tree, imsg.hdr.type, 547 imsg.hdr.pid); 548 } else { 549 memcpy(&aid, imsg.data, sizeof(aid)); 550 if ((area = area_find(rdeconf, aid)) != NULL) { 551 rde_dump_area(area, imsg.hdr.type, 552 imsg.hdr.pid); 553 if (!area->stub) 554 lsa_dump(&asext_tree, 555 imsg.hdr.type, 556 imsg.hdr.pid); 557 } 558 } 559 imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0, 560 imsg.hdr.pid, -1, NULL, 0); 561 break; 562 case IMSG_CTL_SHOW_RIB: 563 LIST_FOREACH(area, &rdeconf->area_list, entry) { 564 imsg_compose_event(iev_ospfe, IMSG_CTL_AREA, 565 0, imsg.hdr.pid, -1, area, sizeof(*area)); 566 567 rt_dump(area->id, imsg.hdr.pid, RIB_RTR); 568 rt_dump(area->id, imsg.hdr.pid, RIB_NET); 569 } 570 aid.s_addr = 0; 571 rt_dump(aid, imsg.hdr.pid, RIB_EXT); 572 573 imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0, 574 imsg.hdr.pid, -1, NULL, 0); 575 break; 576 case IMSG_CTL_SHOW_SUM: 577 rde_send_summary(imsg.hdr.pid); 578 LIST_FOREACH(area, &rdeconf->area_list, entry) 579 rde_send_summary_area(area, imsg.hdr.pid); 580 imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0, 581 imsg.hdr.pid, -1, NULL, 0); 582 break; 583 case IMSG_CTL_LOG_VERBOSE: 584 /* already checked by ospfe */ 585 memcpy(&verbose, imsg.data, sizeof(verbose)); 586 log_verbose(verbose); 587 break; 588 default: 589 log_debug("rde_dispatch_imsg: unexpected imsg %d", 590 imsg.hdr.type); 591 break; 592 } 593 imsg_free(&imsg); 594 } 595 if (!shut) 596 imsg_event_add(iev); 597 else { 598 /* this pipe is dead, so remove the event handler */ 599 event_del(&iev->ev); 600 event_loopexit(NULL); 601 } 602 } 603 604 /* ARGSUSED */ 605 void 606 rde_dispatch_parent(int fd, short event, void *bula) 607 { 608 static struct area *narea; 609 struct iface *niface; 610 struct imsg imsg; 611 struct kroute rr; 612 struct imsgev *iev = bula; 613 struct imsgbuf *ibuf; 614 struct redistribute *nred; 615 ssize_t n; 616 int shut = 0; 617 618 ibuf = &iev->ibuf; 619 620 if (event & EV_READ) { 621 if ((n = imsg_read(ibuf)) == -1) 622 fatal("imsg_read error"); 623 if (n == 0) /* connection closed */ 624 shut = 1; 625 } 626 if (event & EV_WRITE) { 627 if (msgbuf_write(&ibuf->w) == -1) 628 fatal("msgbuf_write"); 629 } 630 631 for (;;) { 632 if ((n = imsg_get(ibuf, &imsg)) == -1) 633 fatal("rde_dispatch_parent: imsg_read error"); 634 if (n == 0) 635 break; 636 637 switch (imsg.hdr.type) { 638 case IMSG_NETWORK_ADD: 639 if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) { 640 log_warnx("rde_dispatch_parent: " 641 "wrong imsg len"); 642 break; 643 } 644 memcpy(&rr, imsg.data, sizeof(rr)); 645 rde_asext_get(&rr); 646 break; 647 case IMSG_NETWORK_DEL: 648 if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) { 649 log_warnx("rde_dispatch_parent: " 650 "wrong imsg len"); 651 break; 652 } 653 memcpy(&rr, imsg.data, sizeof(rr)); 654 rde_asext_put(&rr); 655 break; 656 case IMSG_RECONF_CONF: 657 if ((nconf = malloc(sizeof(struct ospfd_conf))) == 658 NULL) 659 fatal(NULL); 660 memcpy(nconf, imsg.data, sizeof(struct ospfd_conf)); 661 662 LIST_INIT(&nconf->area_list); 663 LIST_INIT(&nconf->cand_list); 664 break; 665 case IMSG_RECONF_AREA: 666 if ((narea = area_new()) == NULL) 667 fatal(NULL); 668 memcpy(narea, imsg.data, sizeof(struct area)); 669 670 LIST_INIT(&narea->iface_list); 671 LIST_INIT(&narea->nbr_list); 672 RB_INIT(&narea->lsa_tree); 673 SIMPLEQ_INIT(&narea->redist_list); 674 675 LIST_INSERT_HEAD(&nconf->area_list, narea, entry); 676 break; 677 case IMSG_RECONF_REDIST: 678 if ((nred= malloc(sizeof(struct redistribute))) == NULL) 679 fatal(NULL); 680 memcpy(nred, imsg.data, sizeof(struct redistribute)); 681 682 SIMPLEQ_INSERT_TAIL(&narea->redist_list, nred, entry); 683 break; 684 case IMSG_RECONF_IFACE: 685 if ((niface = malloc(sizeof(struct iface))) == NULL) 686 fatal(NULL); 687 memcpy(niface, imsg.data, sizeof(struct iface)); 688 689 LIST_INIT(&niface->nbr_list); 690 TAILQ_INIT(&niface->ls_ack_list); 691 TAILQ_INIT(&niface->auth_md_list); 692 RB_INIT(&niface->lsa_tree); 693 694 niface->area = narea; 695 LIST_INSERT_HEAD(&narea->iface_list, niface, entry); 696 697 break; 698 case IMSG_RECONF_END: 699 merge_config(rdeconf, nconf); 700 nconf = NULL; 701 break; 702 default: 703 log_debug("rde_dispatch_parent: unexpected imsg %d", 704 imsg.hdr.type); 705 break; 706 } 707 imsg_free(&imsg); 708 } 709 if (!shut) 710 imsg_event_add(iev); 711 else { 712 /* this pipe is dead, so remove the event handler */ 713 event_del(&iev->ev); 714 event_loopexit(NULL); 715 } 716 } 717 718 void 719 rde_dump_area(struct area *area, int imsg_type, pid_t pid) 720 { 721 struct iface *iface; 722 723 /* dump header */ 724 imsg_compose_event(iev_ospfe, IMSG_CTL_AREA, 0, pid, -1, 725 area, sizeof(*area)); 726 727 /* dump link local lsa */ 728 LIST_FOREACH(iface, &area->iface_list, entry) { 729 imsg_compose_event(iev_ospfe, IMSG_CTL_IFACE, 730 0, pid, -1, iface, sizeof(*iface)); 731 lsa_dump(&iface->lsa_tree, imsg_type, pid); 732 } 733 734 /* dump area lsa */ 735 lsa_dump(&area->lsa_tree, imsg_type, pid); 736 } 737 738 u_int32_t 739 rde_router_id(void) 740 { 741 return (rdeconf->rtr_id.s_addr); 742 } 743 744 struct area * 745 rde_backbone_area(void) 746 { 747 struct in_addr id; 748 749 id.s_addr = INADDR_ANY; 750 751 return (area_find(rdeconf, id)); 752 } 753 754 void 755 rde_send_change_kroute(struct rt_node *r) 756 { 757 int krcount = 0; 758 struct kroute kr; 759 struct rt_nexthop *rn; 760 struct ibuf *wbuf; 761 762 if ((wbuf = imsg_create(&iev_main->ibuf, IMSG_KROUTE_CHANGE, 0, 0, 763 sizeof(kr))) == NULL) { 764 return; 765 } 766 767 TAILQ_FOREACH(rn, &r->nexthop, entry) { 768 if (rn->invalid) 769 continue; 770 krcount++; 771 772 bzero(&kr, sizeof(kr)); 773 kr.prefix.s_addr = r->prefix.s_addr; 774 kr.nexthop.s_addr = rn->nexthop.s_addr; 775 kr.prefixlen = r->prefixlen; 776 kr.ext_tag = r->ext_tag; 777 imsg_add(wbuf, &kr, sizeof(kr)); 778 } 779 if (krcount == 0) 780 fatalx("rde_send_change_kroute: no valid nexthop found"); 781 imsg_close(&iev_main->ibuf, wbuf); 782 imsg_event_add(iev_main); 783 } 784 785 void 786 rde_send_delete_kroute(struct rt_node *r) 787 { 788 struct kroute kr; 789 790 bzero(&kr, sizeof(kr)); 791 kr.prefix.s_addr = r->prefix.s_addr; 792 kr.prefixlen = r->prefixlen; 793 794 imsg_compose_event(iev_main, IMSG_KROUTE_DELETE, 0, 0, -1, 795 &kr, sizeof(kr)); 796 } 797 798 void 799 rde_send_summary(pid_t pid) 800 { 801 static struct ctl_sum sumctl; 802 struct timeval now; 803 struct area *area; 804 struct vertex *v; 805 806 bzero(&sumctl, sizeof(struct ctl_sum)); 807 808 sumctl.rtr_id.s_addr = rde_router_id(); 809 sumctl.spf_delay = rdeconf->spf_delay; 810 sumctl.spf_hold_time = rdeconf->spf_hold_time; 811 812 LIST_FOREACH(area, &rdeconf->area_list, entry) 813 sumctl.num_area++; 814 815 RB_FOREACH(v, lsa_tree, &asext_tree) { 816 sumctl.num_ext_lsa++; 817 sumctl.ext_lsa_cksum += ntohs(v->lsa->hdr.ls_chksum); 818 819 } 820 821 gettimeofday(&now, NULL); 822 if (rdeconf->uptime < now.tv_sec) 823 sumctl.uptime = now.tv_sec - rdeconf->uptime; 824 else 825 sumctl.uptime = 0; 826 827 sumctl.rfc1583compat = rdeconf->rfc1583compat; 828 829 rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM, 0, pid, &sumctl, 830 sizeof(sumctl)); 831 } 832 833 void 834 rde_send_summary_area(struct area *area, pid_t pid) 835 { 836 static struct ctl_sum_area sumareactl; 837 struct iface *iface; 838 struct rde_nbr *nbr; 839 struct lsa_tree *tree = &area->lsa_tree; 840 struct vertex *v; 841 842 bzero(&sumareactl, sizeof(struct ctl_sum_area)); 843 844 sumareactl.area.s_addr = area->id.s_addr; 845 sumareactl.num_spf_calc = area->num_spf_calc; 846 847 LIST_FOREACH(iface, &area->iface_list, entry) 848 sumareactl.num_iface++; 849 850 LIST_FOREACH(nbr, &area->nbr_list, entry) 851 if (nbr->state == NBR_STA_FULL && !nbr->self) 852 sumareactl.num_adj_nbr++; 853 854 RB_FOREACH(v, lsa_tree, tree) { 855 sumareactl.num_lsa++; 856 sumareactl.lsa_cksum += ntohs(v->lsa->hdr.ls_chksum); 857 } 858 859 rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM_AREA, 0, pid, &sumareactl, 860 sizeof(sumareactl)); 861 } 862 863 LIST_HEAD(rde_nbr_head, rde_nbr); 864 865 struct nbr_table { 866 struct rde_nbr_head *hashtbl; 867 u_int32_t hashmask; 868 } rdenbrtable; 869 870 #define RDE_NBR_HASH(x) \ 871 &rdenbrtable.hashtbl[(x) & rdenbrtable.hashmask] 872 873 void 874 rde_nbr_init(u_int32_t hashsize) 875 { 876 struct rde_nbr_head *head; 877 u_int32_t hs, i; 878 879 for (hs = 1; hs < hashsize; hs <<= 1) 880 ; 881 rdenbrtable.hashtbl = calloc(hs, sizeof(struct rde_nbr_head)); 882 if (rdenbrtable.hashtbl == NULL) 883 fatal("rde_nbr_init"); 884 885 for (i = 0; i < hs; i++) 886 LIST_INIT(&rdenbrtable.hashtbl[i]); 887 888 rdenbrtable.hashmask = hs - 1; 889 890 if ((nbrself = calloc(1, sizeof(*nbrself))) == NULL) 891 fatal("rde_nbr_init"); 892 893 nbrself->id.s_addr = rde_router_id(); 894 nbrself->peerid = NBR_IDSELF; 895 nbrself->state = NBR_STA_DOWN; 896 nbrself->self = 1; 897 head = RDE_NBR_HASH(NBR_IDSELF); 898 LIST_INSERT_HEAD(head, nbrself, hash); 899 } 900 901 void 902 rde_nbr_free(void) 903 { 904 free(nbrself); 905 free(rdenbrtable.hashtbl); 906 } 907 908 struct rde_nbr * 909 rde_nbr_find(u_int32_t peerid) 910 { 911 struct rde_nbr_head *head; 912 struct rde_nbr *nbr; 913 914 head = RDE_NBR_HASH(peerid); 915 916 LIST_FOREACH(nbr, head, hash) { 917 if (nbr->peerid == peerid) 918 return (nbr); 919 } 920 921 return (NULL); 922 } 923 924 struct rde_nbr * 925 rde_nbr_new(u_int32_t peerid, struct rde_nbr *new) 926 { 927 struct rde_nbr_head *head; 928 struct rde_nbr *nbr; 929 struct area *area; 930 struct iface *iface; 931 932 if (rde_nbr_find(peerid)) 933 return (NULL); 934 if ((area = area_find(rdeconf, new->area_id)) == NULL) 935 fatalx("rde_nbr_new: unknown area"); 936 937 LIST_FOREACH(iface, &area->iface_list, entry) { 938 if (iface->ifindex == new->ifindex) 939 break; 940 } 941 if (iface == NULL) 942 fatalx("rde_nbr_new: unknown interface"); 943 944 if ((nbr = calloc(1, sizeof(*nbr))) == NULL) 945 fatal("rde_nbr_new"); 946 947 memcpy(nbr, new, sizeof(*nbr)); 948 nbr->peerid = peerid; 949 nbr->area = area; 950 nbr->iface = iface; 951 952 TAILQ_INIT(&nbr->req_list); 953 954 head = RDE_NBR_HASH(peerid); 955 LIST_INSERT_HEAD(head, nbr, hash); 956 LIST_INSERT_HEAD(&area->nbr_list, nbr, entry); 957 958 return (nbr); 959 } 960 961 void 962 rde_nbr_del(struct rde_nbr *nbr) 963 { 964 if (nbr == NULL) 965 return; 966 967 rde_req_list_free(nbr); 968 969 LIST_REMOVE(nbr, entry); 970 LIST_REMOVE(nbr, hash); 971 972 free(nbr); 973 } 974 975 int 976 rde_nbr_loading(struct area *area) 977 { 978 struct rde_nbr *nbr; 979 int checkall = 0; 980 981 if (area == NULL) { 982 area = LIST_FIRST(&rdeconf->area_list); 983 checkall = 1; 984 } 985 986 while (area != NULL) { 987 LIST_FOREACH(nbr, &area->nbr_list, entry) { 988 if (nbr->self) 989 continue; 990 if (nbr->state & NBR_STA_XCHNG || 991 nbr->state & NBR_STA_LOAD) 992 return (1); 993 } 994 if (!checkall) 995 break; 996 area = LIST_NEXT(area, entry); 997 } 998 999 return (0); 1000 } 1001 1002 struct rde_nbr * 1003 rde_nbr_self(struct area *area) 1004 { 1005 struct rde_nbr *nbr; 1006 1007 LIST_FOREACH(nbr, &area->nbr_list, entry) 1008 if (nbr->self) 1009 return (nbr); 1010 1011 /* this may not happen */ 1012 fatalx("rde_nbr_self: area without self"); 1013 return (NULL); 1014 } 1015 1016 /* 1017 * LSA req list 1018 */ 1019 void 1020 rde_req_list_add(struct rde_nbr *nbr, struct lsa_hdr *lsa) 1021 { 1022 struct rde_req_entry *le; 1023 1024 if ((le = calloc(1, sizeof(*le))) == NULL) 1025 fatal("rde_req_list_add"); 1026 1027 TAILQ_INSERT_TAIL(&nbr->req_list, le, entry); 1028 le->type = lsa->type; 1029 le->ls_id = lsa->ls_id; 1030 le->adv_rtr = lsa->adv_rtr; 1031 } 1032 1033 int 1034 rde_req_list_exists(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr) 1035 { 1036 struct rde_req_entry *le; 1037 1038 TAILQ_FOREACH(le, &nbr->req_list, entry) { 1039 if ((lsa_hdr->type == le->type) && 1040 (lsa_hdr->ls_id == le->ls_id) && 1041 (lsa_hdr->adv_rtr == le->adv_rtr)) 1042 return (1); 1043 } 1044 return (0); 1045 } 1046 1047 void 1048 rde_req_list_del(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr) 1049 { 1050 struct rde_req_entry *le; 1051 1052 TAILQ_FOREACH(le, &nbr->req_list, entry) { 1053 if ((lsa_hdr->type == le->type) && 1054 (lsa_hdr->ls_id == le->ls_id) && 1055 (lsa_hdr->adv_rtr == le->adv_rtr)) { 1056 TAILQ_REMOVE(&nbr->req_list, le, entry); 1057 free(le); 1058 return; 1059 } 1060 } 1061 } 1062 1063 void 1064 rde_req_list_free(struct rde_nbr *nbr) 1065 { 1066 struct rde_req_entry *le; 1067 1068 while ((le = TAILQ_FIRST(&nbr->req_list)) != NULL) { 1069 TAILQ_REMOVE(&nbr->req_list, le, entry); 1070 free(le); 1071 } 1072 } 1073 1074 /* 1075 * as-external LSA handling 1076 */ 1077 struct asext_node { 1078 RB_ENTRY(asext_node) entry; 1079 struct kroute r; 1080 u_int32_t ls_id; 1081 }; 1082 1083 static __inline int asext_compare(struct asext_node *, struct asext_node *); 1084 struct asext_node *asext_find(u_int32_t, u_int8_t); 1085 1086 RB_HEAD(asext_tree, asext_node) ast; 1087 RB_PROTOTYPE(asext_tree, asext_node, entry, asext_compare) 1088 RB_GENERATE(asext_tree, asext_node, entry, asext_compare) 1089 1090 static __inline int 1091 asext_compare(struct asext_node *a, struct asext_node *b) 1092 { 1093 if (ntohl(a->r.prefix.s_addr) < ntohl(b->r.prefix.s_addr)) 1094 return (-1); 1095 if (ntohl(a->r.prefix.s_addr) > ntohl(b->r.prefix.s_addr)) 1096 return (1); 1097 if (a->r.prefixlen < b->r.prefixlen) 1098 return (-1); 1099 if (a->r.prefixlen > b->r.prefixlen) 1100 return (1); 1101 return (0); 1102 } 1103 1104 struct asext_node * 1105 asext_find(u_int32_t addr, u_int8_t prefixlen) 1106 { 1107 struct asext_node a; 1108 1109 a.r.prefix.s_addr = addr; 1110 a.r.prefixlen = prefixlen; 1111 1112 return (RB_FIND(asext_tree, &ast, &a)); 1113 } 1114 1115 struct iface * 1116 rde_asext_lookup(u_int32_t prefix, int plen) 1117 { 1118 struct area *area; 1119 struct iface *iface; 1120 1121 LIST_FOREACH(area, &rdeconf->area_list, entry) { 1122 LIST_FOREACH(iface, &area->iface_list, entry) { 1123 if ((iface->addr.s_addr & iface->mask.s_addr) == 1124 (prefix & iface->mask.s_addr) && (plen == -1 || 1125 iface->mask.s_addr == prefixlen2mask(plen))) 1126 return (iface); 1127 } 1128 } 1129 return (NULL); 1130 } 1131 1132 void 1133 rde_asext_get(struct kroute *rr) 1134 { 1135 struct asext_node *an, *oan; 1136 struct vertex *v; 1137 struct lsa *lsa; 1138 u_int32_t mask; 1139 1140 if (rde_asext_lookup(rr->prefix.s_addr, rr->prefixlen)) { 1141 /* already announced as (stub) net LSA */ 1142 log_debug("rde_asext_get: %s/%d is net LSA", 1143 inet_ntoa(rr->prefix), rr->prefixlen); 1144 return; 1145 } 1146 1147 an = asext_find(rr->prefix.s_addr, rr->prefixlen); 1148 if (an == NULL) { 1149 if ((an = calloc(1, sizeof(*an))) == NULL) 1150 fatal("rde_asext_get"); 1151 bcopy(rr, &an->r, sizeof(*rr)); 1152 an->ls_id = rr->prefix.s_addr; 1153 RB_INSERT(asext_tree, &ast, an); 1154 } else { 1155 /* the bcopy does not change the lookup key so it is save */ 1156 bcopy(rr, &an->r, sizeof(*rr)); 1157 } 1158 1159 /* 1160 * ls_id must be unique, for overlapping routes this may 1161 * not be true. In this case a unique ls_id needs to be found. 1162 * The algorithm will change the ls_id of the less specific 1163 * route. E.g. in the case of 10.0.0.0/16 and 10.0.0.0/24 1164 * 10.0.0.0/24 will get the 10.0.0.0 ls_id and 10.0.0.0/16 1165 * will change the ls_id to 10.0.255.255 and see if that is unique. 1166 */ 1167 oan = an; 1168 mask = prefixlen2mask(oan->r.prefixlen); 1169 v = lsa_find(NULL, LSA_TYPE_EXTERNAL, oan->ls_id, 1170 rdeconf->rtr_id.s_addr); 1171 while (v && v->lsa->data.asext.mask != mask) { 1172 /* conflict needs to be resolved. change less specific lsa */ 1173 if (ntohl(v->lsa->data.asext.mask) < ntohl(mask)) { 1174 /* lsa to insert is more specific, fix other lsa */ 1175 mask = v->lsa->data.asext.mask; 1176 oan = asext_find(v->lsa->hdr.ls_id & mask, 1177 mask2prefixlen(mask)); 1178 if (oan == NULL) 1179 fatalx("as-ext LSA DB corrupted"); 1180 } 1181 /* oan is less specific and needs new ls_id */ 1182 if (oan->ls_id == oan->r.prefix.s_addr) 1183 oan->ls_id |= ~mask; 1184 else { 1185 u_int32_t tmp = ntohl(oan->ls_id); 1186 oan->ls_id = htonl(tmp - 1); 1187 if (oan->ls_id == oan->r.prefix.s_addr) { 1188 log_warnx("prefix %s/%d can not be " 1189 "redistributed, no unique ls_id found.", 1190 inet_ntoa(rr->prefix), rr->prefixlen); 1191 RB_REMOVE(asext_tree, &ast, an); 1192 free(an); 1193 return; 1194 } 1195 } 1196 mask = prefixlen2mask(oan->r.prefixlen); 1197 v = lsa_find(NULL, LSA_TYPE_EXTERNAL, oan->ls_id, 1198 rdeconf->rtr_id.s_addr); 1199 } 1200 1201 v = lsa_find(NULL, LSA_TYPE_EXTERNAL, an->ls_id, 1202 rdeconf->rtr_id.s_addr); 1203 lsa = orig_asext_lsa(rr, an->ls_id, DEFAULT_AGE); 1204 lsa_merge(nbrself, lsa, v); 1205 1206 if (oan != an) { 1207 v = lsa_find(NULL, LSA_TYPE_EXTERNAL, oan->ls_id, 1208 rdeconf->rtr_id.s_addr); 1209 lsa = orig_asext_lsa(&oan->r, oan->ls_id, DEFAULT_AGE); 1210 lsa_merge(nbrself, lsa, v); 1211 } 1212 } 1213 1214 void 1215 rde_asext_put(struct kroute *rr) 1216 { 1217 struct asext_node *an; 1218 struct vertex *v; 1219 struct lsa *lsa; 1220 1221 /* 1222 * just try to remove the LSA. If the prefix is announced as 1223 * stub net LSA asext_find() will fail and nothing will happen. 1224 */ 1225 an = asext_find(rr->prefix.s_addr, rr->prefixlen); 1226 if (an == NULL) { 1227 log_debug("rde_asext_put: NO SUCH LSA %s/%d", 1228 inet_ntoa(rr->prefix), rr->prefixlen); 1229 return; 1230 } 1231 1232 /* inherit metric and ext_tag from the current LSA, 1233 * some routers don't like to get withdraws that are 1234 * different from what they have in their table. 1235 */ 1236 v = lsa_find(NULL, LSA_TYPE_EXTERNAL, an->ls_id, 1237 rdeconf->rtr_id.s_addr); 1238 if (v != NULL) { 1239 rr->metric = ntohl(v->lsa->data.asext.metric); 1240 rr->ext_tag = ntohl(v->lsa->data.asext.ext_tag); 1241 } 1242 1243 /* remove by reflooding with MAX_AGE */ 1244 lsa = orig_asext_lsa(rr, an->ls_id, MAX_AGE); 1245 lsa_merge(nbrself, lsa, v); 1246 1247 RB_REMOVE(asext_tree, &ast, an); 1248 free(an); 1249 } 1250 1251 void 1252 rde_asext_free(void) 1253 { 1254 struct asext_node *an, *nan; 1255 1256 for (an = RB_MIN(asext_tree, &ast); an != NULL; an = nan) { 1257 nan = RB_NEXT(asext_tree, &ast, an); 1258 RB_REMOVE(asext_tree, &ast, an); 1259 free(an); 1260 } 1261 } 1262 1263 struct lsa * 1264 orig_asext_lsa(struct kroute *rr, u_int32_t ls_id, u_int16_t age) 1265 { 1266 struct lsa *lsa; 1267 struct iface *iface; 1268 u_int16_t len; 1269 1270 len = sizeof(struct lsa_hdr) + sizeof(struct lsa_asext); 1271 if ((lsa = calloc(1, len)) == NULL) 1272 fatal("orig_asext_lsa"); 1273 1274 log_debug("orig_asext_lsa: %s/%d age %d", 1275 inet_ntoa(rr->prefix), rr->prefixlen, age); 1276 1277 /* LSA header */ 1278 lsa->hdr.age = htons(age); 1279 lsa->hdr.opts = area_ospf_options(NULL); 1280 lsa->hdr.type = LSA_TYPE_EXTERNAL; 1281 lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr; 1282 /* update of seqnum is done by lsa_merge */ 1283 lsa->hdr.seq_num = htonl(INIT_SEQ_NUM); 1284 lsa->hdr.len = htons(len); 1285 1286 /* prefix and mask */ 1287 lsa->hdr.ls_id = ls_id; 1288 lsa->data.asext.mask = prefixlen2mask(rr->prefixlen); 1289 1290 /* 1291 * nexthop -- on connected routes we are the nexthop, 1292 * in other cases we may announce the true nexthop if the 1293 * nexthop is reachable via an OSPF enabled interface but only 1294 * broadcast & NBMA interfaces are considered in that case. 1295 * It does not make sense to announce the nexthop of a point-to-point 1296 * link since the traffic has to go through this box anyway. 1297 * Some implementations actually check that there are multiple 1298 * neighbors on the particular segment, we skip that check. 1299 */ 1300 iface = rde_asext_lookup(rr->nexthop.s_addr, -1); 1301 if (rr->flags & F_FORCED_NEXTHOP) 1302 lsa->data.asext.fw_addr = rr->nexthop.s_addr; 1303 else if (rr->flags & F_CONNECTED) 1304 lsa->data.asext.fw_addr = 0; 1305 else if (iface && (iface->type == IF_TYPE_BROADCAST || 1306 iface->type == IF_TYPE_NBMA)) 1307 lsa->data.asext.fw_addr = rr->nexthop.s_addr; 1308 else 1309 lsa->data.asext.fw_addr = 0; 1310 1311 lsa->data.asext.metric = htonl(rr->metric); 1312 lsa->data.asext.ext_tag = htonl(rr->ext_tag); 1313 1314 lsa->hdr.ls_chksum = 0; 1315 lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET)); 1316 1317 return (lsa); 1318 } 1319 1320 /* 1321 * summary LSA stuff 1322 */ 1323 void 1324 rde_summary_update(struct rt_node *rte, struct area *area) 1325 { 1326 struct rt_nexthop *rn; 1327 struct rt_node *nr; 1328 struct vertex *v = NULL; 1329 struct lsa *lsa; 1330 u_int8_t type = 0; 1331 1332 /* first check if we actually need to announce this route */ 1333 if (!(rte->d_type == DT_NET || rte->flags & OSPF_RTR_E)) 1334 return; 1335 /* never create summaries for as-ext LSA */ 1336 if (rte->p_type == PT_TYPE1_EXT || rte->p_type == PT_TYPE2_EXT) 1337 return; 1338 /* no need for summary LSA in the originating area */ 1339 if (rte->area.s_addr == area->id.s_addr) 1340 return; 1341 /* no need to originate inter-area routes to the backbone */ 1342 if (rte->p_type == PT_INTER_AREA && area->id.s_addr == INADDR_ANY) 1343 return; 1344 /* nexthop check, nexthop part of area -> no summary */ 1345 TAILQ_FOREACH(rn, &rte->nexthop, entry) { 1346 nr = rt_lookup(DT_NET, rn->nexthop.s_addr); 1347 if (nr && nr->area.s_addr == area->id.s_addr) 1348 continue; 1349 break; 1350 } 1351 if (rn == NULL) /* all nexthops belong to this area */ 1352 return; 1353 1354 if (rte->cost >= LS_INFINITY) 1355 return; 1356 /* TODO AS border router specific checks */ 1357 /* TODO inter-area network route stuff */ 1358 /* TODO intra-area stuff -- condense LSA ??? */ 1359 1360 if (rte->d_type == DT_NET) { 1361 type = LSA_TYPE_SUM_NETWORK; 1362 } else if (rte->d_type == DT_RTR) { 1363 if (area->stub) 1364 /* do not redistribute type 4 LSA into stub areas */ 1365 return; 1366 type = LSA_TYPE_SUM_ROUTER; 1367 } else 1368 fatalx("rde_summary_update: unknown route type"); 1369 1370 /* update lsa but only if it was changed */ 1371 v = lsa_find_area(area, type, rte->prefix.s_addr, rde_router_id()); 1372 lsa = orig_sum_lsa(rte, area, type, rte->invalid); 1373 lsa_merge(rde_nbr_self(area), lsa, v); 1374 1375 if (v == NULL) 1376 v = lsa_find_area(area, type, rte->prefix.s_addr, 1377 rde_router_id()); 1378 1379 /* suppressed/deleted routes are not found in the second lsa_find */ 1380 if (v) 1381 v->cost = rte->cost; 1382 } 1383 1384 struct lsa * 1385 orig_sum_lsa(struct rt_node *rte, struct area *area, u_int8_t type, int invalid) 1386 { 1387 struct lsa *lsa; 1388 u_int16_t len; 1389 1390 len = sizeof(struct lsa_hdr) + sizeof(struct lsa_sum); 1391 if ((lsa = calloc(1, len)) == NULL) 1392 fatal("orig_sum_lsa"); 1393 1394 /* LSA header */ 1395 lsa->hdr.age = htons(invalid ? MAX_AGE : DEFAULT_AGE); 1396 lsa->hdr.opts = area_ospf_options(area); 1397 lsa->hdr.type = type; 1398 lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr; 1399 lsa->hdr.seq_num = htonl(INIT_SEQ_NUM); 1400 lsa->hdr.len = htons(len); 1401 1402 /* prefix and mask */ 1403 /* 1404 * TODO ls_id must be unique, for overlapping routes this may 1405 * not be true. In this case a hack needs to be done to 1406 * make the ls_id unique. 1407 */ 1408 lsa->hdr.ls_id = rte->prefix.s_addr; 1409 if (type == LSA_TYPE_SUM_NETWORK) 1410 lsa->data.sum.mask = prefixlen2mask(rte->prefixlen); 1411 else 1412 lsa->data.sum.mask = 0; /* must be zero per RFC */ 1413 1414 lsa->data.sum.metric = htonl(rte->cost & LSA_METRIC_MASK); 1415 1416 lsa->hdr.ls_chksum = 0; 1417 lsa->hdr.ls_chksum = 1418 htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET)); 1419 1420 return (lsa); 1421 } 1422