1 /* $OpenBSD: rde.c,v 1.66 2016/06/06 15:57:44 benno Exp $ */ 2 3 /* 4 * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org> 5 * Copyright (c) 2004 Esben Norby <norby@openbsd.org> 6 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org> 7 * 8 * Permission to use, copy, modify, and distribute this software for any 9 * purpose with or without fee is hereby granted, provided that the above 10 * copyright notice and this permission notice appear in all copies. 11 * 12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 19 */ 20 21 #include <sys/types.h> 22 #include <sys/socket.h> 23 #include <sys/queue.h> 24 #include <net/if_types.h> 25 #include <netinet/in.h> 26 #include <arpa/inet.h> 27 #include <err.h> 28 #include <errno.h> 29 #include <stdlib.h> 30 #include <signal.h> 31 #include <string.h> 32 #include <pwd.h> 33 #include <unistd.h> 34 #include <event.h> 35 36 #include "ospf6.h" 37 #include "ospf6d.h" 38 #include "ospfe.h" 39 #include "log.h" 40 #include "rde.h" 41 42 #define MINIMUM(a, b) (((a) < (b)) ? 
(a) : (b)) 43 44 void rde_sig_handler(int sig, short, void *); 45 void rde_shutdown(void); 46 void rde_dispatch_imsg(int, short, void *); 47 void rde_dispatch_parent(int, short, void *); 48 void rde_dump_area(struct area *, int, pid_t); 49 50 void rde_send_summary(pid_t); 51 void rde_send_summary_area(struct area *, pid_t); 52 void rde_nbr_init(u_int32_t); 53 void rde_nbr_free(void); 54 struct rde_nbr *rde_nbr_new(u_int32_t, struct rde_nbr *); 55 void rde_nbr_del(struct rde_nbr *); 56 57 void rde_req_list_add(struct rde_nbr *, struct lsa_hdr *); 58 int rde_req_list_exists(struct rde_nbr *, struct lsa_hdr *); 59 void rde_req_list_del(struct rde_nbr *, struct lsa_hdr *); 60 void rde_req_list_free(struct rde_nbr *); 61 62 struct lsa *rde_asext_get(struct rroute *); 63 struct lsa *rde_asext_put(struct rroute *); 64 65 int comp_asext(struct lsa *, struct lsa *); 66 struct lsa *orig_asext_lsa(struct rroute *, u_int16_t); 67 struct lsa *orig_sum_lsa(struct rt_node *, struct area *, u_int8_t, int); 68 struct lsa *orig_intra_lsa_net(struct area *, struct iface *, 69 struct vertex *); 70 struct lsa *orig_intra_lsa_rtr(struct area *, struct vertex *); 71 void append_prefix_lsa(struct lsa **, u_int16_t *, 72 struct lsa_prefix *); 73 74 /* A 32-bit value != any ifindex. 75 * We assume ifindex is bound by [1, USHRT_MAX] inclusive. 
 */
#define LS_ID_INTRA_RTR	0x01000000

/* Tree of prefixes with global scope on given a link,
 * see orig_intra_lsa_*() */
struct prefix_node {
	RB_ENTRY(prefix_node)	 entry;		/* tree linkage */
	struct lsa_prefix	*prefix;	/* points into an LSA body, not owned */
};
RB_HEAD(prefix_tree, prefix_node);
RB_PROTOTYPE(prefix_tree, prefix_node, entry, prefix_compare);
int		 prefix_compare(struct prefix_node *, struct prefix_node *);
void		 prefix_tree_add(struct prefix_tree *, struct lsa_link *);

/* running config and, during reload, the config being assembled */
struct ospfd_conf	*rdeconf = NULL, *nconf = NULL;
struct imsgev		*iev_ospfe;	/* pipe to the OSPF engine process */
struct imsgev		*iev_main;	/* pipe to the parent (main) process */
struct rde_nbr		*nbrself;	/* pseudo-neighbor for self-originated LSAs */
struct lsa_tree		 asext_tree;	/* AS-external LSAs (AS scope, not per area) */

/*
 * Signal handler for the RDE process: SIGINT/SIGTERM trigger an orderly
 * shutdown; anything else delivered here is a bug.
 */
/* ARGSUSED */
void
rde_sig_handler(int sig, short event, void *arg)
{
	/*
	 * signal handler rules don't apply, libevent decouples for us
	 */

	switch (sig) {
	case SIGINT:
	case SIGTERM:
		rde_shutdown();
		/* NOTREACHED */
	default:
		fatalx("unexpected signal");
	}
}

/*
 * Fork and run the route decision engine.  Returns the child pid to the
 * caller (the parent); the child never returns — it drops privileges,
 * sets up the imsg pipes to ospfe and the parent, and enters the event
 * loop until shut down.
 */
/* route decision engine */
pid_t
rde(struct ospfd_conf *xconf, int pipe_parent2rde[2], int pipe_ospfe2rde[2],
    int pipe_parent2ospfe[2])
{
	struct event		 ev_sigint, ev_sigterm;
	struct timeval		 now;
	struct passwd		*pw;
	struct redistribute	*r;
	pid_t			 pid;

	switch (pid = fork()) {
	case -1:
		fatal("cannot fork");
		/* NOTREACHED */
	case 0:
		break;
	default:
		return (pid);
	}

	rdeconf = xconf;

	if ((pw = getpwnam(OSPF6D_USER)) == NULL)
		fatal("getpwnam");

	/* chroot to the unprivileged user's home before dropping ids */
	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	setproctitle("route decision engine");
	ospfd_process = PROC_RDE_ENGINE;

	/* order matters: groups first, then gid, then uid */
	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

	event_init();
	rde_nbr_init(NBR_HASHSIZE);
	lsa_init(&asext_tree);

	/* setup signal handler */
	signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
	signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
	signal_add(&ev_sigint, NULL);
	signal_add(&ev_sigterm, NULL);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);

	/* setup pipes: close the ends this process does not use */
	close(pipe_ospfe2rde[0]);
	close(pipe_parent2rde[0]);
	close(pipe_parent2ospfe[0]);
	close(pipe_parent2ospfe[1]);

	if ((iev_ospfe = malloc(sizeof(struct imsgev))) == NULL ||
	    (iev_main = malloc(sizeof(struct imsgev))) == NULL)
		fatal(NULL);
	imsg_init(&iev_ospfe->ibuf, pipe_ospfe2rde[1]);
	iev_ospfe->handler = rde_dispatch_imsg;
	imsg_init(&iev_main->ibuf, pipe_parent2rde[1]);
	iev_main->handler = rde_dispatch_parent;

	/* setup event handler */
	iev_ospfe->events = EV_READ;
	event_set(&iev_ospfe->ev, iev_ospfe->ibuf.fd, iev_ospfe->events,
	    iev_ospfe->handler, iev_ospfe);
	event_add(&iev_ospfe->ev, NULL);

	iev_main->events = EV_READ;
	event_set(&iev_main->ev, iev_main->ibuf.fd, iev_main->events,
	    iev_main->handler, iev_main);
	event_add(&iev_main->ev, NULL);

	evtimer_set(&rdeconf->ev, spf_timer, rdeconf);
	cand_list_init();
	rt_init();

	/* the redistribute list is only needed in the parent; free our copy */
	while ((r = SIMPLEQ_FIRST(&rdeconf->redist_list)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&rdeconf->redist_list, entry);
		free(r);
	}

	gettimeofday(&now, NULL);
	rdeconf->uptime = now.tv_sec;

	event_dispatch();

	rde_shutdown();
	/* NOTREACHED */

	return (0);
}

/*
 * Tear down all RDE state (SPF timer, candidate list, routing table,
 * areas, neighbors, imsg buffers) and exit the process.  Never returns.
 */
void
rde_shutdown(void)
{
	struct area	*a;

	stop_spf_timer(rdeconf);
	cand_list_clr();
	rt_clear();

	while ((a = LIST_FIRST(&rdeconf->area_list)) != NULL) {
		LIST_REMOVE(a, entry);
		area_del(a);
	}
	rde_nbr_free();

	msgbuf_clear(&iev_ospfe->ibuf.w);
	free(iev_ospfe);
	msgbuf_clear(&iev_main->ibuf.w);
	free(iev_main);
	free(rdeconf);

	log_info("route decision engine exiting");
	_exit(0);
}

/*
 * Convenience wrapper: queue an imsg of `type' with payload `data' of
 * `datalen' bytes on the pipe to the OSPF engine.  Returns the result of
 * imsg_compose_event().
 */
int
rde_imsg_compose_ospfe(int type, u_int32_t peerid, pid_t pid, void *data,
    u_int16_t datalen)
{
	return (imsg_compose_event(iev_ospfe, type, peerid, pid, -1,
	    data, datalen));
}
/*
 * Event handler for the pipe from the OSPF engine (ospfe).  Reads and
 * processes all pending imsgs: neighbor lifecycle, database exchange
 * (DD/LS_REQ/LS_UPD/LS_MAXAGE) per the OSPF flooding procedure, and
 * the ctl/show requests that are answered from RDE state.
 */
/* ARGSUSED */
void
rde_dispatch_imsg(int fd, short event, void *bula)
{
	struct imsgev		*iev = bula;
	struct imsgbuf		*ibuf = &iev->ibuf;
	struct imsg		 imsg;
	struct in_addr		 aid;
	struct ls_req_hdr	 req_hdr;
	struct lsa_hdr		 lsa_hdr, *db_hdr;
	struct rde_nbr		 rn, *nbr;
	struct timespec		 tp;
	struct lsa		*lsa;
	struct area		*area;
	struct vertex		*v;
	char			*buf;
	ssize_t			 n;
	time_t			 now;
	int			 r, state, self, shut = 0, verbose;
	u_int16_t		 l;

	if (event & EV_READ) {
		if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
	}
	if (event & EV_WRITE) {
		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
			fatal("msgbuf_write");
		if (n == 0)	/* connection closed */
			shut = 1;
	}

	/* monotonic time: used for the MIN_LS_ARRIVAL rate limit below */
	clock_gettime(CLOCK_MONOTONIC, &tp);
	now = tp.tv_sec;

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_imsg: imsg_get error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_NEIGHBOR_UP:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rn))
				fatalx("invalid size of OE request");
			memcpy(&rn, imsg.data, sizeof(rn));

			if (rde_nbr_new(imsg.hdr.peerid, &rn) == NULL)
				fatalx("rde_dispatch_imsg: "
				    "neighbor already exists");
			break;
		case IMSG_NEIGHBOR_DOWN:
			rde_nbr_del(rde_nbr_find(imsg.hdr.peerid));
			break;
		case IMSG_NEIGHBOR_CHANGE:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state))
				fatalx("invalid size of OE request");
			memcpy(&state, imsg.data, sizeof(state));

			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			/* regenerate intra-area-prefix LSAs when an
			 * adjacency transitions into or out of FULL */
			if (state != nbr->state &&
			    (nbr->state & NBR_STA_FULL ||
			    state & NBR_STA_FULL)) {
				nbr->state = state;
				area_track(nbr->area, state);
				orig_intra_area_prefix_lsas(nbr->area);
			}

			nbr->state = state;
			if (nbr->state & NBR_STA_FULL)
				rde_req_list_free(nbr);
			break;
		case IMSG_DB_SNAPSHOT:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			/* send a database snapshot, then mark its end */
			lsa_snap(nbr, imsg.hdr.peerid);

			imsg_compose_event(iev_ospfe, IMSG_DB_END, imsg.hdr.peerid,
			    0, -1, NULL, 0);
			break;
		case IMSG_DD:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			/* walk the list of LSA headers in the DD packet */
			buf = imsg.data;
			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
			    l >= sizeof(lsa_hdr); l -= sizeof(lsa_hdr)) {
				memcpy(&lsa_hdr, buf, sizeof(lsa_hdr));
				buf += sizeof(lsa_hdr);

				v = lsa_find(nbr->iface, lsa_hdr.type,
				    lsa_hdr.ls_id, lsa_hdr.adv_rtr);
				if (v == NULL)
					db_hdr = NULL;
				else
					db_hdr = &v->lsa->hdr;

				if (lsa_newer(&lsa_hdr, db_hdr) > 0) {
					/*
					 * only request LSAs that are
					 * newer or missing
					 */
					rde_req_list_add(nbr, &lsa_hdr);
					imsg_compose_event(iev_ospfe, IMSG_DD,
					    imsg.hdr.peerid, 0, -1, &lsa_hdr,
					    sizeof(lsa_hdr));
				}
			}
			if (l != 0)
				log_warnx("rde_dispatch_imsg: peerid %u, "
				    "trailing garbage in Database Description "
				    "packet", imsg.hdr.peerid);

			imsg_compose_event(iev_ospfe, IMSG_DD_END,
			    imsg.hdr.peerid, 0, -1, NULL, 0);
			break;
		case IMSG_LS_REQ:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			/* answer each requested LSA from our database */
			buf = imsg.data;
			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
			    l >= sizeof(req_hdr); l -= sizeof(req_hdr)) {
				memcpy(&req_hdr, buf, sizeof(req_hdr));
				buf += sizeof(req_hdr);

				if ((v = lsa_find(nbr->iface,
				    req_hdr.type, req_hdr.ls_id,
				    req_hdr.adv_rtr)) == NULL) {
					/* request for unknown LSA: BadLSReq */
					imsg_compose_event(iev_ospfe,
					    IMSG_LS_BADREQ, imsg.hdr.peerid,
					    0, -1, NULL, 0);
					continue;
				}
				imsg_compose_event(iev_ospfe, IMSG_LS_UPD,
				    imsg.hdr.peerid, 0, -1, v->lsa,
				    ntohs(v->lsa->hdr.len));
			}
			if (l != 0)
				log_warnx("rde_dispatch_imsg: peerid %u, "
				    "trailing garbage in LS Request "
				    "packet", imsg.hdr.peerid);
			break;
		case IMSG_LS_UPD:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			lsa = malloc(imsg.hdr.len - IMSG_HEADER_SIZE);
			if (lsa == NULL)
				fatal(NULL);
			memcpy(lsa, imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);

			if (!lsa_check(nbr, lsa,
			    imsg.hdr.len - IMSG_HEADER_SIZE)) {
				free(lsa);
				break;
			}

			v = lsa_find(nbr->iface, lsa->hdr.type, lsa->hdr.ls_id,
			    lsa->hdr.adv_rtr);
			if (v == NULL)
				db_hdr = NULL;
			else
				db_hdr = &v->lsa->hdr;

			if (nbr->self) {
				lsa_merge(nbr, lsa, v);
				/* lsa_merge frees the right lsa */
				break;
			}

			r = lsa_newer(&lsa->hdr, db_hdr);
			if (r > 0) {
				/* new LSA newer than DB */
				if (v && v->flooded &&
				    v->changed + MIN_LS_ARRIVAL >= now) {
					/* rate-limit: drop copies arriving
					 * faster than MIN_LS_ARRIVAL */
					free(lsa);
					break;
				}

				rde_req_list_del(nbr, &lsa->hdr);

				self = lsa_self(lsa);
				if (self) {
					if (v == NULL)
						/* LSA is no longer announced,
						 * remove by premature aging. */
						lsa_flush(nbr, lsa);
					else
						lsa_reflood(v, lsa);
				} else if (lsa_add(nbr, lsa))
					/* delayed lsa, don't flood yet */
					break;

				/* flood and perhaps ack LSA */
				imsg_compose_event(iev_ospfe, IMSG_LS_FLOOD,
				    imsg.hdr.peerid, 0, -1, lsa,
				    ntohs(lsa->hdr.len));

				/* reflood self originated LSA */
				if (self && v)
					imsg_compose_event(iev_ospfe,
					    IMSG_LS_FLOOD, v->peerid, 0, -1,
					    v->lsa, ntohs(v->lsa->hdr.len));
				/* new LSA was not added so free it */
				if (self)
					free(lsa);
			} else if (r < 0) {
				/*
				 * point 6 of "The Flooding Procedure"
				 * We are violating the RFC here because
				 * it does not make sense to reset a session
				 * because an equal LSA is already in the table.
				 * Only if the LSA sent is older than the one
				 * in the table we should reset the session.
				 */
				if (rde_req_list_exists(nbr, &lsa->hdr)) {
					imsg_compose_event(iev_ospfe,
					    IMSG_LS_BADREQ, imsg.hdr.peerid,
					    0, -1, NULL, 0);
					free(lsa);
					break;
				}

				/* lsa no longer needed */
				free(lsa);

				/* new LSA older than DB */
				if (ntohl(db_hdr->seq_num) == MAX_SEQ_NUM &&
				    ntohs(db_hdr->age) == MAX_AGE)
					/* seq-num wrap */
					break;

				if (v->changed + MIN_LS_ARRIVAL >= now)
					break;

				/* directly send current LSA, no ack */
				imsg_compose_event(iev_ospfe, IMSG_LS_UPD,
				    imsg.hdr.peerid, 0, -1, v->lsa,
				    ntohs(v->lsa->hdr.len));
			} else {
				/* LSA equal send direct ack */
				imsg_compose_event(iev_ospfe, IMSG_LS_ACK,
				    imsg.hdr.peerid, 0, -1, &lsa->hdr,
				    sizeof(lsa->hdr));
				free(lsa);
			}
			break;
		case IMSG_LS_MAXAGE:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct lsa_hdr))
				fatalx("invalid size of OE request");
			memcpy(&lsa_hdr, imsg.data, sizeof(lsa_hdr));

			/* don't age out LSAs while a neighbor is still
			 * loading its database from us */
			if (rde_nbr_loading(nbr->area))
				break;

			v = lsa_find(nbr->iface, lsa_hdr.type, lsa_hdr.ls_id,
			    lsa_hdr.adv_rtr);
			if (v == NULL)
				db_hdr = NULL;
			else
				db_hdr = &v->lsa->hdr;

			/*
			 * only delete LSA if the one in the db is not newer
			 */
			if (lsa_newer(db_hdr, &lsa_hdr) <= 0)
				lsa_del(nbr, &lsa_hdr);
			break;
		case IMSG_CTL_SHOW_DATABASE:
		case IMSG_CTL_SHOW_DB_EXT:
		case IMSG_CTL_SHOW_DB_LINK:
		case IMSG_CTL_SHOW_DB_NET:
		case IMSG_CTL_SHOW_DB_RTR:
		case IMSG_CTL_SHOW_DB_INTRA:
		case IMSG_CTL_SHOW_DB_SELF:
		case IMSG_CTL_SHOW_DB_SUM:
		case IMSG_CTL_SHOW_DB_ASBR:
			if (imsg.hdr.len != IMSG_HEADER_SIZE &&
			    imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(aid)) {
				log_warnx("rde_dispatch_imsg: wrong imsg len");
				break;
			}
			/* no payload: dump all areas; otherwise only the
			 * requested area id */
			if (imsg.hdr.len == IMSG_HEADER_SIZE) {
				LIST_FOREACH(area, &rdeconf->area_list, entry) {
					rde_dump_area(area, imsg.hdr.type,
					    imsg.hdr.pid);
				}
				lsa_dump(&asext_tree, imsg.hdr.type,
				    imsg.hdr.pid);
			} else {
				memcpy(&aid, imsg.data, sizeof(aid));
				if ((area = area_find(rdeconf, aid)) != NULL) {
					rde_dump_area(area, imsg.hdr.type,
					    imsg.hdr.pid);
					if (!area->stub)
						lsa_dump(&asext_tree,
						    imsg.hdr.type,
						    imsg.hdr.pid);
				}
			}
			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
			    imsg.hdr.pid, -1, NULL, 0);
			break;
		case IMSG_CTL_SHOW_RIB:
			LIST_FOREACH(area, &rdeconf->area_list, entry) {
				imsg_compose_event(iev_ospfe, IMSG_CTL_AREA,
				    0, imsg.hdr.pid, -1, area, sizeof(*area));

				rt_dump(area->id, imsg.hdr.pid, RIB_RTR);
				rt_dump(area->id, imsg.hdr.pid, RIB_NET);
			}
			aid.s_addr = 0;
			rt_dump(aid, imsg.hdr.pid, RIB_EXT);

			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
			    imsg.hdr.pid, -1, NULL, 0);
			break;
		case IMSG_CTL_SHOW_SUM:
			rde_send_summary(imsg.hdr.pid);
			LIST_FOREACH(area, &rdeconf->area_list, entry)
				rde_send_summary_area(area, imsg.hdr.pid);
			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
			    imsg.hdr.pid, -1, NULL, 0);
			break;
		case IMSG_IFINFO:
			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(int))
				fatalx("IFINFO imsg with wrong len");

			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				fatalx("IFINFO imsg with bad peerid");
			memcpy(&nbr->iface->state, imsg.data, sizeof(int));

			/* Resend LSAs if interface state changes. */
			orig_intra_area_prefix_lsas(nbr->area);
			break;
		case IMSG_CTL_LOG_VERBOSE:
			/* already checked by ospfe */
			memcpy(&verbose, imsg.data, sizeof(verbose));
			log_verbose(verbose);
			break;
		default:
			log_debug("rde_dispatch_imsg: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(iev);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&iev->ev);
		event_loopexit(NULL);
	}
}
*/ 593 orig_intra_area_prefix_lsas(nbr->area); 594 break; 595 case IMSG_CTL_LOG_VERBOSE: 596 /* already checked by ospfe */ 597 memcpy(&verbose, imsg.data, sizeof(verbose)); 598 log_verbose(verbose); 599 break; 600 default: 601 log_debug("rde_dispatch_imsg: unexpected imsg %d", 602 imsg.hdr.type); 603 break; 604 } 605 imsg_free(&imsg); 606 } 607 if (!shut) 608 imsg_event_add(iev); 609 else { 610 /* this pipe is dead, so remove the event handler */ 611 event_del(&iev->ev); 612 event_loopexit(NULL); 613 } 614 } 615 616 /* ARGSUSED */ 617 void 618 rde_dispatch_parent(int fd, short event, void *bula) 619 { 620 static struct area *narea; 621 struct area *area; 622 struct iface *iface, *ifp; 623 struct ifaddrchange *ifc; 624 struct iface_addr *ia, *nia; 625 struct imsg imsg; 626 struct kroute kr; 627 struct rroute rr; 628 struct imsgev *iev = bula; 629 struct imsgbuf *ibuf = &iev->ibuf; 630 struct lsa *lsa; 631 struct vertex *v; 632 struct rt_node *rn; 633 ssize_t n; 634 int shut = 0, wasvalid; 635 unsigned int ifindex; 636 637 if (event & EV_READ) { 638 if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN) 639 fatal("imsg_read error"); 640 if (n == 0) /* connection closed */ 641 shut = 1; 642 } 643 if (event & EV_WRITE) { 644 if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN) 645 fatal("msgbuf_write"); 646 if (n == 0) /* connection closed */ 647 shut = 1; 648 } 649 650 for (;;) { 651 if ((n = imsg_get(ibuf, &imsg)) == -1) 652 fatal("rde_dispatch_parent: imsg_get error"); 653 if (n == 0) 654 break; 655 656 switch (imsg.hdr.type) { 657 case IMSG_NETWORK_ADD: 658 if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) { 659 log_warnx("rde_dispatch_parent: " 660 "wrong imsg len"); 661 break; 662 } 663 memcpy(&rr, imsg.data, sizeof(rr)); 664 665 if ((lsa = rde_asext_get(&rr)) != NULL) { 666 v = lsa_find(NULL, lsa->hdr.type, 667 lsa->hdr.ls_id, lsa->hdr.adv_rtr); 668 669 lsa_merge(nbrself, lsa, v); 670 } 671 break; 672 case IMSG_NETWORK_DEL: 673 if (imsg.hdr.len != 
IMSG_HEADER_SIZE + sizeof(rr)) { 674 log_warnx("rde_dispatch_parent: " 675 "wrong imsg len"); 676 break; 677 } 678 memcpy(&rr, imsg.data, sizeof(rr)); 679 680 if ((lsa = rde_asext_put(&rr)) != NULL) { 681 v = lsa_find(NULL, lsa->hdr.type, 682 lsa->hdr.ls_id, lsa->hdr.adv_rtr); 683 684 /* 685 * if v == NULL no LSA is in the table and 686 * nothing has to be done. 687 */ 688 if (v) 689 lsa_merge(nbrself, lsa, v); 690 else 691 free(lsa); 692 } 693 break; 694 case IMSG_KROUTE_GET: 695 if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(kr)) { 696 log_warnx("rde_dispatch_parent: " 697 "wrong imsg len"); 698 break; 699 } 700 memcpy(&kr, imsg.data, sizeof(kr)); 701 702 if ((rn = rt_find(&kr.prefix, kr.prefixlen, 703 DT_NET)) != NULL) 704 rde_send_change_kroute(rn); 705 else 706 /* should not happen */ 707 imsg_compose_event(iev_main, IMSG_KROUTE_DELETE, 708 0, 0, -1, &kr, sizeof(kr)); 709 break; 710 case IMSG_IFINFO: 711 if (imsg.hdr.len != IMSG_HEADER_SIZE + 712 sizeof(struct iface)) 713 fatalx("IFINFO imsg with wrong len"); 714 715 ifp = imsg.data; 716 iface = if_find(ifp->ifindex); 717 if (iface == NULL) 718 fatalx("interface lost in rde"); 719 720 wasvalid = (iface->flags & IFF_UP) && 721 LINK_STATE_IS_UP(iface->linkstate); 722 723 if_update(iface, ifp->mtu, ifp->flags, ifp->if_type, 724 ifp->linkstate, ifp->baudrate); 725 726 /* Resend LSAs if interface state changes. 
*/ 727 if (wasvalid != (iface->flags & IFF_UP) && 728 LINK_STATE_IS_UP(iface->linkstate)) { 729 area = area_find(rdeconf, iface->area_id); 730 if (!area) 731 fatalx("interface lost area"); 732 orig_intra_area_prefix_lsas(area); 733 } 734 break; 735 case IMSG_IFADD: 736 if ((iface = malloc(sizeof(struct iface))) == NULL) 737 fatal(NULL); 738 memcpy(iface, imsg.data, sizeof(struct iface)); 739 740 LIST_INIT(&iface->nbr_list); 741 TAILQ_INIT(&iface->ls_ack_list); 742 RB_INIT(&iface->lsa_tree); 743 744 area = area_find(rdeconf, iface->area_id); 745 LIST_INSERT_HEAD(&area->iface_list, iface, entry); 746 break; 747 case IMSG_IFDELETE: 748 if (imsg.hdr.len != IMSG_HEADER_SIZE + 749 sizeof(ifindex)) 750 fatalx("IFDELETE imsg with wrong len"); 751 752 memcpy(&ifindex, imsg.data, sizeof(ifindex)); 753 iface = if_find(ifindex); 754 if (iface == NULL) 755 fatalx("interface lost in rde"); 756 757 LIST_REMOVE(iface, entry); 758 if_del(iface); 759 break; 760 case IMSG_IFADDRNEW: 761 if (imsg.hdr.len != IMSG_HEADER_SIZE + 762 sizeof(struct ifaddrchange)) 763 fatalx("IFADDRNEW imsg with wrong len"); 764 ifc = imsg.data; 765 766 iface = if_find(ifc->ifindex); 767 if (iface == NULL) 768 fatalx("IFADDRNEW interface lost in rde"); 769 770 if ((ia = calloc(1, sizeof(struct iface_addr))) == 771 NULL) 772 fatal("rde_dispatch_parent IFADDRNEW"); 773 ia->addr = ifc->addr; 774 ia->dstbrd = ifc->dstbrd; 775 ia->prefixlen = ifc->prefixlen; 776 777 TAILQ_INSERT_TAIL(&iface->ifa_list, ia, entry); 778 area = area_find(rdeconf, iface->area_id); 779 if (area) 780 orig_intra_area_prefix_lsas(area); 781 break; 782 case IMSG_IFADDRDEL: 783 if (imsg.hdr.len != IMSG_HEADER_SIZE + 784 sizeof(struct ifaddrchange)) 785 fatalx("IFADDRDEL imsg with wrong len"); 786 ifc = imsg.data; 787 788 iface = if_find(ifc->ifindex); 789 if (iface == NULL) 790 fatalx("IFADDRDEL interface lost in rde"); 791 792 for (ia = TAILQ_FIRST(&iface->ifa_list); ia != NULL; 793 ia = nia) { 794 nia = TAILQ_NEXT(ia, entry); 795 796 if 
(IN6_ARE_ADDR_EQUAL(&ia->addr, 797 &ifc->addr)) { 798 TAILQ_REMOVE(&iface->ifa_list, ia, 799 entry); 800 free(ia); 801 break; 802 } 803 } 804 area = area_find(rdeconf, iface->area_id); 805 if (area) 806 orig_intra_area_prefix_lsas(area); 807 break; 808 case IMSG_RECONF_CONF: 809 if ((nconf = malloc(sizeof(struct ospfd_conf))) == 810 NULL) 811 fatal(NULL); 812 memcpy(nconf, imsg.data, sizeof(struct ospfd_conf)); 813 814 LIST_INIT(&nconf->area_list); 815 LIST_INIT(&nconf->cand_list); 816 break; 817 case IMSG_RECONF_AREA: 818 if ((narea = area_new()) == NULL) 819 fatal(NULL); 820 memcpy(narea, imsg.data, sizeof(struct area)); 821 822 LIST_INIT(&narea->iface_list); 823 LIST_INIT(&narea->nbr_list); 824 RB_INIT(&narea->lsa_tree); 825 826 LIST_INSERT_HEAD(&nconf->area_list, narea, entry); 827 break; 828 case IMSG_RECONF_END: 829 merge_config(rdeconf, nconf); 830 nconf = NULL; 831 break; 832 default: 833 log_debug("rde_dispatch_parent: unexpected imsg %d", 834 imsg.hdr.type); 835 break; 836 } 837 imsg_free(&imsg); 838 } 839 if (!shut) 840 imsg_event_add(iev); 841 else { 842 /* this pipe is dead, so remove the event handler */ 843 event_del(&iev->ev); 844 event_loopexit(NULL); 845 } 846 } 847 848 void 849 rde_dump_area(struct area *area, int imsg_type, pid_t pid) 850 { 851 struct iface *iface; 852 853 /* dump header */ 854 imsg_compose_event(iev_ospfe, IMSG_CTL_AREA, 0, pid, -1, 855 area, sizeof(*area)); 856 857 /* dump link local lsa */ 858 LIST_FOREACH(iface, &area->iface_list, entry) { 859 imsg_compose_event(iev_ospfe, IMSG_CTL_IFACE, 860 0, pid, -1, iface, sizeof(*iface)); 861 lsa_dump(&iface->lsa_tree, imsg_type, pid); 862 } 863 864 /* dump area lsa */ 865 lsa_dump(&area->lsa_tree, imsg_type, pid); 866 } 867 868 u_int32_t 869 rde_router_id(void) 870 { 871 return (rdeconf->rtr_id.s_addr); 872 } 873 874 void 875 rde_send_change_kroute(struct rt_node *r) 876 { 877 struct kroute kr; 878 struct rt_nexthop *rn; 879 880 TAILQ_FOREACH(rn, &r->nexthop, entry) { 881 if 
/*
 * Ask the parent to remove the kernel route for `r' (prefix only;
 * nexthop is irrelevant for deletion).
 */
void
rde_send_delete_kroute(struct rt_node *r)
{
	struct kroute	 kr;

	bzero(&kr, sizeof(kr));
	kr.prefix = r->prefix;
	kr.prefixlen = r->prefixlen;

	imsg_compose_event(iev_main, IMSG_KROUTE_DELETE, 0, 0, -1,
	    &kr, sizeof(kr));
}

/*
 * Send global daemon statistics (router id, SPF timers, area and
 * AS-external LSA counts, uptime) to the control client `pid'.
 */
void
rde_send_summary(pid_t pid)
{
	static struct ctl_sum	 sumctl;
	struct timeval		 now;
	struct area		*area;
	struct vertex		*v;

	bzero(&sumctl, sizeof(struct ctl_sum));

	sumctl.rtr_id.s_addr = rde_router_id();
	sumctl.spf_delay = rdeconf->spf_delay;
	sumctl.spf_hold_time = rdeconf->spf_hold_time;

	LIST_FOREACH(area, &rdeconf->area_list, entry)
		sumctl.num_area++;

	RB_FOREACH(v, lsa_tree, &asext_tree)
		sumctl.num_ext_lsa++;

	gettimeofday(&now, NULL);
	if (rdeconf->uptime < now.tv_sec)
		sumctl.uptime = now.tv_sec - rdeconf->uptime;
	else
		sumctl.uptime = 0;

	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM, 0, pid, &sumctl,
	    sizeof(sumctl));
}

/*
 * Send per-area statistics (interface, adjacent-neighbor and LSA
 * counts, SPF runs) to the control client `pid'.
 */
void
rde_send_summary_area(struct area *area, pid_t pid)
{
	static struct ctl_sum_area	 sumareactl;
	struct iface			*iface;
	struct rde_nbr			*nbr;
	struct lsa_tree			*tree = &area->lsa_tree;
	struct vertex			*v;

	bzero(&sumareactl, sizeof(struct ctl_sum_area));

	sumareactl.area.s_addr = area->id.s_addr;
	sumareactl.num_spf_calc = area->num_spf_calc;

	LIST_FOREACH(iface, &area->iface_list, entry)
		sumareactl.num_iface++;

	/* count only real FULL adjacencies, not the self pseudo-neighbor */
	LIST_FOREACH(nbr, &area->nbr_list, entry)
		if (nbr->state == NBR_STA_FULL && !nbr->self)
			sumareactl.num_adj_nbr++;

	RB_FOREACH(v, lsa_tree, tree)
		sumareactl.num_lsa++;

	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM_AREA, 0, pid, &sumareactl,
	    sizeof(sumareactl));
}

/* Hash table mapping ospfe peerids to RDE neighbor state. */
LIST_HEAD(rde_nbr_head, rde_nbr);

struct nbr_table {
	struct rde_nbr_head	*hashtbl;
	u_int32_t		 hashmask;	/* size - 1, size is a power of two */
} rdenbrtable;

#define RDE_NBR_HASH(x)		\
	&rdenbrtable.hashtbl[(x) & rdenbrtable.hashmask]

/*
 * Allocate the neighbor hash table (rounded up to a power of two so the
 * mask works) and install the `self' pseudo-neighbor used for
 * self-originated LSAs.
 */
void
rde_nbr_init(u_int32_t hashsize)
{
	struct rde_nbr_head	*head;
	u_int32_t		 hs, i;

	/* round up to the next power of two */
	for (hs = 1; hs < hashsize; hs <<= 1)
		;
	rdenbrtable.hashtbl = calloc(hs, sizeof(struct rde_nbr_head));
	if (rdenbrtable.hashtbl == NULL)
		fatal("rde_nbr_init");

	for (i = 0; i < hs; i++)
		LIST_INIT(&rdenbrtable.hashtbl[i]);

	rdenbrtable.hashmask = hs - 1;

	if ((nbrself = calloc(1, sizeof(*nbrself))) == NULL)
		fatal("rde_nbr_init");

	nbrself->id.s_addr = rde_router_id();
	nbrself->peerid = NBR_IDSELF;
	nbrself->state = NBR_STA_DOWN;
	nbrself->self = 1;
	head = RDE_NBR_HASH(NBR_IDSELF);
	LIST_INSERT_HEAD(head, nbrself, hash);
}

/* Release the self pseudo-neighbor and the hash table itself. */
void
rde_nbr_free(void)
{
	free(nbrself);
	free(rdenbrtable.hashtbl);
}

/* Look up a neighbor by ospfe peerid; NULL if unknown. */
struct rde_nbr *
rde_nbr_find(u_int32_t peerid)
{
	struct rde_nbr_head	*head;
	struct rde_nbr		*nbr;

	head = RDE_NBR_HASH(peerid);

	LIST_FOREACH(nbr, head, hash) {
		if (nbr->peerid == peerid)
			return (nbr);
	}

	return (NULL);
}

/*
 * Register a new neighbor under `peerid', copying the template `new'
 * supplied by ospfe.  Returns NULL if the peerid is already known;
 * aborts if the referenced area or interface does not exist.
 */
struct rde_nbr *
rde_nbr_new(u_int32_t peerid, struct rde_nbr *new)
{
	struct rde_nbr_head	*head;
	struct rde_nbr		*nbr;
	struct area		*area;
	struct iface		*iface;

	if (rde_nbr_find(peerid))
		return (NULL);
	if ((area = area_find(rdeconf, new->area_id)) == NULL)
		fatalx("rde_nbr_new: unknown area");

	if ((iface = if_find(new->ifindex)) == NULL)
		fatalx("rde_nbr_new: unknown interface");

	if ((nbr = calloc(1, sizeof(*nbr))) == NULL)
		fatal("rde_nbr_new");

	memcpy(nbr, new, sizeof(*nbr));
	nbr->peerid = peerid;
	nbr->area = area;
	nbr->iface = iface;

	TAILQ_INIT(&nbr->req_list);

	head = RDE_NBR_HASH(peerid);
	LIST_INSERT_HEAD(head, nbr, hash);
	LIST_INSERT_HEAD(&area->nbr_list, nbr, entry);

	return (nbr);
}
unknown area"); 1045 1046 if ((iface = if_find(new->ifindex)) == NULL) 1047 fatalx("rde_nbr_new: unknown interface"); 1048 1049 if ((nbr = calloc(1, sizeof(*nbr))) == NULL) 1050 fatal("rde_nbr_new"); 1051 1052 memcpy(nbr, new, sizeof(*nbr)); 1053 nbr->peerid = peerid; 1054 nbr->area = area; 1055 nbr->iface = iface; 1056 1057 TAILQ_INIT(&nbr->req_list); 1058 1059 head = RDE_NBR_HASH(peerid); 1060 LIST_INSERT_HEAD(head, nbr, hash); 1061 LIST_INSERT_HEAD(&area->nbr_list, nbr, entry); 1062 1063 return (nbr); 1064 } 1065 1066 void 1067 rde_nbr_del(struct rde_nbr *nbr) 1068 { 1069 if (nbr == NULL) 1070 return; 1071 1072 rde_req_list_free(nbr); 1073 1074 LIST_REMOVE(nbr, entry); 1075 LIST_REMOVE(nbr, hash); 1076 1077 free(nbr); 1078 } 1079 1080 int 1081 rde_nbr_loading(struct area *area) 1082 { 1083 struct rde_nbr *nbr; 1084 int checkall = 0; 1085 1086 if (area == NULL) { 1087 area = LIST_FIRST(&rdeconf->area_list); 1088 checkall = 1; 1089 } 1090 1091 while (area != NULL) { 1092 LIST_FOREACH(nbr, &area->nbr_list, entry) { 1093 if (nbr->self) 1094 continue; 1095 if (nbr->state & NBR_STA_XCHNG || 1096 nbr->state & NBR_STA_LOAD) 1097 return (1); 1098 } 1099 if (!checkall) 1100 break; 1101 area = LIST_NEXT(area, entry); 1102 } 1103 1104 return (0); 1105 } 1106 1107 struct rde_nbr * 1108 rde_nbr_self(struct area *area) 1109 { 1110 struct rde_nbr *nbr; 1111 1112 LIST_FOREACH(nbr, &area->nbr_list, entry) 1113 if (nbr->self) 1114 return (nbr); 1115 1116 /* this may not happen */ 1117 fatalx("rde_nbr_self: area without self"); 1118 return (NULL); 1119 } 1120 1121 /* 1122 * LSA req list 1123 */ 1124 void 1125 rde_req_list_add(struct rde_nbr *nbr, struct lsa_hdr *lsa) 1126 { 1127 struct rde_req_entry *le; 1128 1129 if ((le = calloc(1, sizeof(*le))) == NULL) 1130 fatal("rde_req_list_add"); 1131 1132 TAILQ_INSERT_TAIL(&nbr->req_list, le, entry); 1133 le->type = lsa->type; 1134 le->ls_id = lsa->ls_id; 1135 le->adv_rtr = lsa->adv_rtr; 1136 } 1137 1138 int 1139 rde_req_list_exists(struct 
rde_nbr *nbr, struct lsa_hdr *lsa_hdr) 1140 { 1141 struct rde_req_entry *le; 1142 1143 TAILQ_FOREACH(le, &nbr->req_list, entry) { 1144 if ((lsa_hdr->type == le->type) && 1145 (lsa_hdr->ls_id == le->ls_id) && 1146 (lsa_hdr->adv_rtr == le->adv_rtr)) 1147 return (1); 1148 } 1149 return (0); 1150 } 1151 1152 void 1153 rde_req_list_del(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr) 1154 { 1155 struct rde_req_entry *le; 1156 1157 TAILQ_FOREACH(le, &nbr->req_list, entry) { 1158 if ((lsa_hdr->type == le->type) && 1159 (lsa_hdr->ls_id == le->ls_id) && 1160 (lsa_hdr->adv_rtr == le->adv_rtr)) { 1161 TAILQ_REMOVE(&nbr->req_list, le, entry); 1162 free(le); 1163 return; 1164 } 1165 } 1166 } 1167 1168 void 1169 rde_req_list_free(struct rde_nbr *nbr) 1170 { 1171 struct rde_req_entry *le; 1172 1173 while ((le = TAILQ_FIRST(&nbr->req_list)) != NULL) { 1174 TAILQ_REMOVE(&nbr->req_list, le, entry); 1175 free(le); 1176 } 1177 } 1178 1179 /* 1180 * as-external LSA handling 1181 */ 1182 struct lsa * 1183 rde_asext_get(struct rroute *rr) 1184 { 1185 struct area *area; 1186 struct iface *iface; 1187 struct iface_addr *ia; 1188 struct in6_addr addr; 1189 1190 LIST_FOREACH(area, &rdeconf->area_list, entry) 1191 LIST_FOREACH(iface, &area->iface_list, entry) 1192 TAILQ_FOREACH(ia, &iface->ifa_list, entry) { 1193 if (IN6_IS_ADDR_LINKLOCAL(&ia->addr)) 1194 continue; 1195 1196 inet6applymask(&addr, &ia->addr, 1197 rr->kr.prefixlen); 1198 if (!memcmp(&addr, &rr->kr.prefix, 1199 sizeof(addr)) && rr->kr.prefixlen == 1200 ia->prefixlen) { 1201 /* already announced as Prefix LSA */ 1202 log_debug("rde_asext_get: %s/%d is " 1203 "part of prefix LSA", 1204 log_in6addr(&rr->kr.prefix), 1205 rr->kr.prefixlen); 1206 return (NULL); 1207 } 1208 } 1209 1210 /* update of seqnum is done by lsa_merge */ 1211 return (orig_asext_lsa(rr, DEFAULT_AGE)); 1212 } 1213 1214 struct lsa * 1215 rde_asext_put(struct rroute *rr) 1216 { 1217 /* 1218 * just try to remove the LSA. 
If the prefix is announced as 1219 * stub net LSA lsa_find() will fail later and nothing will happen. 1220 */ 1221 1222 /* remove by reflooding with MAX_AGE */ 1223 return (orig_asext_lsa(rr, MAX_AGE)); 1224 } 1225 1226 /* 1227 * summary LSA stuff 1228 */ 1229 void 1230 rde_summary_update(struct rt_node *rte, struct area *area) 1231 { 1232 struct vertex *v = NULL; 1233 //XXX struct lsa *lsa; 1234 u_int16_t type = 0; 1235 1236 /* first check if we actually need to announce this route */ 1237 if (!(rte->d_type == DT_NET || rte->flags & OSPF_RTR_E)) 1238 return; 1239 /* never create summaries for as-ext LSA */ 1240 if (rte->p_type == PT_TYPE1_EXT || rte->p_type == PT_TYPE2_EXT) 1241 return; 1242 /* no need for summary LSA in the originating area */ 1243 if (rte->area.s_addr == area->id.s_addr) 1244 return; 1245 /* no need to originate inter-area routes to the backbone */ 1246 if (rte->p_type == PT_INTER_AREA && area->id.s_addr == INADDR_ANY) 1247 return; 1248 /* TODO nexthop check, nexthop part of area -> no summary */ 1249 if (rte->cost >= LS_INFINITY) 1250 return; 1251 /* TODO AS border router specific checks */ 1252 /* TODO inter-area network route stuff */ 1253 /* TODO intra-area stuff -- condense LSA ??? */ 1254 1255 if (rte->d_type == DT_NET) { 1256 type = LSA_TYPE_INTER_A_PREFIX; 1257 } else if (rte->d_type == DT_RTR) { 1258 type = LSA_TYPE_INTER_A_ROUTER; 1259 } else 1260 1261 #if 0 /* XXX a lot todo */ 1262 /* update lsa but only if it was changed */ 1263 v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id()); 1264 lsa = orig_sum_lsa(rte, area, type, rte->invalid); 1265 lsa_merge(rde_nbr_self(area), lsa, v); 1266 1267 if (v == NULL) 1268 v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id()); 1269 #endif 1270 1271 /* suppressed/deleted routes are not found in the second lsa_find */ 1272 if (v) 1273 v->cost = rte->cost; 1274 } 1275 1276 /* 1277 * Functions for self-originated LSAs 1278 */ 1279 1280 /* Prefix LSAs have variable size. 
We have to be careful to copy the right
 * amount of bytes, and to realloc() the right amount of memory. */

/*
 * Grow *lsa by one lsa_prefix and append a copy of prefix to it,
 * updating *len accordingly.  Aborts the process on allocation failure.
 */
void
append_prefix_lsa(struct lsa **lsa, u_int16_t *len, struct lsa_prefix *prefix)
{
	struct lsa_prefix	*copy;
	unsigned int		 lsa_prefix_len;
	unsigned int		 new_len;
	char			*new_lsa;

	/* on-wire size of this prefix: fixed part plus padded address bytes */
	lsa_prefix_len = sizeof(struct lsa_prefix)
	    + LSA_PREFIXSIZE(prefix->prefixlen);

	new_len = *len + lsa_prefix_len;

	/* Make sure we have enough space for this prefix. */
	if ((new_lsa = realloc(*lsa, new_len)) == NULL)
		fatalx("append_prefix_lsa");

	/* Append prefix to LSA. */
	copy = (struct lsa_prefix *)(new_lsa + *len);
	memcpy(copy, prefix, lsa_prefix_len);
	/* NOTE(review): metric is cleared here -- presumably that field is
	 * reserved in intra-area-prefix LSAs; confirm against RFC 5340. */
	copy->metric = 0;

	*lsa = (struct lsa *)new_lsa;
	*len = new_len;
}

/*
 * RB-tree comparison for prefix nodes: order by raw prefix bytes first,
 * shorter prefixlen before longer on a tie.
 */
int
prefix_compare(struct prefix_node *a, struct prefix_node *b)
{
	struct lsa_prefix	*p;
	struct lsa_prefix	*q;
	int			 i;
	int			 len;

	p = a->prefix;
	q = b->prefix;

	/* only compare the bytes both prefixes actually carry */
	len = MINIMUM(LSA_PREFIXSIZE(p->prefixlen), LSA_PREFIXSIZE(q->prefixlen));

	/* the address bytes follow directly after the lsa_prefix header */
	i = memcmp(p + 1, q + 1, len);
	if (i)
		return (i);
	if (p->prefixlen < q->prefixlen)
		return (-1);
	if (p->prefixlen > q->prefixlen)
		return (1);
	return (0);
}

/*
 * Walk all prefixes carried by a link LSA and insert the usable ones
 * into tree; duplicates have their option bits merged.
 */
void
prefix_tree_add(struct prefix_tree *tree, struct lsa_link *lsa)
{
	struct prefix_node	*old;
	struct prefix_node	*new;
	struct in6_addr		 addr;
	unsigned int		 len;
	unsigned int		 i;
	char			*cur_prefix;

	/* prefixes start right after the lsa_link header */
	cur_prefix = (char *)(lsa + 1);

	for (i = 0; i < ntohl(lsa->numprefix); i++) {
		if ((new = calloc(1, sizeof(*new))) == NULL)
			fatal("prefix_tree_add");
		new->prefix = (struct lsa_prefix *)cur_prefix;

		len = sizeof(*new->prefix)
		    + LSA_PREFIXSIZE(new->prefix->prefixlen);

		/* extract the (possibly truncated) address for the checks */
		bzero(&addr, sizeof(addr));
		memcpy(&addr, new->prefix + 1,
		    LSA_PREFIXSIZE(new->prefix->prefixlen));

		/* skip link-local and no-unicast/local-address prefixes */
		if (!(IN6_IS_ADDR_LINKLOCAL(&addr)) &&
		    (new->prefix->options & OSPF_PREFIX_NU) == 0 &&
		    (new->prefix->options & OSPF_PREFIX_LA) == 0) {
			old = RB_INSERT(prefix_tree, tree, new);
			if (old != NULL) {
				/* duplicate prefix: merge the option bits */
				old->prefix->options |= new->prefix->options;
				free(new);
			}
		}

		cur_prefix = cur_prefix + len;
	}
}

RB_GENERATE(prefix_tree, prefix_node, entry, prefix_compare)

/*
 * Originate the intra-area-prefix LSA referring to the network LSA of
 * the given interface (only meaningful while we are DR).  Returns NULL
 * when nothing needs to be announced or flushed; otherwise a freshly
 * allocated LSA (aged MAX_AGE when it carries no prefixes, to flush).
 */
struct lsa *
orig_intra_lsa_net(struct area *area, struct iface *iface, struct vertex *old)
{
	struct lsa		*lsa;
	struct vertex		*v;
	struct rde_nbr		*nbr;
	struct prefix_node	*node;
	struct prefix_tree	 tree;
	int			 num_full_nbr;
	u_int16_t		 len;
	u_int16_t		 numprefix;

	log_debug("orig_intra_lsa_net: area %s, interface %s",
	    inet_ntoa(area->id), iface->name);

	RB_INIT(&tree);

	if (iface->state & IF_STA_DR) {
		num_full_nbr = 0;
		LIST_FOREACH(nbr, &area->nbr_list, entry) {
			if (nbr->self ||
			    nbr->iface->ifindex != iface->ifindex ||
			    (nbr->state & NBR_STA_FULL) == 0)
				continue;
			num_full_nbr++;
			/* collect prefixes from this neighbor's link LSA */
			v = lsa_find(iface, htons(LSA_TYPE_LINK),
			    htonl(nbr->iface_id), nbr->id.s_addr);
			if (v)
				prefix_tree_add(&tree, &v->lsa->data.link);
		}
		if (num_full_nbr == 0) {
			/* There are no adjacent neighbors on link.
			 * If a copy of this LSA already exists in DB,
			 * it needs to be flushed. orig_intra_lsa_rtr()
			 * will take care of prefixes configured on
			 * this interface. */
			if (!old)
				return NULL;
		} else {
			/* Add our own prefixes configured for this link. */
			v = lsa_find(iface, htons(LSA_TYPE_LINK),
			    htonl(iface->ifindex), rde_router_id());
			if (v)
				prefix_tree_add(&tree, &v->lsa->data.link);
		}
	/* Continue only if a copy of this LSA already exists in DB.
	 * It needs to be flushed.
	 */
	} else if (!old)
		return NULL;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_intra_prefix);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_intra_lsa_net");

	/* this intra-area-prefix LSA references our network LSA */
	lsa->data.pref_intra.ref_type = htons(LSA_TYPE_NETWORK);
	lsa->data.pref_intra.ref_ls_id = htonl(iface->ifindex);
	lsa->data.pref_intra.ref_adv_rtr = rde_router_id();

	/* append all collected prefixes; the LSA grows as needed */
	numprefix = 0;
	RB_FOREACH(node, prefix_tree, &tree) {
		append_prefix_lsa(&lsa, &len, node->prefix);
		numprefix++;
	}

	lsa->data.pref_intra.numprefix = htons(numprefix);

	/* tree nodes only borrow the prefix pointers, free the nodes */
	while (!RB_EMPTY(&tree))
		free(RB_REMOVE(prefix_tree, &tree, RB_ROOT(&tree)));

	/* LSA header */
	/* If numprefix is zero, originate with MAX_AGE to flush LSA. */
	lsa->hdr.age = numprefix == 0 ? htons(MAX_AGE) : htons(DEFAULT_AGE);
	lsa->hdr.type = htons(LSA_TYPE_INTRA_A_PREFIX);
	lsa->hdr.ls_id = htonl(iface->ifindex);
	lsa->hdr.adv_rtr = rde_router_id();
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);
	lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return lsa;
}

/*
 * Originate the intra-area-prefix LSA referring to our router LSA:
 * it carries the prefixes of interfaces not covered by a network LSA
 * (point-to-multipoint, loopback, links without full adjacencies).
 */
struct lsa *
orig_intra_lsa_rtr(struct area *area, struct vertex *old)
{
	char			lsa_prefix_buf[sizeof(struct lsa_prefix)
				    + sizeof(struct in6_addr)];
	struct lsa		*lsa;
	struct lsa_prefix	*lsa_prefix;
	struct in6_addr		*prefix;
	struct iface		*iface;
	struct iface_addr	*ia;
	struct rde_nbr		*nbr;
	u_int16_t		 len;
	u_int16_t		 numprefix;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_intra_prefix);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_intra_lsa_rtr");

	/* this intra-area-prefix LSA references our router LSA */
	lsa->data.pref_intra.ref_type = htons(LSA_TYPE_ROUTER);
	lsa->data.pref_intra.ref_ls_id = 0;
	lsa->data.pref_intra.ref_adv_rtr = rde_router_id();

	numprefix = 0;
	LIST_FOREACH(iface, &area->iface_list, entry) {
		if (!((iface->flags & IFF_UP) &&
		    LINK_STATE_IS_UP(iface->linkstate)))
			/* interface or link state down */
			continue;
		if ((iface->state & IF_STA_DOWN) &&
		    !(iface->cflags & F_IFACE_PASSIVE))
			/* passive interfaces stay in state DOWN */
			continue;

		/* Broadcast links with adjacencies are handled
		 * by orig_intra_lsa_net(), ignore. */
		if (iface->type == IF_TYPE_BROADCAST ||
		    iface->type == IF_TYPE_NBMA) {
			if (iface->state & IF_STA_WAITING)
				/* Skip, we're still waiting for
				 * adjacencies to form. */
				continue;

			/* any fully adjacent neighbor on this link? */
			LIST_FOREACH(nbr, &area->nbr_list, entry)
				if (!nbr->self &&
				    nbr->iface->ifindex == iface->ifindex &&
				    nbr->state & NBR_STA_FULL)
					break;
			if (nbr)
				continue;
		}

		lsa_prefix = (struct lsa_prefix *)lsa_prefix_buf;

		TAILQ_FOREACH(ia, &iface->ifa_list, entry) {
			if (IN6_IS_ADDR_LINKLOCAL(&ia->addr))
				continue;

			bzero(lsa_prefix_buf, sizeof(lsa_prefix_buf));

			/* p2mp and loopback addresses go in as /128 hosts */
			if (iface->type == IF_TYPE_POINTOMULTIPOINT ||
			    iface->state & IF_STA_LOOPBACK) {
				lsa_prefix->prefixlen = 128;
			} else {
				lsa_prefix->prefixlen = ia->prefixlen;
				lsa_prefix->metric = htons(iface->metric);
			}

			if (lsa_prefix->prefixlen == 128)
				lsa_prefix->options |= OSPF_PREFIX_LA;

			log_debug("orig_intra_lsa_rtr: area %s, interface %s: "
			    "%s/%d", inet_ntoa(area->id),
			    iface->name, log_in6addr(&ia->addr),
			    lsa_prefix->prefixlen);

			/* address bytes follow the lsa_prefix header */
			prefix = (struct in6_addr *)(lsa_prefix + 1);
			inet6applymask(prefix, &ia->addr,
			    lsa_prefix->prefixlen);
			append_prefix_lsa(&lsa, &len, lsa_prefix);
			numprefix++;
		}

		/* TODO: Add prefixes of directly attached hosts, too */
		/* TODO: Add prefixes for virtual links */
	}

	/* If no prefixes were included, continue only if a copy of this
	 * LSA already exists in DB. It needs to be flushed.
*/ 1541 if (numprefix == 0 && !old) { 1542 free(lsa); 1543 return NULL; 1544 } 1545 1546 lsa->data.pref_intra.numprefix = htons(numprefix); 1547 1548 /* LSA header */ 1549 /* If numprefix is zero, originate with MAX_AGE to flush LSA. */ 1550 lsa->hdr.age = numprefix == 0 ? htons(MAX_AGE) : htons(DEFAULT_AGE); 1551 lsa->hdr.type = htons(LSA_TYPE_INTRA_A_PREFIX); 1552 lsa->hdr.ls_id = htonl(LS_ID_INTRA_RTR); 1553 lsa->hdr.adv_rtr = rde_router_id(); 1554 lsa->hdr.seq_num = htonl(INIT_SEQ_NUM); 1555 lsa->hdr.len = htons(len); 1556 lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET)); 1557 1558 return lsa; 1559 } 1560 1561 void 1562 orig_intra_area_prefix_lsas(struct area *area) 1563 { 1564 struct lsa *lsa; 1565 struct vertex *old; 1566 struct iface *iface; 1567 1568 LIST_FOREACH(iface, &area->iface_list, entry) { 1569 if (iface->type == IF_TYPE_BROADCAST || 1570 iface->type == IF_TYPE_NBMA) { 1571 old = lsa_find(iface, htons(LSA_TYPE_INTRA_A_PREFIX), 1572 htonl(iface->ifindex), rde_router_id()); 1573 lsa = orig_intra_lsa_net(area, iface, old); 1574 if (lsa) 1575 lsa_merge(rde_nbr_self(area), lsa, old); 1576 } 1577 } 1578 1579 old = lsa_find_tree(&area->lsa_tree, htons(LSA_TYPE_INTRA_A_PREFIX), 1580 htonl(LS_ID_INTRA_RTR), rde_router_id()); 1581 lsa = orig_intra_lsa_rtr(area, old); 1582 if (lsa) 1583 lsa_merge(rde_nbr_self(area), lsa, old); 1584 } 1585 1586 int 1587 comp_asext(struct lsa *a, struct lsa *b) 1588 { 1589 /* compare prefixes, if they are equal or not */ 1590 if (a->data.asext.prefix.prefixlen != b->data.asext.prefix.prefixlen) 1591 return (-1); 1592 return (memcmp( 1593 (char *)a + sizeof(struct lsa_hdr) + sizeof(struct lsa_asext), 1594 (char *)b + sizeof(struct lsa_hdr) + sizeof(struct lsa_asext), 1595 LSA_PREFIXSIZE(a->data.asext.prefix.prefixlen))); 1596 } 1597 1598 struct lsa * 1599 orig_asext_lsa(struct rroute *rr, u_int16_t age) 1600 { 1601 struct lsa *lsa; 1602 u_int32_t ext_tag; 1603 u_int16_t len, ext_off; 1604 1605 len = sizeof(struct 
lsa_hdr) + sizeof(struct lsa_asext) +
	    LSA_PREFIXSIZE(rr->kr.prefixlen);

	/*
	 * nexthop -- on connected routes we are the nexthop,
	 * on all other cases we should announce the true nexthop
	 * unless that nexthop is outside of the ospf cloud.
	 * XXX for now we don't do this.
	 */

	/* the optional external route tag is appended after the prefix */
	ext_off = len;
	if (rr->kr.ext_tag) {
		len += sizeof(ext_tag);
	}
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_asext_lsa");

	log_debug("orig_asext_lsa: %s/%d age %d",
	    log_in6addr(&rr->kr.prefix), rr->kr.prefixlen, age);

	/* LSA header */
	lsa->hdr.age = htons(age);
	lsa->hdr.type = htons(LSA_TYPE_EXTERNAL);
	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);

	/* prefix body follows the asext header */
	lsa->data.asext.prefix.prefixlen = rr->kr.prefixlen;
	memcpy((char *)lsa + sizeof(struct lsa_hdr) + sizeof(struct lsa_asext),
	    &rr->kr.prefix, LSA_PREFIXSIZE(rr->kr.prefixlen));

	/* pick an ls_id unique among our AS-external LSAs for this prefix */
	lsa->hdr.ls_id = lsa_find_lsid(&asext_tree, lsa->hdr.type,
	    lsa->hdr.adv_rtr, comp_asext, lsa);

	if (age == MAX_AGE) {
		/* inherit metric and ext_tag from the current LSA,
		 * some routers don't like to get withdraws that are
		 * different from what they have in their table.
		 */
		struct vertex *v;
		v = lsa_find(NULL, lsa->hdr.type, lsa->hdr.ls_id,
		    lsa->hdr.adv_rtr);
		if (v != NULL) {
			rr->metric = ntohl(v->lsa->data.asext.metric);
			if (rr->metric & LSA_ASEXT_T_FLAG) {
				/* copy the tag stored after the prefix */
				memcpy(&ext_tag, (char *)v->lsa + ext_off,
				    sizeof(ext_tag));
				rr->kr.ext_tag = ntohl(ext_tag);
			}
			rr->metric &= LSA_METRIC_MASK;
		}
	}

	if (rr->kr.ext_tag) {
		/* the T flag signals the presence of an external route tag */
		lsa->data.asext.metric = htonl(rr->metric | LSA_ASEXT_T_FLAG);
		ext_tag = htonl(rr->kr.ext_tag);
		memcpy((char *)lsa + ext_off, &ext_tag, sizeof(ext_tag));
	} else {
		lsa->data.asext.metric = htonl(rr->metric);
	}

	/* checksum is computed with the checksum field zeroed */
	lsa->hdr.ls_chksum = 0;
	lsa->hdr.ls_chksum =
	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return (lsa);
}

/*
 * Originate a summary LSA -- entirely disabled (#if 0); always returns
 * NULL for now.
 */
struct lsa *
orig_sum_lsa(struct rt_node *rte, struct area *area, u_int8_t type, int invalid)
{
#if 0 /* XXX a lot todo */
	struct lsa	*lsa;
	u_int16_t	 len;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_sum);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_sum_lsa");

	/* LSA header */
	lsa->hdr.age = htons(invalid ? MAX_AGE : DEFAULT_AGE);
	lsa->hdr.type = type;
	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);

	/* prefix and mask */
	/*
	 * TODO ls_id must be unique, for overlapping routes this may
	 * not be true. In this case a hack needs to be done to
	 * make the ls_id unique.
	 */
	lsa->hdr.ls_id = rte->prefix.s_addr;
	if (type == LSA_TYPE_SUM_NETWORK)
		lsa->data.sum.mask = prefixlen2mask(rte->prefixlen);
	else
		lsa->data.sum.mask = 0;		/* must be zero per RFC */

	lsa->data.sum.metric = htonl(rte->cost & LSA_METRIC_MASK);

	/* checksum is computed with the checksum field zeroed */
	lsa->hdr.ls_chksum = 0;
	lsa->hdr.ls_chksum =
	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return (lsa);
#endif
	/* not implemented yet, see #if 0 above */
	return NULL;
}