/*	$OpenBSD: rde_spf.c,v 1.76 2015/11/22 13:09:10 claudio Exp $ */

/*
 * Copyright (c) 2005 Esben Norby <norby@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <err.h>
#include <stdlib.h>
#include <string.h>

#include "ospfd.h"
#include "ospf.h"
#include "log.h"
#include "rde.h"

extern struct ospfd_conf	*rdeconf;
TAILQ_HEAD(, vertex)		 cand_list;
RB_HEAD(rt_tree, rt_node)	 rt;
RB_PROTOTYPE(rt_tree, rt_node, entry, rt_compare)
RB_GENERATE(rt_tree, rt_node, entry, rt_compare)
struct vertex			*spf_root = NULL;

void		 calc_nexthop(struct vertex *, struct vertex *,
		     struct area *, struct lsa_rtr_link *);
void		 rt_nexthop_clear(struct rt_node *);
void		 rt_nexthop_add(struct rt_node *, struct v_nexthead *, u_int8_t,
		     struct in_addr);
void		 rt_update(struct in_addr, u_int8_t, struct v_nexthead *, u_int8_t,
		     u_int32_t, u_int32_t, struct in_addr, struct in_addr,
		     enum path_type, enum dst_type, u_int8_t, u_int32_t);
void		 rt_invalidate(struct area *);
struct lsa_rtr_link	*get_rtr_link(struct vertex *, int);
struct lsa_net_link	*get_net_link(struct vertex *, int);
int		 linked(struct vertex *, struct vertex *);

void
spf_calc(struct area *area)
{
	struct vertex		*v, *w;
	struct lsa_rtr_link	*rtr_link = NULL;
	struct lsa_net_link	*net_link;
	u_int32_t		 d;
	int			 i;
	struct in_addr		 addr;

	/* clear SPF tree */
	spf_tree_clr(area);
	cand_list_clr();

	/* initialize SPF tree */
	if ((v = spf_root = lsa_find_area(area, LSA_TYPE_ROUTER,
	    rde_router_id(), rde_router_id())) == NULL) {
		/* empty area because no interface is active */
		return;
	}

	area->transit = 0;
	spf_root->cost = 0;
	w = NULL;

	/* make sure the spf root has a nexthop */
	vertex_nexthop_clear(spf_root);
	vertex_nexthop_add(spf_root, spf_root, 0);

	/* calculate SPF tree */
	do {
		/* loop links */
		for (i = 0; i < lsa_num_links(v); i++) {
			switch (v->type) {
			case LSA_TYPE_ROUTER:
				rtr_link = get_rtr_link(v, i);
				switch (rtr_link->type) {
				case LINK_TYPE_STUB_NET:
					/* skip */
					continue;
				case LINK_TYPE_POINTTOPOINT:
				case LINK_TYPE_VIRTUAL:
					/* find router LSA */
					w = lsa_find_area(area, LSA_TYPE_ROUTER,
					    rtr_link->id, rtr_link->id);
					break;
				case LINK_TYPE_TRANSIT_NET:
					/* find network LSA */
					w = lsa_find_net(area, rtr_link->id);
					break;
				default:
					fatalx("spf_calc: invalid link type");
				}
				break;
			case LSA_TYPE_NETWORK:
				net_link = get_net_link(v, i);
				/* find router LSA */
				w = lsa_find_area(area, LSA_TYPE_ROUTER,
				    net_link->att_rtr, net_link->att_rtr);
				break;
			default:
				fatalx("spf_calc: invalid LSA type");
			}

			if (w == NULL)
				continue;

			if (w->lsa->hdr.age == MAX_AGE)
				continue;

			if (!linked(w, v)) {
				addr.s_addr = htonl(w->ls_id);
				log_debug("spf_calc: w id %s type %d has ",
				    inet_ntoa(addr), w->type);
				addr.s_addr = htonl(v->ls_id);
				log_debug(" no link to v id %s type %d",
				    inet_ntoa(addr), v->type);
				continue;
			}

			if (v->type == LSA_TYPE_ROUTER)
				d = v->cost + ntohs(rtr_link->metric);
			else
				d = v->cost;

			if (cand_list_present(w)) {
				if (d > w->cost)
					continue;
				if (d < w->cost) {
					w->cost = d;
					vertex_nexthop_clear(w);
					calc_nexthop(w, v, area, rtr_link);
					/*
					 * need to readd to candidate list
					 * because the list is sorted
					 */
					TAILQ_REMOVE(&cand_list, w, cand);
					cand_list_add(w);
				} else
					/* equal cost path */
					calc_nexthop(w, v, area, rtr_link);
			} else if (w->cost == LS_INFINITY && d < LS_INFINITY) {
				w->cost = d;

				vertex_nexthop_clear(w);
				calc_nexthop(w, v, area, rtr_link);
				cand_list_add(w);
			}
		}

		/* get next vertex */
		v = cand_list_pop();
		w = NULL;
	} while (v != NULL);

	/* spf_dump(area); */
	log_debug("spf_calc: area %s calculated", inet_ntoa(area->id));

	area->num_spf_calc++;
	start_spf_timer();
}
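
/*
 * rt_calc() turns one vertex of the freshly built SPF tree into routing
 * table entries: stub networks and ABR/ASBR host routes from router LSAs,
 * transit networks from network LSAs, and inter-area routes from summary
 * LSAs resolved via the advertising ABR's own vertex.  This roughly
 * corresponds to RFC 2328, sections 16.1 (second stage) and 16.2.
 */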

void
rt_calc(struct vertex *v, struct area *area, struct ospfd_conf *conf)
{
	struct vertex		*w;
	struct v_nexthop	*vn;
	struct lsa_rtr_link	*rtr_link = NULL;
	int			 i;
	struct in_addr		 addr, adv_rtr;

	lsa_age(v);
	if (ntohs(v->lsa->hdr.age) == MAX_AGE)
		return;

	switch (v->type) {
	case LSA_TYPE_ROUTER:
		/* stub networks */
		if (v->cost >= LS_INFINITY)
			return;

		for (i = 0; i < lsa_num_links(v); i++) {
			rtr_link = get_rtr_link(v, i);
			if (rtr_link->type != LINK_TYPE_STUB_NET)
				continue;

			addr.s_addr = rtr_link->id;
			adv_rtr.s_addr = htonl(v->adv_rtr);

			rt_update(addr, mask2prefixlen(rtr_link->data),
			    &v->nexthop, v->type,
			    v->cost + ntohs(rtr_link->metric), 0,
			    area->id, adv_rtr, PT_INTRA_AREA, DT_NET,
			    v->lsa->data.rtr.flags, 0);
		}

		/* router, only add border and as-external routers */
		if ((v->lsa->data.rtr.flags & (OSPF_RTR_B | OSPF_RTR_E)) == 0)
			return;

		addr.s_addr = htonl(v->ls_id);
		adv_rtr.s_addr = htonl(v->adv_rtr);

		rt_update(addr, 32, &v->nexthop, v->type, v->cost, 0, area->id,
		    adv_rtr, PT_INTRA_AREA, DT_RTR, v->lsa->data.rtr.flags, 0);
		break;
	case LSA_TYPE_NETWORK:
		if (v->cost >= LS_INFINITY)
			return;

		addr.s_addr = htonl(v->ls_id) & v->lsa->data.net.mask;
		adv_rtr.s_addr = htonl(v->adv_rtr);
		rt_update(addr, mask2prefixlen(v->lsa->data.net.mask),
		    &v->nexthop, v->type, v->cost, 0, area->id, adv_rtr,
		    PT_INTRA_AREA, DT_NET, 0, 0);
		break;
	case LSA_TYPE_SUM_NETWORK:
	case LSA_TYPE_SUM_ROUTER:
		/* if ABR only look at area 0.0.0.0 LSA */
		if (area_border_router(conf) && area->id.s_addr != INADDR_ANY)
			return;

		/* ignore self-originated stuff */
		if (v->self)
			return;

		/* TODO type 3 area address range check */

		if ((w = lsa_find_area(area, LSA_TYPE_ROUTER,
		    htonl(v->adv_rtr), htonl(v->adv_rtr))) == NULL)
			return;

		/* copy nexthops */
		vertex_nexthop_clear(v);	/* XXX needed ??? */
		TAILQ_FOREACH(vn, &w->nexthop, entry)
			vertex_nexthop_add(v, w, vn->nexthop.s_addr);

		v->cost = w->cost +
		    (ntohl(v->lsa->data.sum.metric) & LSA_METRIC_MASK);

		if (v->cost >= LS_INFINITY)
			return;

		adv_rtr.s_addr = htonl(v->adv_rtr);
		if (v->type == LSA_TYPE_SUM_NETWORK) {
			addr.s_addr = htonl(v->ls_id) & v->lsa->data.sum.mask;
			rt_update(addr, mask2prefixlen(v->lsa->data.sum.mask),
			    &v->nexthop, v->type, v->cost, 0, area->id,
			    adv_rtr, PT_INTER_AREA, DT_NET, 0, 0);
		} else {
			addr.s_addr = htonl(v->ls_id);
			rt_update(addr, 32, &v->nexthop, v->type, v->cost, 0,
			    area->id, adv_rtr, PT_INTER_AREA, DT_RTR,
			    v->lsa->data.rtr.flags, 0);
		}

		break;
	case LSA_TYPE_AREA_OPAQ:
		/* nothing to calculate */
		break;
	default:
		/* as-external LSA are stored in a different tree */
		fatalx("rt_calc: invalid LSA type");
	}
}
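
/*
 * asext_calc() resolves a single AS-external LSA against the routing
 * table: the route towards the advertising ASBR (or, if set, the
 * forwarding address) supplies the nexthops, and the LSA metric is either
 * added as a type 1 cost or carried separately as a type 2 cost.  This is
 * comparable to RFC 2328, section 16.4; see the XXX RFC1583 compatibility
 * markers below.
 */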

void
asext_calc(struct vertex *v)
{
	struct rt_node		*r;
	struct rt_nexthop	*rn;
	u_int32_t		 cost2;
	struct in_addr		 addr, adv_rtr, a;
	enum path_type		 type;

	lsa_age(v);
	if (ntohs(v->lsa->hdr.age) == MAX_AGE ||
	    (ntohl(v->lsa->data.asext.metric) & LSA_METRIC_MASK) >=
	    LS_INFINITY)
		return;

	switch (v->type) {
	case LSA_TYPE_EXTERNAL:
		/* ignore self-originated stuff */
		if (v->self)
			return;

		if ((r = rt_lookup(DT_RTR, htonl(v->adv_rtr))) == NULL)
			return;

		/* XXX RFC1583Compatibility */
		if (v->lsa->data.asext.fw_addr != 0 &&
		    (r = rt_lookup(DT_NET, v->lsa->data.asext.fw_addr)) == NULL)
			return;

		if (v->lsa->data.asext.fw_addr != 0 &&
		    r->p_type != PT_INTRA_AREA &&
		    r->p_type != PT_INTER_AREA)
			return;

		if (ntohl(v->lsa->data.asext.metric) & LSA_ASEXT_E_FLAG) {
			v->cost = r->cost;
			cost2 = ntohl(v->lsa->data.asext.metric) &
			    LSA_METRIC_MASK;
			type = PT_TYPE2_EXT;
		} else {
			v->cost = r->cost + (ntohl(v->lsa->data.asext.metric) &
			    LSA_METRIC_MASK);
			cost2 = 0;
			type = PT_TYPE1_EXT;
		}

		a.s_addr = 0;
		adv_rtr.s_addr = htonl(v->adv_rtr);
		addr.s_addr = htonl(v->ls_id) & v->lsa->data.asext.mask;

		vertex_nexthop_clear(v);
		TAILQ_FOREACH(rn, &r->nexthop, entry) {
			if (rn->invalid)
				continue;

			/*
			 * if a fw_addr is specified and the nexthop
			 * is directly connected then it is possible to
			 * send traffic directly to fw_addr.
			 */
			if (v->lsa->data.asext.fw_addr != 0 && rn->connected)
				vertex_nexthop_add(v, NULL,
				    v->lsa->data.asext.fw_addr);
			else
				vertex_nexthop_add(v, NULL, rn->nexthop.s_addr);
		}

		rt_update(addr, mask2prefixlen(v->lsa->data.asext.mask),
		    &v->nexthop, v->type, v->cost, cost2, a, adv_rtr, type,
		    DT_NET, 0, ntohl(v->lsa->data.asext.ext_tag));
		break;
	case LSA_TYPE_AS_OPAQ:
		/* nothing to calculate */
		break;
	default:
		fatalx("asext_calc: invalid LSA type");
	}
}

void
spf_tree_clr(struct area *area)
{
	struct lsa_tree	*tree = &area->lsa_tree;
	struct vertex	*v;

	RB_FOREACH(v, lsa_tree, tree) {
		v->cost = LS_INFINITY;
		vertex_nexthop_clear(v);
	}
}
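
/*
 * calc_nexthop() is the next-hop calculation of RFC 2328, section 16.1.1,
 * in three cases: the parent is the SPF root (the nexthop is taken from
 * the directly connected interface or transit network), the parent is a
 * network vertex attached to the root (the nexthop is the destination
 * router's own address on that network), or the destination simply
 * inherits the parent's nexthops.
 */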

void
calc_nexthop(struct vertex *dst, struct vertex *parent,
    struct area *area, struct lsa_rtr_link *rtr_link)
{
	struct v_nexthop	*vn;
	struct iface		*iface;
	int			 i;

	/* case 1 */
	if (parent == spf_root) {
		switch (dst->type) {
		case LSA_TYPE_ROUTER:
			if (rtr_link->type != LINK_TYPE_POINTTOPOINT)
				fatalx("inconsistent SPF tree");
			LIST_FOREACH(iface, &area->iface_list, entry) {
				if (rtr_link->data == iface->addr.s_addr) {
					vertex_nexthop_add(dst, parent,
					    iface->dst.s_addr);
					return;
				}
			}
			fatalx("no interface found for interface");
		case LSA_TYPE_NETWORK:
			switch (rtr_link->type) {
			case LINK_TYPE_POINTTOPOINT:
			case LINK_TYPE_STUB_NET:
				/* ignore */
				break;
			case LINK_TYPE_TRANSIT_NET:
				if ((htonl(dst->ls_id) &
				    dst->lsa->data.net.mask) ==
				    (rtr_link->data &
				    dst->lsa->data.net.mask)) {
					vertex_nexthop_add(dst, parent,
					    rtr_link->data);
				}
				break;
			default:
				fatalx("calc_nexthop: invalid link "
				    "type");
			}
			return;
		default:
			fatalx("calc_nexthop: invalid dst type");
		}
		return;
	}

	/* case 2 */
	if (parent->type == LSA_TYPE_NETWORK && dst->type == LSA_TYPE_ROUTER) {
		TAILQ_FOREACH(vn, &parent->nexthop, entry) {
			if (vn->prev == spf_root) {
				for (i = 0; i < lsa_num_links(dst); i++) {
					rtr_link = get_rtr_link(dst, i);
					if ((rtr_link->type ==
					    LINK_TYPE_TRANSIT_NET) &&
					    (rtr_link->data &
					    parent->lsa->data.net.mask) ==
					    (htonl(parent->ls_id) &
					    parent->lsa->data.net.mask))
						vertex_nexthop_add(dst, parent,
						    rtr_link->data);
				}
			} else {
				vertex_nexthop_add(dst, parent,
				    vn->nexthop.s_addr);
			}
		}
		return;
	}

	/* case 3 */
	TAILQ_FOREACH(vn, &parent->nexthop, entry)
		vertex_nexthop_add(dst, parent, vn->nexthop.s_addr);
}

/* candidate list */
void
cand_list_init(void)
{
	TAILQ_INIT(&cand_list);
}

void
cand_list_add(struct vertex *v)
{
	struct vertex	*c = NULL;

	TAILQ_FOREACH(c, &cand_list, cand) {
		if (c->cost > v->cost) {
			TAILQ_INSERT_BEFORE(c, v, cand);
			return;
		} else if (c->cost == v->cost && c->type == LSA_TYPE_ROUTER &&
		    v->type == LSA_TYPE_NETWORK) {
			TAILQ_INSERT_BEFORE(c, v, cand);
			return;
		}
	}
	TAILQ_INSERT_TAIL(&cand_list, v, cand);
}

struct vertex *
cand_list_pop(void)
{
	struct vertex	*c;

	if ((c = TAILQ_FIRST(&cand_list)) != NULL) {
		TAILQ_REMOVE(&cand_list, c, cand);
	}

	return (c);
}

int
cand_list_present(struct vertex *v)
{
	struct vertex	*c;

	TAILQ_FOREACH(c, &cand_list, cand) {
		if (c == v)
			return (1);
	}

	return (0);
}

void
cand_list_clr(void)
{
	struct vertex	*c;

	while ((c = TAILQ_FIRST(&cand_list)) != NULL) {
		TAILQ_REMOVE(&cand_list, c, cand);
	}
}
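
/*
 * spf_timer() drives the SPF state machine (SPF_IDLE -> SPF_DELAY ->
 * SPF_HOLD, with SPF_HOLDQUEUE queueing another run while in holddown).
 * When the delay timer fires, every dirty area has its RIB entries
 * invalidated, its SPF tree recalculated and its routes rebuilt; then
 * AS-external routes are recomputed, kroute updates are issued and
 * summary LSAs are regenerated before the hold timer is started.
 */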

/* timers */
/* ARGSUSED */
void
spf_timer(int fd, short event, void *arg)
{
	struct vertex		*v;
	struct ospfd_conf	*conf = arg;
	struct area		*area;
	struct rt_node		*r;

	switch (conf->spf_state) {
	case SPF_IDLE:
		fatalx("spf_timer: invalid state IDLE");
	case SPF_HOLDQUEUE:
		conf->spf_state = SPF_DELAY;
		/* FALLTHROUGH */
	case SPF_DELAY:
		LIST_FOREACH(area, &conf->area_list, entry) {
			if (area->dirty) {
				/* invalidate RIB entries of this area */
				rt_invalidate(area);

				/* calculate SPF tree */
				spf_calc(area);

				/* calculate route table */
				RB_FOREACH(v, lsa_tree, &area->lsa_tree) {
					rt_calc(v, area, conf);
				}

				area->dirty = 0;
			}
		}

		/* calculate as-external routes, first invalidate them */
		rt_invalidate(NULL);
		RB_FOREACH(v, lsa_tree, &asext_tree) {
			asext_calc(v);
		}

		RB_FOREACH(r, rt_tree, &rt) {
			LIST_FOREACH(area, &conf->area_list, entry)
				rde_summary_update(r, area);

			if (r->d_type != DT_NET)
				continue;

			if (r->invalid)
				rde_send_delete_kroute(r);
			else
				rde_send_change_kroute(r);
		}

		LIST_FOREACH(area, &conf->area_list, entry) {
			lsa_generate_stub_sums(area);
			lsa_remove_invalid_sums(area);
		}

		start_spf_holdtimer(conf);
		break;
	case SPF_HOLD:
		conf->spf_state = SPF_IDLE;
		break;
	default:
		fatalx("spf_timer: unknown state");
	}
}

void
start_spf_timer(void)
{
	struct timeval	tv;

	switch (rdeconf->spf_state) {
	case SPF_IDLE:
		timerclear(&tv);
		tv.tv_sec = rdeconf->spf_delay / 1000;
		tv.tv_usec = (rdeconf->spf_delay % 1000) * 1000;
		rdeconf->spf_state = SPF_DELAY;
		if (evtimer_add(&rdeconf->ev, &tv) == -1)
			fatal("start_spf_timer");
		break;
	case SPF_DELAY:
		/* ignore */
		break;
	case SPF_HOLD:
		rdeconf->spf_state = SPF_HOLDQUEUE;
		break;
	case SPF_HOLDQUEUE:
		/* ignore */
		break;
	default:
		fatalx("start_spf_timer: invalid spf_state");
	}
}

void
stop_spf_timer(struct ospfd_conf *conf)
{
	if (evtimer_del(&conf->ev) == -1)
		fatal("stop_spf_timer");
}

void
start_spf_holdtimer(struct ospfd_conf *conf)
{
	struct timeval	tv;

	switch (conf->spf_state) {
	case SPF_DELAY:
		timerclear(&tv);
		tv.tv_sec = rdeconf->spf_hold_time / 1000;
		tv.tv_usec = (rdeconf->spf_hold_time % 1000) * 1000;
		conf->spf_state = SPF_HOLD;
		if (evtimer_add(&conf->ev, &tv) == -1)
			fatal("start_spf_holdtimer");
		break;
	case SPF_IDLE:
	case SPF_HOLD:
	case SPF_HOLDQUEUE:
		fatalx("start_spf_holdtimer: invalid state");
	default:
		fatalx("start_spf_holdtimer: unknown state");
	}
}

/* route table */
void
rt_init(void)
{
	RB_INIT(&rt);
}
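
/*
 * The RIB is kept in an RB tree keyed on (prefix, prefixlen, d_type), so
 * a /32 network route and the host route towards a router (DT_RTR) can
 * coexist for the same address.  rt_compare() below defines that
 * ordering; rt_find() does exact lookups and rt_lookup() walks prefix
 * lengths from /32 down to /0 for a longest-prefix match on DT_NET
 * destinations.
 */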

int
rt_compare(struct rt_node *a, struct rt_node *b)
{
	if (ntohl(a->prefix.s_addr) < ntohl(b->prefix.s_addr))
		return (-1);
	if (ntohl(a->prefix.s_addr) > ntohl(b->prefix.s_addr))
		return (1);
	if (a->prefixlen < b->prefixlen)
		return (-1);
	if (a->prefixlen > b->prefixlen)
		return (1);
	if (a->d_type > b->d_type)
		return (-1);
	if (a->d_type < b->d_type)
		return (1);
	return (0);
}

struct rt_node *
rt_find(in_addr_t prefix, u_int8_t prefixlen, enum dst_type d_type)
{
	struct rt_node	s;

	s.prefix.s_addr = prefix;
	s.prefixlen = prefixlen;
	s.d_type = d_type;

	return (RB_FIND(rt_tree, &rt, &s));
}

int
rt_insert(struct rt_node *r)
{
	if (RB_INSERT(rt_tree, &rt, r) != NULL) {
		log_warnx("rt_insert failed for %s/%u",
		    inet_ntoa(r->prefix), r->prefixlen);
		free(r);
		return (-1);
	}

	return (0);
}

int
rt_remove(struct rt_node *r)
{
	if (RB_REMOVE(rt_tree, &rt, r) == NULL) {
		log_warnx("rt_remove failed for %s/%u",
		    inet_ntoa(r->prefix), r->prefixlen);
		return (-1);
	}

	rt_nexthop_clear(r);
	free(r);
	return (0);
}
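
/*
 * rt_invalidate() is the mark phase of a mark-and-sweep style update: it
 * flags the selected routes and their nexthops as invalid before a
 * recalculation, so that rt_update()/rt_nexthop_add() can revalidate
 * whatever is still reachable and spf_timer() can delete the rest.
 * A NULL area selects only AS-external routes; a non-NULL area selects
 * only the intra-/inter-area routes belonging to that area.
 */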

void
rt_invalidate(struct area *area)
{
	struct rt_node		*r, *nr;
	struct rt_nexthop	*rn, *nrn;

	for (r = RB_MIN(rt_tree, &rt); r != NULL; r = nr) {
		nr = RB_NEXT(rt_tree, &rt, r);
		if (area == NULL) {
			/* look only at as_ext routes */
			if (r->p_type != PT_TYPE1_EXT &&
			    r->p_type != PT_TYPE2_EXT)
				continue;
		} else {
			/* ignore all as_ext routes */
			if (r->p_type == PT_TYPE1_EXT ||
			    r->p_type == PT_TYPE2_EXT)
				continue;

			/* look only at routes matching the area */
			if (r->area.s_addr != area->id.s_addr)
				continue;
		}
		r->invalid = 1;
		for (rn = TAILQ_FIRST(&r->nexthop); rn != NULL; rn = nrn) {
			nrn = TAILQ_NEXT(rn, entry);
			if (rn->invalid) {
				TAILQ_REMOVE(&r->nexthop, rn, entry);
				free(rn);
			} else
				rn->invalid = 1;
		}
		if (TAILQ_EMPTY(&r->nexthop))
			rt_remove(r);
	}
}

void
rt_nexthop_clear(struct rt_node *r)
{
	struct rt_nexthop	*rn;

	while ((rn = TAILQ_FIRST(&r->nexthop)) != NULL) {
		TAILQ_REMOVE(&r->nexthop, rn, entry);
		free(rn);
	}
}

void
rt_nexthop_add(struct rt_node *r, struct v_nexthead *vnh, u_int8_t type,
    struct in_addr adv_rtr)
{
	struct v_nexthop	*vn;
	struct rt_nexthop	*rn;
	struct timespec		 now;

	TAILQ_FOREACH(vn, vnh, entry) {
		TAILQ_FOREACH(rn, &r->nexthop, entry) {
			if (rn->nexthop.s_addr != vn->nexthop.s_addr)
				continue;

			rn->adv_rtr.s_addr = adv_rtr.s_addr;
			rn->connected = (type == LSA_TYPE_NETWORK &&
			    vn->prev == spf_root) || (vn->nexthop.s_addr == 0);
			rn->invalid = 0;

			r->invalid = 0;
			break;
		}
		if (rn)
			continue;

		if ((rn = calloc(1, sizeof(struct rt_nexthop))) == NULL)
			fatal("rt_nexthop_add");

		clock_gettime(CLOCK_MONOTONIC, &now);
		rn->nexthop.s_addr = vn->nexthop.s_addr;
		rn->adv_rtr.s_addr = adv_rtr.s_addr;
		rn->uptime = now.tv_sec;
		rn->connected = (type == LSA_TYPE_NETWORK &&
		    vn->prev == spf_root) || (vn->nexthop.s_addr == 0);
		rn->invalid = 0;

		r->invalid = 0;
		TAILQ_INSERT_TAIL(&r->nexthop, rn, entry);
	}
}

void
rt_clear(void)
{
	struct rt_node	*r;

	while ((r = RB_MIN(rt_tree, &rt)) != NULL)
		rt_remove(r);
}

void
rt_dump(struct in_addr area, pid_t pid, u_int8_t r_type)
{
	static struct ctl_rt	 rtctl;
	struct timespec		 now;
	struct rt_node		*r;
	struct rt_nexthop	*rn;

	clock_gettime(CLOCK_MONOTONIC, &now);

	RB_FOREACH(r, rt_tree, &rt) {
		if (r->invalid)
			continue;

		if (r->area.s_addr != area.s_addr)
			continue;

		switch (r_type) {
		case RIB_RTR:
			if (r->d_type != DT_RTR)
				continue;
			break;
		case RIB_NET:
			if (r->d_type != DT_NET)
				continue;
			if (r->p_type == PT_TYPE1_EXT ||
			    r->p_type == PT_TYPE2_EXT)
				continue;
			break;
		case RIB_EXT:
			if (r->p_type != PT_TYPE1_EXT &&
			    r->p_type != PT_TYPE2_EXT)
				continue;
			break;
		default:
			fatalx("rt_dump: invalid RIB type");
		}

		bzero(&rtctl, sizeof(rtctl));
		rtctl.prefix.s_addr = r->prefix.s_addr;
		rtctl.area.s_addr = r->area.s_addr;
		rtctl.cost = r->cost;
		rtctl.cost2 = r->cost2;
		rtctl.p_type = r->p_type;
		rtctl.d_type = r->d_type;
		rtctl.flags = r->flags;
		rtctl.prefixlen = r->prefixlen;

		TAILQ_FOREACH(rn, &r->nexthop, entry) {
			if (rn->invalid)
				continue;

			rtctl.connected = rn->connected;
			rtctl.nexthop.s_addr = rn->nexthop.s_addr;
			rtctl.adv_rtr.s_addr = rn->adv_rtr.s_addr;
			rtctl.uptime = now.tv_sec - rn->uptime;

			rde_imsg_compose_ospfe(IMSG_CTL_SHOW_RIB, 0, pid,
			    &rtctl, sizeof(rtctl));
		}
	}
}
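
/*
 * rt_update() installs or merges a candidate path into the RIB.  Path
 * type is compared before cost, so for example an intra-area path of
 * cost 100 still wins over an inter-area path of cost 10; only paths
 * that compare as equal contribute additional (ECMP) nexthops via
 * rt_nexthop_add().
 */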

void
rt_update(struct in_addr prefix, u_int8_t prefixlen, struct v_nexthead *vnh,
    u_int8_t v_type, u_int32_t cost, u_int32_t cost2, struct in_addr area,
    struct in_addr adv_rtr, enum path_type p_type, enum dst_type d_type,
    u_int8_t flags, u_int32_t tag)
{
	struct rt_node		*rte;
	struct rt_nexthop	*rn;
	int			 better = 0, equal = 0;

	if ((rte = rt_find(prefix.s_addr, prefixlen, d_type)) == NULL) {
		if ((rte = calloc(1, sizeof(struct rt_node))) == NULL)
			fatal("rt_update");

		TAILQ_INIT(&rte->nexthop);
		rte->prefix.s_addr = prefix.s_addr;
		rte->prefixlen = prefixlen;
		rte->cost = cost;
		rte->cost2 = cost2;
		rte->area = area;
		rte->p_type = p_type;
		rte->d_type = d_type;
		rte->flags = flags;
		rte->ext_tag = tag;

		rt_nexthop_add(rte, vnh, v_type, adv_rtr);

		rt_insert(rte);
	} else {
		/* order:
		 * 1. intra-area
		 * 2. inter-area
		 * 3. type 1 as ext
		 * 4. type 2 as ext
		 */
		if (rte->invalid)	/* everything is better than invalid */
			better = 1;
		else if (p_type < rte->p_type)
			better = 1;
		else if (p_type == rte->p_type)
			switch (p_type) {
			case PT_INTRA_AREA:
			case PT_INTER_AREA:
				if (cost < rte->cost)
					better = 1;
				else if (cost == rte->cost &&
				    rte->area.s_addr == area.s_addr)
					equal = 1;
				break;
			case PT_TYPE1_EXT:
				/* XXX rfc1583 compat */
				if (cost < rte->cost)
					better = 1;
				else if (cost == rte->cost)
					equal = 1;
				break;
			case PT_TYPE2_EXT:
				if (cost2 < rte->cost2)
					better = 1;
				/* XXX rfc1583 compat */
				else if (cost2 == rte->cost2 &&
				    cost < rte->cost)
					better = 1;
				else if (cost2 == rte->cost2 &&
				    cost == rte->cost)
					equal = 1;
				break;
			}

		if (better) {
			TAILQ_FOREACH(rn, &rte->nexthop, entry)
				rn->invalid = 1;

			rte->area = area;
			rte->cost = cost;
			rte->cost2 = cost2;
			rte->p_type = p_type;
			rte->flags = flags;
			rte->ext_tag = tag;
		}

		if (equal || better)
			rt_nexthop_add(rte, vnh, v_type, adv_rtr);
	}
}

struct rt_node *
rt_lookup(enum dst_type type, in_addr_t addr)
{
	struct rt_node	*rn;
	u_int8_t	 i = 32;

	if (type == DT_RTR) {
		rn = rt_find(addr, 32, type);
		if (rn && rn->invalid == 0)
			return (rn);
		return (NULL);
	}

	/* type == DT_NET */
	do {
		if ((rn = rt_find(addr & prefixlen2mask(i), i, type)) &&
		    rn->invalid == 0)
			return (rn);
	} while (i-- != 0);

	return (NULL);
}

/* router LSA links */
struct lsa_rtr_link *
get_rtr_link(struct vertex *v, int idx)
{
	struct lsa_rtr_link	*rtr_link = NULL;
	char			*buf = (char *)v->lsa;
	u_int16_t		 i, off, nlinks;

	if (v->type != LSA_TYPE_ROUTER)
		fatalx("get_rtr_link: invalid LSA type");

	off = sizeof(v->lsa->hdr) + sizeof(struct lsa_rtr);

	/* nlinks validated earlier by lsa_check() */
	nlinks = lsa_num_links(v);
	for (i = 0; i < nlinks; i++) {
		rtr_link = (struct lsa_rtr_link *)(buf + off);
		if (i == idx)
			return (rtr_link);

		off += sizeof(struct lsa_rtr_link) +
		    rtr_link->num_tos * sizeof(u_int32_t);
	}

	fatalx("get_rtr_link: index not found");
}

/* network LSA links */
struct lsa_net_link *
get_net_link(struct vertex *v, int idx)
{
	struct lsa_net_link	*net_link = NULL;
	char			*buf = (char *)v->lsa;
	u_int16_t		 i, off, nlinks;

	if (v->type != LSA_TYPE_NETWORK)
		fatalx("get_net_link: invalid LSA type");

	off = sizeof(v->lsa->hdr) + sizeof(u_int32_t);

	/* nlinks validated earlier by lsa_check() */
	nlinks = lsa_num_links(v);
	for (i = 0; i < nlinks; i++) {
		net_link = (struct lsa_net_link *)(buf + off);
		if (i == idx)
			return (net_link);

		off += sizeof(struct lsa_net_link);
	}

	fatalx("get_net_link: index not found");
}

/* misc */
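/*
 * linked() verifies that LSA w really points back at LSA v, i.e. that the
 * adjacency is advertised in both directions.  spf_calc() only follows a
 * link from v to w when this back-link exists, which keeps half-converged
 * or stale adjacencies out of the SPF tree.
 */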
int
linked(struct vertex *w, struct vertex *v)
{
	struct lsa_rtr_link	*rtr_link = NULL;
	struct lsa_net_link	*net_link = NULL;
	int			 i;

	switch (w->type) {
	case LSA_TYPE_ROUTER:
		for (i = 0; i < lsa_num_links(w); i++) {
			rtr_link = get_rtr_link(w, i);
			switch (v->type) {
			case LSA_TYPE_ROUTER:
				if (rtr_link->type == LINK_TYPE_POINTTOPOINT &&
				    rtr_link->id == htonl(v->ls_id))
					return (1);
				break;
			case LSA_TYPE_NETWORK:
				if (rtr_link->id == htonl(v->ls_id))
					return (1);
				break;
			default:
				fatalx("linked: invalid type");
			}
		}
		return (0);
	case LSA_TYPE_NETWORK:
		for (i = 0; i < lsa_num_links(w); i++) {
			net_link = get_net_link(w, i);
			switch (v->type) {
			case LSA_TYPE_ROUTER:
				if (net_link->att_rtr == htonl(v->ls_id))
					return (1);
				break;
			default:
				fatalx("linked: invalid type");
			}
		}
		return (0);
	default:
		fatalx("linked: invalid LSA type");
	}

	return (0);
}