/* $OpenBSD: rde_lsdb.c,v 1.26 2009/03/29 19:18:20 stsp Exp $ */

/*
 * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Link-state database (LSDB) handling for the OSPFv3 route decision
 * engine: vertices are kept in red-black trees keyed by (type, ls_id,
 * adv_rtr), one tree per flooding scope (AS, area, link-local).
 */

#include <sys/types.h>
#include <sys/tree.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "ospf6.h"
#include "ospf6d.h"
#include "rde.h"
#include "log.h"

struct vertex	*vertex_get(struct lsa *, struct rde_nbr *,
		    struct lsa_tree *);

int		 lsa_link_check(struct lsa *, u_int16_t);
int		 lsa_intra_a_pref_check(struct lsa *, u_int16_t);
void		 lsa_timeout(int, short, void *);
void		 lsa_refresh(struct vertex *);
int		 lsa_equal(struct lsa *, struct lsa *);
int		 lsa_get_prefix(void *, u_int16_t, struct rt_prefix *);

RB_GENERATE(lsa_tree, vertex, entry, lsa_compare)

extern struct ospfd_conf	*rdeconf;

/* Initialize an (empty) LSA tree. */
void
lsa_init(struct lsa_tree *t)
{
	RB_INIT(t);
}

/*
 * RB-tree comparison function: order vertices by LSA type, then
 * link-state ID, then advertising router (all in host byte order).
 * Returns <0, 0 or >0 like strcmp().
 */
int
lsa_compare(struct vertex *a, struct vertex *b)
{
	if (a->type < b->type)
		return (-1);
	if (a->type > b->type)
		return (1);
	if (a->ls_id < b->ls_id)
		return (-1);
	if (a->ls_id > b->ls_id)
		return (1);
	if (a->adv_rtr < b->adv_rtr)
		return (-1);
	if (a->adv_rtr > b->adv_rtr)
		return (1);
	return (0);
}
66 67 68 struct vertex * 69 vertex_get(struct lsa *lsa, struct rde_nbr *nbr, struct lsa_tree *tree) 70 { 71 struct vertex *v; 72 struct timespec tp; 73 74 if ((v = calloc(1, sizeof(struct vertex))) == NULL) 75 fatal(NULL); 76 TAILQ_INIT(&v->nexthop); 77 v->area = nbr->area; 78 v->peerid = nbr->peerid; 79 v->lsa = lsa; 80 clock_gettime(CLOCK_MONOTONIC, &tp); 81 v->changed = v->stamp = tp.tv_sec; 82 v->cost = LS_INFINITY; 83 v->ls_id = ntohl(lsa->hdr.ls_id); 84 v->adv_rtr = ntohl(lsa->hdr.adv_rtr); 85 v->type = ntohs(lsa->hdr.type); 86 v->lsa_tree = tree; 87 88 if (!nbr->self) 89 v->flooded = 1; /* XXX fix me */ 90 v->self = nbr->self; 91 92 evtimer_set(&v->ev, lsa_timeout, v); 93 94 return (v); 95 } 96 97 void 98 vertex_free(struct vertex *v) 99 { 100 RB_REMOVE(lsa_tree, v->lsa_tree, v); 101 (void)evtimer_del(&v->ev); 102 free(v->lsa); 103 free(v); 104 } 105 106 /* returns -1 if a is older, 1 if newer and 0 if equal to b */ 107 int 108 lsa_newer(struct lsa_hdr *a, struct lsa_hdr *b) 109 { 110 int32_t a32, b32; 111 u_int16_t a16, b16; 112 int i; 113 114 if (a == NULL) 115 return (-1); 116 if (b == NULL) 117 return (1); 118 119 /* 120 * The sequence number is defined as signed 32-bit integer, 121 * no idea how IETF came up with such a stupid idea. 122 */ 123 a32 = (int32_t)ntohl(a->seq_num); 124 b32 = (int32_t)ntohl(b->seq_num); 125 126 if (a32 > b32) 127 return (1); 128 if (a32 < b32) 129 return (-1); 130 131 a16 = ntohs(a->ls_chksum); 132 b16 = ntohs(b->ls_chksum); 133 134 if (a16 > b16) 135 return (1); 136 if (a16 < b16) 137 return (-1); 138 139 a16 = ntohs(a->age); 140 b16 = ntohs(b->age); 141 142 if (a16 >= MAX_AGE && b16 >= MAX_AGE) 143 return (0); 144 if (b16 >= MAX_AGE) 145 return (-1); 146 if (a16 >= MAX_AGE) 147 return (1); 148 149 i = b16 - a16; 150 if (abs(i) > MAX_AGE_DIFF) 151 return (i > 0 ? 
1 : -1); 152 153 return (0); 154 } 155 156 int 157 lsa_check(struct rde_nbr *nbr, struct lsa *lsa, u_int16_t len) 158 { 159 u_int32_t metric; 160 161 if (len < sizeof(lsa->hdr)) { 162 log_warnx("lsa_check: bad packet size"); 163 return (0); 164 } 165 if (ntohs(lsa->hdr.len) != len) { 166 log_warnx("lsa_check: bad packet size"); 167 return (0); 168 } 169 170 if (iso_cksum(lsa, len, 0)) { 171 log_warnx("lsa_check: bad packet checksum"); 172 return (0); 173 } 174 175 /* invalid ages */ 176 if ((ntohs(lsa->hdr.age) < 1 && !nbr->self) || 177 ntohs(lsa->hdr.age) > MAX_AGE) { 178 log_warnx("lsa_check: bad age"); 179 return (0); 180 } 181 182 /* invalid sequence number */ 183 if (ntohl(lsa->hdr.seq_num) == RESV_SEQ_NUM) { 184 log_warnx("ls_check: bad seq num"); 185 return (0); 186 } 187 188 switch (ntohs(lsa->hdr.type)) { 189 case LSA_TYPE_LINK: 190 if (!lsa_link_check(lsa, len)) 191 return (0); 192 break; 193 case LSA_TYPE_ROUTER: 194 if (len < sizeof(lsa->hdr) + sizeof(struct lsa_rtr)) { 195 log_warnx("lsa_check: bad LSA rtr packet"); 196 return (0); 197 } 198 len -= sizeof(lsa->hdr) + sizeof(struct lsa_rtr); 199 if (len % sizeof(struct lsa_rtr_link)) { 200 log_warnx("lsa_check: bad LSA rtr packet"); 201 return (0); 202 } 203 break; 204 case LSA_TYPE_NETWORK: 205 if ((len % sizeof(u_int32_t)) || 206 len < sizeof(lsa->hdr) + sizeof(u_int32_t)) { 207 return (0); 208 } 209 break; 210 case LSA_TYPE_INTER_A_PREFIX: 211 if (len < sizeof(lsa->hdr) + sizeof(lsa->data.pref_sum)) { 212 log_warnx("lsa_check: bad LSA prefix summary packet"); 213 return (0); 214 } 215 metric = ntohl(lsa->data.pref_sum.metric); 216 if (metric & ~LSA_METRIC_MASK) { 217 log_warnx("lsa_check: bad LSA summary metric"); 218 return (0); 219 } 220 if (lsa_get_prefix(((char *)lsa) + sizeof(lsa->hdr) + 221 sizeof(lsa->data.pref_sum), 222 len - sizeof(lsa->hdr) + sizeof(lsa->data.pref_sum), 223 NULL) == -1) { 224 log_warnx("lsa_check: " 225 "invalid LSA prefix summary packet"); 226 return (0); 227 } 228 break; 
229 case LSA_TYPE_INTER_A_ROUTER: 230 if (len < sizeof(lsa->hdr) + sizeof(lsa->data.rtr_sum)) { 231 log_warnx("lsa_check: bad LSA router summary packet"); 232 return (0); 233 } 234 metric = ntohl(lsa->data.rtr_sum.metric); 235 if (metric & ~LSA_METRIC_MASK) { 236 log_warnx("lsa_check: bad LSA summary metric"); 237 return (0); 238 } 239 break; 240 case LSA_TYPE_INTRA_A_PREFIX: 241 if (!lsa_intra_a_pref_check(lsa, len)) 242 return (0); 243 break; 244 case LSA_TYPE_EXTERNAL: 245 if ((len % (3 * sizeof(u_int32_t))) || 246 len < sizeof(lsa->hdr) + sizeof(lsa->data.asext)) { 247 log_warnx("lsa_check: bad LSA as-external packet"); 248 return (0); 249 } 250 metric = ntohl(lsa->data.asext.metric); 251 if (metric & ~(LSA_METRIC_MASK | LSA_ASEXT_E_FLAG)) { 252 log_warnx("lsa_check: bad LSA as-external metric"); 253 return (0); 254 } 255 /* AS-external-LSA are silently discarded in stub areas */ 256 if (nbr->area->stub) 257 return (0); 258 break; 259 default: 260 log_warnx("lsa_check: unknown type %x", ntohs(lsa->hdr.type)); 261 return (0); 262 } 263 264 /* MaxAge handling */ 265 if (lsa->hdr.age == htons(MAX_AGE) && !nbr->self && lsa_find(nbr->iface, 266 lsa->hdr.type, lsa->hdr.ls_id, lsa->hdr.adv_rtr) == NULL && 267 !rde_nbr_loading(nbr->area)) { 268 /* 269 * if no neighbor in state Exchange or Loading 270 * ack LSA but don't add it. Needs to be a direct ack. 
271 */ 272 rde_imsg_compose_ospfe(IMSG_LS_ACK, nbr->peerid, 0, &lsa->hdr, 273 sizeof(struct lsa_hdr)); 274 return (0); 275 } 276 277 return (1); 278 } 279 280 int 281 lsa_link_check(struct lsa *lsa, u_int16_t len) 282 { 283 char *buf = (char *)lsa; 284 struct lsa_link *llink; 285 u_int32_t i, off, npref; 286 int rv; 287 288 llink = (struct lsa_link *)(buf + sizeof(lsa->hdr)); 289 off = sizeof(lsa->hdr) + sizeof(struct lsa_link); 290 if (off > len) { 291 log_warnx("lsa_link_check: invalid LSA link packet, " 292 "short header"); 293 return (0); 294 } 295 296 len -= off; 297 npref = ntohl(llink->numprefix); 298 299 for (i = 0; i < npref; i++) { 300 rv = lsa_get_prefix(buf + off, len, NULL); 301 if (rv == -1) { 302 log_warnx("lsa_link_check: invalid LSA link packet"); 303 return (0); 304 } 305 off += rv; 306 len -= rv; 307 } 308 309 return (1); 310 } 311 312 int 313 lsa_intra_a_pref_check(struct lsa *lsa, u_int16_t len) 314 { 315 char *buf = (char *)lsa; 316 struct lsa_intra_prefix *iap; 317 u_int32_t i, off, npref; 318 int rv; 319 320 iap = (struct lsa_intra_prefix *)(buf + sizeof(lsa->hdr)); 321 off = sizeof(lsa->hdr) + sizeof(struct lsa_intra_prefix); 322 if (off > len) { 323 log_warnx("lsa_intra_a_pref_check: " 324 "invalid LSA intra area prefix packet, short header"); 325 return (0); 326 } 327 328 len -= off; 329 npref = ntohs(iap->numprefix); 330 331 for (i = 0; i < npref; i++) { 332 rv = lsa_get_prefix(buf + off, len, NULL); 333 if (rv == -1) { 334 log_warnx("lsa_intra_a_pref_check: " 335 "invalid LSA intra area prefix packet"); 336 return (0); 337 } 338 off += rv; 339 len -= rv; 340 } 341 342 return (1); 343 } 344 345 int 346 lsa_self(struct lsa *lsa) 347 { 348 return rde_router_id() == lsa->hdr.adv_rtr; 349 } 350 351 void 352 lsa_flush(struct rde_nbr *nbr, struct lsa *lsa) 353 { 354 struct lsa *copy; 355 356 /* 357 * The LSA may not be altered because the caller may still 358 * use it, so a copy needs to be added to the LSDB. 
359 * The copy will be reflooded via the default timeout handler. 360 */ 361 if ((copy = malloc(ntohs(lsa->hdr.len))) == NULL) 362 fatal("lsa_flush"); 363 memcpy(copy, lsa, ntohs(lsa->hdr.len)); 364 copy->hdr.age = htons(MAX_AGE); 365 (void)lsa_add(rde_nbr_self(nbr->area), copy); 366 } 367 368 void 369 lsa_reflood(struct vertex *v, struct lsa *new) 370 { 371 /* 372 * We only need to create a new instance by setting the LSA 373 * sequence number equal to the one of 'new' and calling 374 * lsa_refresh(). Actual flooding will be done by the caller. 375 */ 376 v->lsa->hdr.seq_num = new->hdr.seq_num; 377 lsa_refresh(v); 378 } 379 380 int 381 lsa_add(struct rde_nbr *nbr, struct lsa *lsa) 382 { 383 struct lsa_tree *tree; 384 struct vertex *new, *old; 385 struct timeval tv, now, res; 386 387 if (LSA_IS_SCOPE_AS(ntohs(lsa->hdr.type))) 388 tree = &asext_tree; 389 else if (LSA_IS_SCOPE_AREA(ntohs(lsa->hdr.type))) 390 tree = &nbr->area->lsa_tree; 391 else if (LSA_IS_SCOPE_LLOCAL(ntohs(lsa->hdr.type))) 392 tree = &nbr->iface->lsa_tree; 393 else 394 fatalx("unknown scope type"); 395 396 new = vertex_get(lsa, nbr, tree); 397 old = RB_INSERT(lsa_tree, tree, new); 398 399 if (old != NULL) { 400 if (old->deleted && evtimer_pending(&old->ev, &tv)) { 401 /* new update added before hold time expired */ 402 gettimeofday(&now, NULL); 403 timersub(&tv, &now, &res); 404 405 /* remove old LSA and insert new LSA with delay */ 406 vertex_free(old); 407 RB_INSERT(lsa_tree, tree, new); 408 new->deleted = 1; 409 410 if (evtimer_add(&new->ev, &res) != 0) 411 fatal("lsa_add"); 412 return (1); 413 } 414 if (!lsa_equal(new->lsa, old->lsa)) { 415 if (ntohs(lsa->hdr.type) != LSA_TYPE_EXTERNAL) 416 nbr->area->dirty = 1; 417 start_spf_timer(); 418 } 419 vertex_free(old); 420 RB_INSERT(lsa_tree, tree, new); 421 } else { 422 if (ntohs(lsa->hdr.type) != LSA_TYPE_EXTERNAL) 423 nbr->area->dirty = 1; 424 start_spf_timer(); 425 } 426 427 /* timeout handling either MAX_AGE or LS_REFRESH_TIME */ 428 
timerclear(&tv); 429 430 if (nbr->self && ntohs(new->lsa->hdr.age) == DEFAULT_AGE) 431 tv.tv_sec = LS_REFRESH_TIME; 432 else 433 tv.tv_sec = MAX_AGE - ntohs(new->lsa->hdr.age); 434 435 if (evtimer_add(&new->ev, &tv) != 0) 436 fatal("lsa_add"); 437 return (0); 438 } 439 440 void 441 lsa_del(struct rde_nbr *nbr, struct lsa_hdr *lsa) 442 { 443 struct vertex *v; 444 struct timeval tv; 445 446 v = lsa_find(nbr->iface, lsa->type, lsa->ls_id, lsa->adv_rtr); 447 if (v == NULL) 448 return; 449 450 v->deleted = 1; 451 /* hold time to make sure that a new lsa is not added premature */ 452 timerclear(&tv); 453 tv.tv_sec = MIN_LS_INTERVAL; 454 if (evtimer_add(&v->ev, &tv) == -1) 455 fatal("lsa_del"); 456 } 457 458 void 459 lsa_age(struct vertex *v) 460 { 461 struct timespec tp; 462 time_t now; 463 int d; 464 u_int16_t age; 465 466 clock_gettime(CLOCK_MONOTONIC, &tp); 467 now = tp.tv_sec; 468 469 d = now - v->stamp; 470 /* set stamp so that at least new calls work */ 471 v->stamp = now; 472 473 if (d < 0) { 474 log_warnx("lsa_age: time went backwards"); 475 return; 476 } 477 478 age = ntohs(v->lsa->hdr.age); 479 if (age + d > MAX_AGE) 480 age = MAX_AGE; 481 else 482 age += d; 483 484 v->lsa->hdr.age = htons(age); 485 } 486 487 struct vertex * 488 lsa_find(struct iface *iface, u_int16_t type, u_int32_t ls_id, 489 u_int32_t adv_rtr) 490 { 491 struct lsa_tree *tree; 492 493 if (LSA_IS_SCOPE_AS(ntohs(type))) 494 tree = &asext_tree; 495 else if (LSA_IS_SCOPE_AREA(ntohs(type))) { 496 struct area *area; 497 498 if ((area = area_find(rdeconf, iface->area_id)) == NULL) 499 fatalx("interface lost area"); 500 tree = &area->lsa_tree; 501 } else if (LSA_IS_SCOPE_LLOCAL(ntohs(type))) 502 tree = &iface->lsa_tree; 503 else 504 fatalx("unknown scope type"); 505 506 return lsa_find_tree(tree, type, ls_id, adv_rtr); 507 508 } 509 510 struct vertex * 511 lsa_find_tree(struct lsa_tree *tree, u_int16_t type, u_int32_t ls_id, 512 u_int32_t adv_rtr) 513 { 514 struct vertex key; 515 struct vertex *v; 
516 517 key.ls_id = ntohl(ls_id); 518 key.adv_rtr = ntohl(adv_rtr); 519 key.type = ntohs(type); 520 521 v = RB_FIND(lsa_tree, tree, &key); 522 523 /* LSA that are deleted are not findable */ 524 if (v && v->deleted) 525 return (NULL); 526 527 if (v) 528 lsa_age(v); 529 530 return (v); 531 } 532 533 struct vertex * 534 lsa_find_rtr(struct area *area, u_int32_t rtr_id) 535 { 536 struct vertex *v; 537 struct vertex *r; 538 539 /* A router can originate multiple router LSAs, 540 * differentiated by link state ID. Our job is 541 * to find among those the LSA with the lowest 542 * link state ID, because this is where the options 543 * field and router-type bits come from. */ 544 545 r = NULL; 546 /* XXX speed me up */ 547 RB_FOREACH(v, lsa_tree, &area->lsa_tree) { 548 if (v->deleted) 549 continue; 550 551 if (v->type == LSA_TYPE_ROUTER && 552 v->adv_rtr == ntohl(rtr_id)) { 553 if (r == NULL) 554 r = v; 555 else if (v->ls_id < r->ls_id) 556 r = v; 557 } 558 } 559 560 if (r) 561 lsa_age(r); 562 563 return (r); 564 } 565 566 u_int16_t 567 lsa_num_links(struct vertex *v) 568 { 569 switch (v->type) { 570 case LSA_TYPE_ROUTER: 571 return ((ntohs(v->lsa->hdr.len) - sizeof(struct lsa_hdr) - 572 sizeof(struct lsa_rtr)) / sizeof(struct lsa_rtr_link)); 573 case LSA_TYPE_NETWORK: 574 return ((ntohs(v->lsa->hdr.len) - sizeof(struct lsa_hdr) - 575 sizeof(struct lsa_net)) / sizeof(struct lsa_net_link)); 576 default: 577 fatalx("lsa_num_links: invalid LSA type"); 578 } 579 580 return (0); 581 } 582 583 void 584 lsa_snap(struct rde_nbr *nbr, u_int32_t peerid) 585 { 586 struct lsa_tree *tree = &nbr->area->lsa_tree; 587 struct vertex *v; 588 589 do { 590 RB_FOREACH(v, lsa_tree, tree) { 591 if (v->deleted) 592 continue; 593 lsa_age(v); 594 if (ntohs(v->lsa->hdr.age) >= MAX_AGE) { 595 rde_imsg_compose_ospfe(IMSG_LS_UPD, peerid, 596 0, &v->lsa->hdr, ntohs(v->lsa->hdr.len)); 597 } else { 598 rde_imsg_compose_ospfe(IMSG_DB_SNAPSHOT, peerid, 599 0, &v->lsa->hdr, sizeof(struct lsa_hdr)); 600 } 
601 } 602 if (tree == &asext_tree) 603 break; 604 if (tree == &nbr->area->lsa_tree) { 605 tree = &nbr->iface->lsa_tree; 606 continue; 607 } else 608 tree = &asext_tree; 609 610 } while (1); 611 } 612 613 void 614 lsa_dump(struct lsa_tree *tree, int imsg_type, pid_t pid) 615 { 616 struct vertex *v; 617 618 RB_FOREACH(v, lsa_tree, tree) { 619 if (v->deleted) 620 continue; 621 lsa_age(v); 622 switch (imsg_type) { 623 case IMSG_CTL_SHOW_DATABASE: 624 rde_imsg_compose_ospfe(IMSG_CTL_SHOW_DATABASE, 0, pid, 625 &v->lsa->hdr, ntohs(v->lsa->hdr.len)); 626 continue; 627 case IMSG_CTL_SHOW_DB_SELF: 628 if (v->lsa->hdr.adv_rtr == rde_router_id()) 629 break; 630 continue; 631 case IMSG_CTL_SHOW_DB_EXT: 632 if (v->type == LSA_TYPE_EXTERNAL) 633 break; 634 continue; 635 case IMSG_CTL_SHOW_DB_LINK: 636 if (v->type == LSA_TYPE_LINK) 637 break; 638 continue; 639 case IMSG_CTL_SHOW_DB_NET: 640 if (v->type == LSA_TYPE_NETWORK) 641 break; 642 continue; 643 case IMSG_CTL_SHOW_DB_RTR: 644 if (v->type == LSA_TYPE_ROUTER) 645 break; 646 continue; 647 case IMSG_CTL_SHOW_DB_INTRA: 648 if (v->type == LSA_TYPE_INTRA_A_PREFIX) 649 break; 650 case IMSG_CTL_SHOW_DB_SUM: 651 if (v->type == LSA_TYPE_INTER_A_PREFIX) 652 break; 653 continue; 654 case IMSG_CTL_SHOW_DB_ASBR: 655 if (v->type == LSA_TYPE_INTER_A_ROUTER) 656 break; 657 continue; 658 default: 659 log_warnx("lsa_dump: unknown imsg type"); 660 return; 661 } 662 rde_imsg_compose_ospfe(imsg_type, 0, pid, &v->lsa->hdr, 663 ntohs(v->lsa->hdr.len)); 664 } 665 } 666 667 /* ARGSUSED */ 668 void 669 lsa_timeout(int fd, short event, void *bula) 670 { 671 struct vertex *v = bula; 672 struct timeval tv; 673 674 lsa_age(v); 675 676 if (v->deleted) { 677 if (ntohs(v->lsa->hdr.age) >= MAX_AGE) { 678 vertex_free(v); 679 } else { 680 v->deleted = 0; 681 682 /* schedule recalculation of the RIB */ 683 if (v->lsa->hdr.type != LSA_TYPE_EXTERNAL) 684 v->area->dirty = 1; 685 start_spf_timer(); 686 687 rde_imsg_compose_ospfe(IMSG_LS_FLOOD, v->peerid, 0, 688 
v->lsa, ntohs(v->lsa->hdr.len)); 689 690 /* timeout handling either MAX_AGE or LS_REFRESH_TIME */ 691 timerclear(&tv); 692 if (v->self) 693 tv.tv_sec = LS_REFRESH_TIME; 694 else 695 tv.tv_sec = MAX_AGE - ntohs(v->lsa->hdr.age); 696 697 if (evtimer_add(&v->ev, &tv) != 0) 698 fatal("lsa_timeout"); 699 } 700 return; 701 } 702 703 if (v->self && ntohs(v->lsa->hdr.age) < MAX_AGE) 704 lsa_refresh(v); 705 706 rde_imsg_compose_ospfe(IMSG_LS_FLOOD, v->peerid, 0, 707 v->lsa, ntohs(v->lsa->hdr.len)); 708 } 709 710 void 711 lsa_refresh(struct vertex *v) 712 { 713 struct timeval tv; 714 struct timespec tp; 715 u_int32_t seqnum; 716 u_int16_t len; 717 718 /* refresh LSA by increasing sequence number by one */ 719 v->lsa->hdr.age = htons(DEFAULT_AGE); 720 seqnum = ntohl(v->lsa->hdr.seq_num); 721 if (seqnum++ == MAX_SEQ_NUM) 722 /* XXX fix me */ 723 fatalx("sequence number wrapping"); 724 v->lsa->hdr.seq_num = htonl(seqnum); 725 726 /* recalculate checksum */ 727 len = ntohs(v->lsa->hdr.len); 728 v->lsa->hdr.ls_chksum = 0; 729 v->lsa->hdr.ls_chksum = htons(iso_cksum(v->lsa, len, LS_CKSUM_OFFSET)); 730 731 clock_gettime(CLOCK_MONOTONIC, &tp); 732 v->changed = v->stamp = tp.tv_sec; 733 734 timerclear(&tv); 735 tv.tv_sec = LS_REFRESH_TIME; 736 if (evtimer_add(&v->ev, &tv) == -1) 737 fatal("lsa_refresh"); 738 } 739 740 void 741 lsa_merge(struct rde_nbr *nbr, struct lsa *lsa, struct vertex *v) 742 { 743 struct timeval tv; 744 struct timespec tp; 745 time_t now; 746 u_int16_t len; 747 748 if (v == NULL) { 749 if (lsa_add(nbr, lsa)) 750 /* delayed update */ 751 return; 752 rde_imsg_compose_ospfe(IMSG_LS_FLOOD, nbr->peerid, 0, 753 lsa, ntohs(lsa->hdr.len)); 754 return; 755 } 756 757 /* set the seq_num to the current one. 
lsa_refresh() will do the ++ */ 758 lsa->hdr.seq_num = v->lsa->hdr.seq_num; 759 /* recalculate checksum */ 760 len = ntohs(lsa->hdr.len); 761 lsa->hdr.ls_chksum = 0; 762 lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET)); 763 764 /* compare LSA most header fields are equal so don't check them */ 765 if (lsa_equal(lsa, v->lsa)) { 766 free(lsa); 767 return; 768 } 769 770 /* overwrite the lsa all other fields are unaffected */ 771 free(v->lsa); 772 v->lsa = lsa; 773 start_spf_timer(); 774 if (v->type != LSA_TYPE_EXTERNAL) 775 nbr->area->dirty = 1; 776 777 /* set correct timeout for reflooding the LSA */ 778 clock_gettime(CLOCK_MONOTONIC, &tp); 779 now = tp.tv_sec; 780 timerclear(&tv); 781 if (v->changed + MIN_LS_INTERVAL >= now) 782 tv.tv_sec = MIN_LS_INTERVAL; 783 if (evtimer_add(&v->ev, &tv) == -1) 784 fatal("lsa_merge"); 785 } 786 787 void 788 lsa_remove_invalid_sums(struct area *area) 789 { 790 struct lsa_tree *tree = &area->lsa_tree; 791 struct vertex *v, *nv; 792 793 /* XXX speed me up */ 794 for (v = RB_MIN(lsa_tree, tree); v != NULL; v = nv) { 795 nv = RB_NEXT(lsa_tree, tree, v); 796 if ((v->type == LSA_TYPE_INTER_A_PREFIX || 797 v->type == LSA_TYPE_INTER_A_ROUTER) && 798 v->self && v->cost == LS_INFINITY && 799 v->deleted == 0) { 800 /* 801 * age the lsa and call lsa_timeout() which will 802 * actually remove it from the database. 
803 */ 804 v->lsa->hdr.age = htons(MAX_AGE); 805 lsa_timeout(0, 0, v); 806 } 807 } 808 } 809 810 int 811 lsa_equal(struct lsa *a, struct lsa *b) 812 { 813 /* 814 * compare LSA that already have same type, adv_rtr and ls_id 815 * so not all header need to be compared 816 */ 817 if (a == NULL || b == NULL) 818 return (0); 819 if (a->hdr.len != b->hdr.len) 820 return (0); 821 /* LSA with age MAX_AGE are never equal */ 822 if (a->hdr.age == htons(MAX_AGE) || b->hdr.age == htons(MAX_AGE)) 823 return (0); 824 if (memcmp(&a->data, &b->data, ntohs(a->hdr.len) - 825 sizeof(struct lsa_hdr))) 826 return (0); 827 828 return (1); 829 } 830 831 int 832 lsa_get_prefix(void *buf, u_int16_t len, struct rt_prefix *p) 833 { 834 struct lsa_prefix *lp = buf; 835 u_int32_t *buf32, *addr = NULL; 836 u_int8_t prefixlen; 837 u_int16_t consumed = 0; 838 839 if (len < sizeof(*lp)) 840 return (-1); 841 842 prefixlen = lp->prefixlen; 843 844 if (p) { 845 bzero(p, sizeof(*p)); 846 p->prefixlen = lp->prefixlen; 847 p->options = lp->options; 848 p->metric = ntohs(lp->metric); 849 addr = (u_int32_t *)&p->prefix; 850 } 851 852 buf32 = (u_int32_t *)(lp + 1); 853 consumed += sizeof(*lp); 854 855 for (; ((prefixlen + 31) / 32) > 0; prefixlen -= 32) { 856 if (len < consumed + sizeof(u_int32_t)) 857 return (-1); 858 if (addr) 859 *addr++ = *buf32++; 860 consumed += sizeof(u_int32_t); 861 } 862 863 return (consumed); 864 } 865 866