/*	$OpenBSD: lsupdate.c,v 1.36 2009/03/13 03:58:26 claudio Exp $ */

/*
 * Copyright (c) 2005 Claudio Jeker <claudio@openbsd.org>
 * Copyright (c) 2004, 2005 Esben Norby <norby@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/hash.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

#include <stdlib.h>
#include <string.h>

#include "ospf.h"
#include "ospfd.h"
#include "log.h"
#include "ospfe.h"
#include "rde.h"

extern struct ospfd_conf	*oeconf;
extern struct imsgbuf		*ibuf_rde;

struct buf	*prepare_ls_update(struct iface *);
int		 add_ls_update(struct buf *, struct iface *, void *, u_int16_t,
		     u_int16_t);
int		 send_ls_update(struct buf *, struct iface *, struct in_addr, u_int32_t);

void		 ls_retrans_list_insert(struct nbr *, struct lsa_entry *);
void		 ls_retrans_list_remove(struct nbr *, struct lsa_entry *);

/* link state update packet handling */
int
lsa_flood(struct iface *iface, struct nbr *originator, struct lsa_hdr *lsa_hdr,
    void *data)
{
	struct nbr		*nbr;
	struct lsa_entry	*le = NULL;
	int			 queued = 0, dont_ack = 0;
	int			 r;

	LIST_FOREACH(nbr, &iface->nbr_list, entry) {
		if (nbr == iface->self)
			continue;
		if (!(nbr->state & NBR_STA_FLOOD))
			continue;

		if (iface->state & IF_STA_DROTHER && !queued)
			while ((le = ls_retrans_list_get(iface->self, lsa_hdr)))
				ls_retrans_list_free(iface->self, le);

		while ((le = ls_retrans_list_get(nbr, lsa_hdr)))
			ls_retrans_list_free(nbr, le);

		if (!(nbr->state & NBR_STA_FULL) &&
		    (le = ls_req_list_get(nbr, lsa_hdr)) != NULL) {
			r = lsa_newer(lsa_hdr, le->le_lsa);
			if (r > 0) {
				/* LSA to be flooded is newer than requested */
				ls_req_list_free(nbr, le);
				/* new LSA must still be flooded */
			} else if (r < 0) {
				/* LSA to be flooded is older than requested */
				continue;
			} else {
				/* LSAs are equal */
				ls_req_list_free(nbr, le);
				continue;
			}
		}

		if (nbr == originator) {
			dont_ack++;
			continue;
		}

		/* non-DR/BDR routers keep all LSAs in one retrans list */
		if (iface->state & IF_STA_DROTHER) {
			if (!queued)
				ls_retrans_list_add(iface->self, data,
				    iface->rxmt_interval, 0);
			queued = 1;
		} else {
			ls_retrans_list_add(nbr, data, iface->rxmt_interval, 0);
			queued = 1;
		}
	}

	if (!queued)
		return (0);

	if (iface == originator->iface && iface->self != originator) {
		if (iface->dr == originator || iface->bdr == originator)
			return (0);
		if (iface->state & IF_STA_BACKUP)
			return (0);
		dont_ack++;
	}

	/*
	 * The initial flood needs to be queued separately: the timeout is
	 * zero and oneshot has to be set because the retransmission queues
	 * are already loaded.
	 */
	switch (iface->type) {
	case IF_TYPE_POINTOPOINT:
	case IF_TYPE_BROADCAST:
		ls_retrans_list_add(iface->self, data, 0, 1);
		break;
	case IF_TYPE_NBMA:
	case IF_TYPE_POINTOMULTIPOINT:
	case IF_TYPE_VIRTUALLINK:
		LIST_FOREACH(nbr, &iface->nbr_list, entry) {
			if (nbr == iface->self)
				continue;
			if (!(nbr->state & NBR_STA_FLOOD))
				continue;
			if (!TAILQ_EMPTY(&nbr->ls_retrans_list)) {
				le = TAILQ_LAST(&nbr->ls_retrans_list,
				    lsa_head);
				if (lsa_hdr->type != le->le_lsa->type ||
				    lsa_hdr->ls_id != le->le_lsa->ls_id ||
				    lsa_hdr->adv_rtr != le->le_lsa->adv_rtr)
					continue;
			}
			ls_retrans_list_add(nbr, data, 0, 1);
		}
		break;
	default:
		fatalx("lsa_flood: unknown interface type");
	}

	return (dont_ack == 2);
}

struct buf *
prepare_ls_update(struct iface *iface)
{
	struct buf	*buf;

	if ((buf = buf_open(iface->mtu - sizeof(struct ip))) == NULL)
		fatal("prepare_ls_update");

	/* OSPF header */
	if (gen_ospf_hdr(buf, iface, PACKET_TYPE_LS_UPDATE))
		goto fail;

	/* reserve space for number of lsa field */
	if (buf_reserve(buf, sizeof(u_int32_t)) == NULL)
		goto fail;

	return (buf);
fail:
	log_warn("prepare_ls_update");
	buf_free(buf);
	return (NULL);
}

int
add_ls_update(struct buf *buf, struct iface *iface, void *data, u_int16_t len,
    u_int16_t older)
{
	void		*lsage;
	u_int16_t	 age;

	if (buf_left(buf) < (size_t)len + MD5_DIGEST_LENGTH)
		return (0);

	lsage = buf_reserve(buf, 0);
	if (buf_add(buf, data, len)) {
		log_warn("add_ls_update");
		return (0);
	}

	/* age LSA before sending it out */
	memcpy(&age, data, sizeof(age));
	age = ntohs(age);
	if ((age += older + iface->transmit_delay) >= MAX_AGE)
		age = MAX_AGE;
	age = htons(age);
	memcpy(lsage, &age, sizeof(age));

	return (1);
}

int
send_ls_update(struct buf *buf, struct iface *iface, struct in_addr addr,
    u_int32_t nlsa)
{
	struct sockaddr_in	 dst;
	int			 ret;

	nlsa = htonl(nlsa);
	memcpy(buf_seek(buf, sizeof(struct ospf_hdr), sizeof(nlsa)),
	    &nlsa, sizeof(nlsa));
	/* update authentication and calculate checksum */
	if (auth_gen(buf, iface))
		goto fail;

	/* set destination */
	dst.sin_family = AF_INET;
	dst.sin_len = sizeof(struct sockaddr_in);
	dst.sin_addr.s_addr = addr.s_addr;

	ret = send_packet(iface, buf, &dst);

	buf_free(buf);
	return (ret);
fail:
	log_warn("send_ls_update");
	buf_free(buf);
	return (-1);
}

void
recv_ls_update(struct nbr *nbr, char *buf, u_int16_t len)
{
	struct lsa_hdr	 lsa;
	u_int32_t	 nlsa;

	if (len < sizeof(nlsa)) {
		log_warnx("recv_ls_update: bad packet size, neighbor ID %s",
		    inet_ntoa(nbr->id));
		return;
	}
	memcpy(&nlsa, buf, sizeof(nlsa));
	nlsa = ntohl(nlsa);
	buf += sizeof(nlsa);
	len -= sizeof(nlsa);

	switch (nbr->state) {
	case NBR_STA_DOWN:
	case NBR_STA_ATTEMPT:
	case NBR_STA_INIT:
	case NBR_STA_2_WAY:
	case NBR_STA_XSTRT:
	case NBR_STA_SNAP:
		log_debug("recv_ls_update: packet ignored in state %s, "
		    "neighbor ID %s", nbr_state_name(nbr->state),
		    inet_ntoa(nbr->id));
		break;
	case NBR_STA_XCHNG:
	case NBR_STA_LOAD:
	case NBR_STA_FULL:
		for (; nlsa > 0 && len > 0; nlsa--) {
			if (len < sizeof(lsa)) {
				log_warnx("recv_ls_update: bad packet size, "
				    "neighbor ID %s", inet_ntoa(nbr->id));
				return;
			}
			memcpy(&lsa, buf, sizeof(lsa));
			if (len < ntohs(lsa.len)) {
				log_warnx("recv_ls_update: bad packet size, "
				    "neighbor ID %s", inet_ntoa(nbr->id));
				return;
			}
			imsg_compose(ibuf_rde, IMSG_LS_UPD, nbr->peerid, 0,
			    buf, ntohs(lsa.len));
			buf += ntohs(lsa.len);
			len -= ntohs(lsa.len);
		}
		if (nlsa > 0 || len > 0) {
			log_warnx("recv_ls_update: bad packet size, "
			    "neighbor ID %s", inet_ntoa(nbr->id));
			return;
		}
		break;
	default:
		fatalx("recv_ls_update: unknown neighbor state");
	}
}

/* link state retransmit list */
void
ls_retrans_list_add(struct nbr *nbr, struct lsa_hdr *lsa,
    unsigned short timeout, unsigned short oneshot)
{
	struct timeval		 tv;
	struct lsa_entry	*le;
	struct lsa_ref		*ref;

	if ((ref = lsa_cache_get(lsa)) == NULL)
		fatalx("King Bula sez: somebody forgot to lsa_cache_add");

	if ((le = calloc(1, sizeof(*le))) == NULL)
		fatal("ls_retrans_list_add");

	le->le_ref = ref;
	le->le_when = timeout;
	le->le_oneshot = oneshot;

	ls_retrans_list_insert(nbr, le);

	if (!evtimer_pending(&nbr->ls_retrans_timer, NULL)) {
		timerclear(&tv);
		tv.tv_sec = TAILQ_FIRST(&nbr->ls_retrans_list)->le_when;

		if (evtimer_add(&nbr->ls_retrans_timer, &tv) == -1)
			fatal("ls_retrans_list_add");
	}
}

int
ls_retrans_list_del(struct nbr *nbr, struct lsa_hdr *lsa_hdr)
{
	struct lsa_entry	*le;

	if ((le = ls_retrans_list_get(nbr, lsa_hdr)) == NULL)
		return (-1);
	if (lsa_hdr->seq_num == le->le_ref->hdr.seq_num &&
	    lsa_hdr->ls_chksum == le->le_ref->hdr.ls_chksum) {
		ls_retrans_list_free(nbr, le);
		return (0);
	}

	return (-1);
}

struct lsa_entry *
ls_retrans_list_get(struct nbr *nbr, struct lsa_hdr *lsa_hdr)
{
	struct lsa_entry	*le;

	TAILQ_FOREACH(le, &nbr->ls_retrans_list, entry) {
		if ((lsa_hdr->type == le->le_ref->hdr.type) &&
		    (lsa_hdr->ls_id == le->le_ref->hdr.ls_id) &&
		    (lsa_hdr->adv_rtr == le->le_ref->hdr.adv_rtr))
			return (le);
	}
	return (NULL);
}

/*
 * The retransmission list is a delta queue: each entry's le_when holds the
 * timeout relative to the entry in front of it (see ls_retrans_list_remove),
 * so only the head of the list needs a pending event timer.
 */
void
ls_retrans_list_insert(struct nbr *nbr, struct lsa_entry *new)
{
	struct lsa_entry	*le;
	unsigned short		 when = new->le_when;

	TAILQ_FOREACH(le, &nbr->ls_retrans_list, entry) {
		if (when < le->le_when) {
			new->le_when = when;
			TAILQ_INSERT_BEFORE(le, new, entry);
			nbr->ls_ret_cnt++;
			return;
		}
		when -= le->le_when;
	}
	new->le_when = when;
	TAILQ_INSERT_TAIL(&nbr->ls_retrans_list, new, entry);
	nbr->ls_ret_cnt++;
}

void
ls_retrans_list_remove(struct nbr *nbr, struct lsa_entry *le)
{
	struct timeval		 tv;
	struct lsa_entry	*next = TAILQ_NEXT(le, entry);
	int			 reset = 0;

	/* adjust timeout of next entry */
	if (next)
		next->le_when += le->le_when;

	if (TAILQ_FIRST(&nbr->ls_retrans_list) == le &&
	    evtimer_pending(&nbr->ls_retrans_timer, NULL))
		reset = 1;

	TAILQ_REMOVE(&nbr->ls_retrans_list, le, entry);
	nbr->ls_ret_cnt--;

	if (reset && TAILQ_FIRST(&nbr->ls_retrans_list)) {
		if (evtimer_del(&nbr->ls_retrans_timer) == -1)
			fatal("ls_retrans_list_remove");

		timerclear(&tv);
		tv.tv_sec = TAILQ_FIRST(&nbr->ls_retrans_list)->le_when;

		if (evtimer_add(&nbr->ls_retrans_timer, &tv) == -1)
			fatal("ls_retrans_list_remove");
	}
}

void
ls_retrans_list_free(struct nbr *nbr, struct lsa_entry *le)
{
	ls_retrans_list_remove(nbr, le);

	lsa_cache_put(le->le_ref, nbr);
	free(le);
}

void
ls_retrans_list_clr(struct nbr *nbr)
{
	struct lsa_entry	*le;

	while ((le = TAILQ_FIRST(&nbr->ls_retrans_list)) != NULL)
		ls_retrans_list_free(nbr, le);

	nbr->ls_ret_cnt = 0;
}

/* ARGSUSED */
void
ls_retrans_timer(int fd, short event, void *bula)
{
	struct timeval		 tv;
	struct timespec		 tp;
	struct in_addr		 addr;
	struct nbr		*nbr = bula;
	struct lsa_entry	*le;
	struct buf		*buf;
	time_t			 now;
	int			 d;
	u_int32_t		 nlsa = 0;

	if ((le = TAILQ_FIRST(&nbr->ls_retrans_list)) != NULL)
		le->le_when = 0;	/* timer fired */
	else
		return;			/* queue empty, nothing to do */

	clock_gettime(CLOCK_MONOTONIC, &tp);
	now = tp.tv_sec;

	if (nbr->iface->self == nbr) {
		/*
		 * oneshot is set for LSAs queued for flooding; if it is not
		 * set the LSA has to be converted into a flood because the
		 * router recently became DR or BDR.
		 */
		if (le->le_oneshot && nbr->iface->state & IF_STA_DRORBDR)
			inet_aton(AllSPFRouters, &addr);
		else if (nbr->iface->state & IF_STA_DRORBDR) {
			/*
			 * An old retransmission needs to be converted into
			 * a flood by re-running lsa_flood.
			 */
			lsa_flood(nbr->iface, nbr, &le->le_ref->hdr,
			    le->le_ref->data);
			ls_retrans_list_free(nbr, le);
			/* ls_retrans_list_free retriggers the timer */
			return;
		} else
			inet_aton(AllDRouters, &addr);
	} else
		memcpy(&addr, &nbr->addr, sizeof(addr));

	if ((buf = prepare_ls_update(nbr->iface)) == NULL) {
		le->le_when = 1;
		goto done;
	}

	while ((le = TAILQ_FIRST(&nbr->ls_retrans_list)) != NULL &&
	    le->le_when == 0) {
		d = now - le->le_ref->stamp;
		if (d < 0)
			d = 0;
		else if (d > MAX_AGE)
			d = MAX_AGE;

		if (add_ls_update(buf, nbr->iface, le->le_ref->data,
		    le->le_ref->len, d) == 0)
			break;
		nlsa++;
		if (le->le_oneshot)
			ls_retrans_list_free(nbr, le);
		else {
			TAILQ_REMOVE(&nbr->ls_retrans_list, le, entry);
			nbr->ls_ret_cnt--;
			le->le_when = nbr->iface->rxmt_interval;
			ls_retrans_list_insert(nbr, le);
		}
	}
	send_ls_update(buf, nbr->iface, addr, nlsa);

done:
	if ((le = TAILQ_FIRST(&nbr->ls_retrans_list)) != NULL) {
		timerclear(&tv);
		tv.tv_sec = le->le_when;

		if (evtimer_add(&nbr->ls_retrans_timer, &tv) == -1)
			fatal("ls_retrans_timer");
	}
}

/* link state advertisement cache, shared by all retransmission lists */
LIST_HEAD(lsa_cache_head, lsa_ref);

struct lsa_cache {
	struct lsa_cache_head	*hashtbl;
	u_int32_t		 hashmask;
} lsacache;

struct lsa_ref	*lsa_cache_look(struct lsa_hdr *);

void
lsa_cache_init(u_int32_t hashsize)
{
	u_int32_t	 hs, i;

	for (hs = 1; hs < hashsize; hs <<= 1)
		;
	lsacache.hashtbl = calloc(hs, sizeof(struct lsa_cache_head));
	if (lsacache.hashtbl == NULL)
		fatal("lsa_cache_init");

	for (i = 0; i < hs; i++)
		LIST_INIT(&lsacache.hashtbl[i]);

	lsacache.hashmask = hs - 1;
}

struct lsa_ref *
lsa_cache_add(void *data, u_int16_t len)
{
	struct lsa_cache_head	*head;
	struct lsa_ref		*ref, *old;
	struct timespec		 tp;

	if ((ref = calloc(1, sizeof(*ref))) == NULL)
		fatal("lsa_cache_add");
	memcpy(&ref->hdr, data, sizeof(ref->hdr));

	if ((old = lsa_cache_look(&ref->hdr))) {
		free(ref);
		old->refcnt++;
		return (old);
	}

	if ((ref->data = malloc(len)) == NULL)
		fatal("lsa_cache_add");
	memcpy(ref->data, data, len);

	clock_gettime(CLOCK_MONOTONIC, &tp);
	ref->stamp = tp.tv_sec;
	ref->len = len;
	ref->refcnt = 1;

	head = &lsacache.hashtbl[hash32_buf(&ref->hdr, sizeof(ref->hdr),
	    HASHINIT) & lsacache.hashmask];
	LIST_INSERT_HEAD(head, ref, entry);
	return (ref);
}

struct lsa_ref *
lsa_cache_get(struct lsa_hdr *lsa_hdr)
{
	struct lsa_ref	*ref;

	ref = lsa_cache_look(lsa_hdr);
	if (ref)
		ref->refcnt++;

	return (ref);
}

void
lsa_cache_put(struct lsa_ref *ref, struct nbr *nbr)
{
	if (--ref->refcnt > 0)
		return;

	if (ntohs(ref->hdr.age) >= MAX_AGE)
		ospfe_imsg_compose_rde(IMSG_LS_MAXAGE, nbr->peerid, 0,
		    ref->data, sizeof(struct lsa_hdr));

	free(ref->data);
	LIST_REMOVE(ref, entry);
	free(ref);
}

struct lsa_ref *
lsa_cache_look(struct lsa_hdr *lsa_hdr)
{
	struct lsa_cache_head	*head;
	struct lsa_ref		*ref;

	head = &lsacache.hashtbl[hash32_buf(lsa_hdr, sizeof(*lsa_hdr),
	    HASHINIT) & lsacache.hashmask];

	LIST_FOREACH(ref, head, entry) {
		if (memcmp(&ref->hdr, lsa_hdr, sizeof(*lsa_hdr)) == 0)
			/* found match */
			return (ref);
	}

	return (NULL);
}