xref: /openbsd-src/usr.sbin/ospf6d/rde.c (revision 2b0358df1d88d06ef4139321dd05bd5e05d91eaf)
1 /*	$OpenBSD: rde.c,v 1.27 2009/03/29 21:42:30 stsp Exp $ */
2 
3 /*
4  * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
5  * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
6  * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #include <sys/types.h>
22 #include <sys/socket.h>
23 #include <sys/queue.h>
24 #include <sys/param.h>
25 #include <netinet/in.h>
26 #include <arpa/inet.h>
27 #include <err.h>
28 #include <errno.h>
29 #include <stdlib.h>
30 #include <signal.h>
31 #include <string.h>
32 #include <pwd.h>
33 #include <unistd.h>
34 #include <event.h>
35 
36 #include "ospf6.h"
37 #include "ospf6d.h"
38 #include "ospfe.h"
39 #include "log.h"
40 #include "rde.h"
41 
42 void		 rde_sig_handler(int, short, void *);
43 void		 rde_shutdown(void);
44 void		 rde_dispatch_imsg(int, short, void *);
45 void		 rde_dispatch_parent(int, short, void *);
46 void		 rde_dump_area(struct area *, int, pid_t);
47 
48 void		 rde_send_summary(pid_t);
49 void		 rde_send_summary_area(struct area *, pid_t);
50 void		 rde_nbr_init(u_int32_t);
51 void		 rde_nbr_free(void);
52 struct rde_nbr	*rde_nbr_new(u_int32_t, struct rde_nbr *);
53 void		 rde_nbr_del(struct rde_nbr *);
54 
55 void		 rde_req_list_add(struct rde_nbr *, struct lsa_hdr *);
56 int		 rde_req_list_exists(struct rde_nbr *, struct lsa_hdr *);
57 void		 rde_req_list_del(struct rde_nbr *, struct lsa_hdr *);
58 void		 rde_req_list_free(struct rde_nbr *);
59 
60 struct lsa	*rde_asext_get(struct rroute *);
61 struct lsa	*rde_asext_put(struct rroute *);
62 
63 struct lsa	*orig_asext_lsa(struct rroute *, u_int16_t);
64 struct lsa	*orig_sum_lsa(struct rt_node *, struct area *, u_int8_t, int);
65 struct lsa	*orig_intra_lsa_net(struct iface *, struct vertex *);
66 struct lsa	*orig_intra_lsa_rtr(struct area *, struct vertex *);
67 void		 orig_intra_area_prefix_lsas(struct area *);
68 void		 append_prefix_lsa(struct lsa **, u_int16_t *,
69 		    struct lsa_prefix *);
70 int		 link_lsa_from_full_nbr(struct lsa *, struct iface *);
71 
72 /* A 32-bit value != any ifindex.
73  * We assume ifindex is bound by [1, USHRT_MAX] inclusive. */
74 #define	LS_ID_INTRA_RTR	0x01000000
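/*
 * Rationale: the router-wide intra-area-prefix LSA originated in
 * orig_intra_lsa_rtr() uses LS_ID_INTRA_RTR as its LS ID, while the
 * per-link LSAs built in orig_intra_lsa_net() use the interface index.
 * With ifindex bounded by USHRT_MAX (0xffff), 0x01000000 can never
 * collide with any interface index, keeping the two LS ID spaces disjoint.
 */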
75 
76 /* Tree of prefixes with global scope on a given link,
77  * see orig_intra_lsa_*() */
78 struct prefix_node {
79 	RB_ENTRY(prefix_node)	 entry;
80 	struct lsa_prefix	*prefix;
81 };
82 RB_HEAD(prefix_tree, prefix_node);
83 RB_PROTOTYPE(prefix_tree, prefix_node, entry, prefix_compare);
84 int		 prefix_compare(struct prefix_node *, struct prefix_node *);
85 void		 prefix_tree_add(struct prefix_tree *, struct lsa_link *);
86 
87 struct ospfd_conf	*rdeconf = NULL, *nconf = NULL;
88 struct imsgbuf		*ibuf_ospfe;
89 struct imsgbuf		*ibuf_main;
90 struct rde_nbr		*nbrself;
91 struct lsa_tree		 asext_tree;
92 
93 /* ARGSUSED */
94 void
95 rde_sig_handler(int sig, short event, void *arg)
96 {
97 	/*
98 	 * signal handler rules don't apply, libevent decouples for us
99 	 */
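	/*
	 * signal_set()/signal_add() register these signals through libevent,
	 * which catches them in a minimal handler and invokes this callback
	 * from the ordinary event loop.  That is why calling non-reentrant
	 * code such as rde_shutdown() and the logging functions is safe here.
	 */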
100 
101 	switch (sig) {
102 	case SIGINT:
103 	case SIGTERM:
104 		rde_shutdown();
105 		/* NOTREACHED */
106 	default:
107 		fatalx("unexpected signal");
108 	}
109 }
110 
111 /* route decision engine */
112 pid_t
113 rde(struct ospfd_conf *xconf, int pipe_parent2rde[2], int pipe_ospfe2rde[2],
114     int pipe_parent2ospfe[2])
115 {
116 	struct event		 ev_sigint, ev_sigterm;
117 	struct timeval		 now;
118 	struct passwd		*pw;
119 	struct redistribute	*r;
120 	pid_t			 pid;
121 
122 	switch (pid = fork()) {
123 	case -1:
124 		fatal("cannot fork");
125 		/* NOTREACHED */
126 	case 0:
127 		break;
128 	default:
129 		return (pid);
130 	}
131 
132 	rdeconf = xconf;
133 
134 	if ((pw = getpwnam(OSPF6D_USER)) == NULL)
135 		fatal("getpwnam");
136 
137 	if (chroot(pw->pw_dir) == -1)
138 		fatal("chroot");
139 	if (chdir("/") == -1)
140 		fatal("chdir(\"/\")");
141 
142 	setproctitle("route decision engine");
143 	ospfd_process = PROC_RDE_ENGINE;
144 
145 	if (setgroups(1, &pw->pw_gid) ||
146 	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
147 	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
148 		fatal("can't drop privileges");
149 
150 	event_init();
151 	rde_nbr_init(NBR_HASHSIZE);
152 	lsa_init(&asext_tree);
153 
154 	/* setup signal handler */
155 	signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
156 	signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
157 	signal_add(&ev_sigint, NULL);
158 	signal_add(&ev_sigterm, NULL);
159 	signal(SIGPIPE, SIG_IGN);
160 	signal(SIGHUP, SIG_IGN);
161 
162 	/* setup pipes */
163 	close(pipe_ospfe2rde[0]);
164 	close(pipe_parent2rde[0]);
165 	close(pipe_parent2ospfe[0]);
166 	close(pipe_parent2ospfe[1]);
167 
168 	if ((ibuf_ospfe = malloc(sizeof(struct imsgbuf))) == NULL ||
169 	    (ibuf_main = malloc(sizeof(struct imsgbuf))) == NULL)
170 		fatal(NULL);
171 	imsg_init(ibuf_ospfe, pipe_ospfe2rde[1], rde_dispatch_imsg);
172 	imsg_init(ibuf_main, pipe_parent2rde[1], rde_dispatch_parent);
173 
174 	/* setup event handler */
175 	ibuf_ospfe->events = EV_READ;
176 	event_set(&ibuf_ospfe->ev, ibuf_ospfe->fd, ibuf_ospfe->events,
177 	    ibuf_ospfe->handler, ibuf_ospfe);
178 	event_add(&ibuf_ospfe->ev, NULL);
179 
180 	ibuf_main->events = EV_READ;
181 	event_set(&ibuf_main->ev, ibuf_main->fd, ibuf_main->events,
182 	    ibuf_main->handler, ibuf_main);
183 	event_add(&ibuf_main->ev, NULL);
184 
185 	evtimer_set(&rdeconf->ev, spf_timer, rdeconf);
186 	cand_list_init();
187 	rt_init();
188 
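	/*
	 * The redistribute list is evaluated by the parent process when it
	 * decides which kernel routes to announce (the IMSG_NETWORK_ADD/DEL
	 * messages handled in rde_dispatch_parent() come from there); the
	 * RDE never consults it itself, so the local copy is freed below.
	 */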
189 	while ((r = SIMPLEQ_FIRST(&rdeconf->redist_list)) != NULL) {
190 		SIMPLEQ_REMOVE_HEAD(&rdeconf->redist_list, entry);
191 		free(r);
192 	}
193 
194 	gettimeofday(&now, NULL);
195 	rdeconf->uptime = now.tv_sec;
196 
197 	event_dispatch();
198 
199 	rde_shutdown();
200 	/* NOTREACHED */
201 
202 	return (0);
203 }
204 
205 void
206 rde_shutdown(void)
207 {
208 	struct area	*a;
209 
210 	stop_spf_timer(rdeconf);
211 	cand_list_clr();
212 	rt_clear();
213 
214 	while ((a = LIST_FIRST(&rdeconf->area_list)) != NULL) {
215 		LIST_REMOVE(a, entry);
216 		area_del(a);
217 	}
218 	rde_nbr_free();
219 
220 	msgbuf_clear(&ibuf_ospfe->w);
221 	free(ibuf_ospfe);
222 	msgbuf_clear(&ibuf_main->w);
223 	free(ibuf_main);
224 	free(rdeconf);
225 
226 	log_info("route decision engine exiting");
227 	_exit(0);
228 }
229 
230 int
231 rde_imsg_compose_ospfe(int type, u_int32_t peerid, pid_t pid, void *data,
232     u_int16_t datalen)
233 {
234 	return (imsg_compose(ibuf_ospfe, type, peerid, pid, data, datalen));
235 }
236 
237 /* ARGSUSED */
238 void
239 rde_dispatch_imsg(int fd, short event, void *bula)
240 {
241 	struct imsgbuf		*ibuf = bula;
242 	struct imsg		 imsg;
243 	struct in_addr		 aid;
244 	struct ls_req_hdr	 req_hdr;
245 	struct lsa_hdr		 lsa_hdr, *db_hdr;
246 	struct rde_nbr		 rn, *nbr;
247 	struct timespec		 tp;
248 	struct lsa		*lsa;
249 	struct area		*area;
250 	struct vertex		*v;
251 	struct iface		*iface, *ifp;
252 	char			*buf;
253 	ssize_t			 n;
254 	time_t			 now;
255 	int			 r, state, self, shut = 0;
256 	u_int16_t		 l;
257 
258 	switch (event) {
259 	case EV_READ:
260 		if ((n = imsg_read(ibuf)) == -1)
261 			fatal("imsg_read error");
262 		if (n == 0)	/* connection closed */
263 			shut = 1;
264 		break;
265 	case EV_WRITE:
266 		if (msgbuf_write(&ibuf->w) == -1)
267 			fatal("msgbuf_write");
268 		imsg_event_add(ibuf);
269 		return;
270 	default:
271 		fatalx("unknown event");
272 	}
273 
274 	clock_gettime(CLOCK_MONOTONIC, &tp);
275 	now = tp.tv_sec;
276 
277 	for (;;) {
278 		if ((n = imsg_get(ibuf, &imsg)) == -1)
279 			fatal("rde_dispatch_imsg: imsg_get error");
280 		if (n == 0)
281 			break;
282 
283 		switch (imsg.hdr.type) {
284 		case IMSG_NEIGHBOR_UP:
285 			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rn))
286 				fatalx("invalid size of OE request");
287 			memcpy(&rn, imsg.data, sizeof(rn));
288 
289 			if (rde_nbr_new(imsg.hdr.peerid, &rn) == NULL)
290 				fatalx("rde_dispatch_imsg: "
291 				    "neighbor already exists");
292 			break;
293 		case IMSG_NEIGHBOR_DOWN:
294 			rde_nbr_del(rde_nbr_find(imsg.hdr.peerid));
295 			break;
296 		case IMSG_NEIGHBOR_CHANGE:
297 			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state))
298 				fatalx("invalid size of OE request");
299 			memcpy(&state, imsg.data, sizeof(state));
300 
301 			nbr = rde_nbr_find(imsg.hdr.peerid);
302 			if (nbr == NULL)
303 				break;
304 
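			/*
			 * Re-originate the area's intra-area-prefix LSAs
			 * whenever this neighbor enters or leaves FULL state,
			 * since that changes the set of adjacent routers
			 * whose prefixes we advertise on the link.
			 */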
305 			if (state != nbr->state &&
306 			    (nbr->state & NBR_STA_FULL ||
307 			    state & NBR_STA_FULL)) {
308 				nbr->state = state;
309 				area_track(nbr->area, state);
310 				orig_intra_area_prefix_lsas(nbr->area);
311 			}
312 
313 			nbr->state = state;
314 			if (nbr->state & NBR_STA_FULL)
315 				rde_req_list_free(nbr);
316 			break;
317 		case IMSG_DB_SNAPSHOT:
318 			nbr = rde_nbr_find(imsg.hdr.peerid);
319 			if (nbr == NULL)
320 				break;
321 
322 			lsa_snap(nbr, imsg.hdr.peerid);
323 
324 			imsg_compose(ibuf_ospfe, IMSG_DB_END, imsg.hdr.peerid,
325 			    0, NULL, 0);
326 			break;
327 		case IMSG_DD:
328 			nbr = rde_nbr_find(imsg.hdr.peerid);
329 			if (nbr == NULL)
330 				break;
331 
332 			buf = imsg.data;
333 			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
334 			    l >= sizeof(lsa_hdr); l -= sizeof(lsa_hdr)) {
335 				memcpy(&lsa_hdr, buf, sizeof(lsa_hdr));
336 				buf += sizeof(lsa_hdr);
337 
338 				v = lsa_find(nbr->iface, lsa_hdr.type,
339 				    lsa_hdr.ls_id, lsa_hdr.adv_rtr);
340 				if (v == NULL)
341 					db_hdr = NULL;
342 				else
343 					db_hdr = &v->lsa->hdr;
344 
345 				if (lsa_newer(&lsa_hdr, db_hdr) > 0) {
346 					/*
347 					 * only request LSAs that are
348 					 * newer or missing
349 					 */
350 					rde_req_list_add(nbr, &lsa_hdr);
351 					imsg_compose(ibuf_ospfe, IMSG_DD,
352 					    imsg.hdr.peerid, 0, &lsa_hdr,
353 					    sizeof(lsa_hdr));
354 				}
355 			}
356 			if (l != 0)
357 				log_warnx("rde_dispatch_imsg: peerid %u, "
358 				    "trailing garbage in Database Description "
359 				    "packet", imsg.hdr.peerid);
360 
361 			imsg_compose(ibuf_ospfe, IMSG_DD_END, imsg.hdr.peerid,
362 			    0, NULL, 0);
363 			break;
364 		case IMSG_LS_REQ:
365 			nbr = rde_nbr_find(imsg.hdr.peerid);
366 			if (nbr == NULL)
367 				break;
368 
369 			buf = imsg.data;
370 			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
371 			    l >= sizeof(req_hdr); l -= sizeof(req_hdr)) {
372 				memcpy(&req_hdr, buf, sizeof(req_hdr));
373 				buf += sizeof(req_hdr);
374 
375 				if ((v = lsa_find(nbr->iface,
376 				    req_hdr.type, req_hdr.ls_id,
377 				    req_hdr.adv_rtr)) == NULL) {
378 					imsg_compose(ibuf_ospfe, IMSG_LS_BADREQ,
379 					    imsg.hdr.peerid, 0, NULL, 0);
380 					continue;
381 				}
382 				imsg_compose(ibuf_ospfe, IMSG_LS_UPD,
383 				    imsg.hdr.peerid, 0, v->lsa,
384 				    ntohs(v->lsa->hdr.len));
385 			}
386 			if (l != 0)
387 				log_warnx("rde_dispatch_imsg: peerid %u, "
388 				    "trailing garbage in LS Request "
389 				    "packet", imsg.hdr.peerid);
390 			break;
391 		case IMSG_LS_UPD:
392 			nbr = rde_nbr_find(imsg.hdr.peerid);
393 			if (nbr == NULL)
394 				break;
395 
396 			lsa = malloc(imsg.hdr.len - IMSG_HEADER_SIZE);
397 			if (lsa == NULL)
398 				fatal(NULL);
399 			memcpy(lsa, imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
400 
401 			if (!lsa_check(nbr, lsa,
402 			    imsg.hdr.len - IMSG_HEADER_SIZE)) {
403 				free(lsa);
404 				break;
405 			}
406 
407 			v = lsa_find(nbr->iface, lsa->hdr.type, lsa->hdr.ls_id,
408 				    lsa->hdr.adv_rtr);
409 			if (v == NULL)
410 				db_hdr = NULL;
411 			else
412 				db_hdr = &v->lsa->hdr;
413 
414 			if (nbr->self) {
415 				lsa_merge(nbr, lsa, v);
416 				/* lsa_merge frees the right lsa */
417 				break;
418 			}
419 
420 			r = lsa_newer(&lsa->hdr, db_hdr);
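			/*
			 * lsa_newer() is a three-way comparison: > 0 means
			 * the received LSA is newer than the database copy
			 * (or there is none), < 0 means the database copy is
			 * newer, 0 means both are the same instance.  The
			 * v->changed + MIN_LS_ARRIVAL checks below rate-limit
			 * how often a given LSA is accepted or answered
			 * (the MinLSArrival rule).
			 */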
421 			if (r > 0) {
422 				/* new LSA newer than DB */
423 				if (v && v->flooded &&
424 				    v->changed + MIN_LS_ARRIVAL >= now) {
425 					free(lsa);
426 					break;
427 				}
428 
429 				rde_req_list_del(nbr, &lsa->hdr);
430 
431 				self = lsa_self(lsa);
432 				if (self) {
433 					if (v == NULL)
434 						/* LSA is no longer announced,
435 						 * remove by premature aging. */
436 						lsa_flush(nbr, lsa);
437 					else
438 						lsa_reflood(v, lsa);
439 				} else if (lsa_add(nbr, lsa))
440 					/* delayed lsa, don't flood yet */
441 					break;
442 
443 				/* flood and perhaps ack LSA */
444 				imsg_compose(ibuf_ospfe, IMSG_LS_FLOOD,
445 				    imsg.hdr.peerid, 0, lsa,
446 				    ntohs(lsa->hdr.len));
447 
448 				/* reflood self originated LSA */
449 				if (self && v)
450 					imsg_compose(ibuf_ospfe, IMSG_LS_FLOOD,
451 					    v->peerid, 0, v->lsa,
452 					    ntohs(v->lsa->hdr.len));
453 				/* new LSA was not added so free it */
454 				if (self)
455 					free(lsa);
456 			} else if (r < 0) {
457 				/* lsa no longer needed */
458 				free(lsa);
459 
460 				/*
461 				 * point 6 of "The Flooding Procedure"
462 				 * We are violating the RFC here because
463 				 * it does not make sense to reset a session
464 				 * because an equal LSA is already in the table.
465 				 * Only if the LSA sent is older than the one
466 				 * in the table should the session be reset.
467 				 */
468 				if (rde_req_list_exists(nbr, &lsa->hdr)) {
469 					imsg_compose(ibuf_ospfe, IMSG_LS_BADREQ,
470 					    imsg.hdr.peerid, 0, NULL, 0);
471 					break;
472 				}
473 
474 				/* new LSA older than DB */
475 				if (ntohl(db_hdr->seq_num) == MAX_SEQ_NUM &&
476 				    ntohs(db_hdr->age) == MAX_AGE)
477 					/* seq-num wrap */
478 					break;
479 
480 				if (v->changed + MIN_LS_ARRIVAL >= now)
481 					break;
482 
483 				/* directly send current LSA, no ack */
484 				imsg_compose(ibuf_ospfe, IMSG_LS_UPD,
485 				    imsg.hdr.peerid, 0, v->lsa,
486 				    ntohs(v->lsa->hdr.len));
487 			} else {
488 				/* LSA is equal, send a direct ack */
489 				imsg_compose(ibuf_ospfe, IMSG_LS_ACK,
490 				    imsg.hdr.peerid, 0, &lsa->hdr,
491 				    sizeof(lsa->hdr));
492 				free(lsa);
493 			}
494 			break;
495 		case IMSG_LS_MAXAGE:
496 			nbr = rde_nbr_find(imsg.hdr.peerid);
497 			if (nbr == NULL)
498 				break;
499 
500 			if (imsg.hdr.len != IMSG_HEADER_SIZE +
501 			    sizeof(struct lsa_hdr))
502 				fatalx("invalid size of OE request");
503 			memcpy(&lsa_hdr, imsg.data, sizeof(lsa_hdr));
504 
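			/*
			 * A MaxAge LSA may only be removed from the database
			 * once no neighbor in the area is in Exchange or
			 * Loading state (RFC 2328, section 14), hence the
			 * rde_nbr_loading() check below.
			 */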
505 			if (rde_nbr_loading(nbr->area))
506 				break;
507 
508 			v = lsa_find(nbr->iface, lsa_hdr.type, lsa_hdr.ls_id,
509 				    lsa_hdr.adv_rtr);
510 			if (v == NULL)
511 				db_hdr = NULL;
512 			else
513 				db_hdr = &v->lsa->hdr;
514 
515 			/*
516 			 * only delete LSA if the one in the db is not newer
517 			 */
518 			if (lsa_newer(db_hdr, &lsa_hdr) <= 0)
519 				lsa_del(nbr, &lsa_hdr);
520 			break;
521 		case IMSG_CTL_SHOW_DATABASE:
522 		case IMSG_CTL_SHOW_DB_EXT:
523 		case IMSG_CTL_SHOW_DB_LINK:
524 		case IMSG_CTL_SHOW_DB_NET:
525 		case IMSG_CTL_SHOW_DB_RTR:
526 		case IMSG_CTL_SHOW_DB_INTRA:
527 		case IMSG_CTL_SHOW_DB_SELF:
528 		case IMSG_CTL_SHOW_DB_SUM:
529 		case IMSG_CTL_SHOW_DB_ASBR:
530 			if (imsg.hdr.len != IMSG_HEADER_SIZE &&
531 			    imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(aid)) {
532 				log_warnx("rde_dispatch_imsg: wrong imsg len");
533 				break;
534 			}
535 			if (imsg.hdr.len == IMSG_HEADER_SIZE) {
536 				LIST_FOREACH(area, &rdeconf->area_list, entry) {
537 					rde_dump_area(area, imsg.hdr.type,
538 					    imsg.hdr.pid);
539 				}
540 				lsa_dump(&asext_tree, imsg.hdr.type,
541 				    imsg.hdr.pid);
542 			} else {
543 				memcpy(&aid, imsg.data, sizeof(aid));
544 				if ((area = area_find(rdeconf, aid)) != NULL) {
545 					rde_dump_area(area, imsg.hdr.type,
546 					    imsg.hdr.pid);
547 					if (!area->stub)
548 						lsa_dump(&asext_tree,
549 						    imsg.hdr.type,
550 						    imsg.hdr.pid);
551 				}
552 			}
553 			imsg_compose(ibuf_ospfe, IMSG_CTL_END, 0, imsg.hdr.pid,
554 			    NULL, 0);
555 			break;
556 		case IMSG_CTL_SHOW_RIB:
557 			LIST_FOREACH(area, &rdeconf->area_list, entry) {
558 				imsg_compose(ibuf_ospfe, IMSG_CTL_AREA,
559 				    0, imsg.hdr.pid, area, sizeof(*area));
560 
561 				rt_dump(area->id, imsg.hdr.pid, RIB_RTR);
562 				rt_dump(area->id, imsg.hdr.pid, RIB_NET);
563 			}
564 			aid.s_addr = 0;
565 			rt_dump(aid, imsg.hdr.pid, RIB_EXT);
566 
567 			imsg_compose(ibuf_ospfe, IMSG_CTL_END, 0, imsg.hdr.pid,
568 			    NULL, 0);
569 			break;
570 		case IMSG_CTL_SHOW_SUM:
571 			rde_send_summary(imsg.hdr.pid);
572 			LIST_FOREACH(area, &rdeconf->area_list, entry)
573 				rde_send_summary_area(area, imsg.hdr.pid);
574 			imsg_compose(ibuf_ospfe, IMSG_CTL_END, 0, imsg.hdr.pid,
575 			    NULL, 0);
576 			break;
577 		case IMSG_IFINFO:
578 			if (imsg.hdr.len != IMSG_HEADER_SIZE +
579 			    sizeof(struct iface))
580 				fatalx("IFINFO imsg with wrong len");
581 
582 			ifp = imsg.data;
583 
584 			iface = if_find(ifp->ifindex);
585 			if (iface == NULL)
586 				fatalx("interface lost in rde");
587 			iface->flags = ifp->flags;
588 			iface->linkstate = ifp->linkstate;
589 			iface->nh_reachable = ifp->nh_reachable;
590 			if (iface->state != ifp->state) {
591 				iface->state = ifp->state;
592 				area = area_find(rdeconf, iface->area_id);
593 				if (!area)
594 					fatalx("interface lost area");
595 				orig_intra_area_prefix_lsas(area);
596 			}
597 			break;
598 		default:
599 			log_debug("rde_dispatch_imsg: unexpected imsg %d",
600 			    imsg.hdr.type);
601 			break;
602 		}
603 		imsg_free(&imsg);
604 	}
605 	if (!shut)
606 		imsg_event_add(ibuf);
607 	else {
608 		/* this pipe is dead, so remove the event handler */
609 		event_del(&ibuf->ev);
610 		event_loopexit(NULL);
611 	}
612 }
613 
614 /* ARGSUSED */
615 void
616 rde_dispatch_parent(int fd, short event, void *bula)
617 {
618 	static struct area	*narea;
619 	struct iface		*niface, *iface;
620 	struct imsg		 imsg;
621 	struct kroute		 kr;
622 	struct rroute		 rr;
623 	struct imsgbuf		*ibuf = bula;
624 	struct lsa		*lsa;
625 	struct vertex		*v;
626 	struct rt_node		*rn;
627 	ssize_t			 n;
628 	int			 shut = 0;
629 	unsigned int		 ifindex;
630 
631 	switch (event) {
632 	case EV_READ:
633 		if ((n = imsg_read(ibuf)) == -1)
634 			fatal("imsg_read error");
635 		if (n == 0)	/* connection closed */
636 			shut = 1;
637 		break;
638 	case EV_WRITE:
639 		if (msgbuf_write(&ibuf->w) == -1)
640 			fatal("msgbuf_write");
641 		imsg_event_add(ibuf);
642 		return;
643 	default:
644 		fatalx("unknown event");
645 	}
646 
647 	for (;;) {
648 		if ((n = imsg_get(ibuf, &imsg)) == -1)
649 			fatal("rde_dispatch_parent: imsg_get error");
650 		if (n == 0)
651 			break;
652 
653 		switch (imsg.hdr.type) {
654 		case IMSG_NETWORK_ADD:
655 			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
656 				log_warnx("rde_dispatch_parent: "
657 				    "wrong imsg len");
658 				break;
659 			}
660 			memcpy(&rr, imsg.data, sizeof(rr));
661 
662 			if ((lsa = rde_asext_get(&rr)) != NULL) {
663 				v = lsa_find(NULL, lsa->hdr.type,
664 				    lsa->hdr.ls_id, lsa->hdr.adv_rtr);
665 
666 				lsa_merge(nbrself, lsa, v);
667 			}
668 			break;
669 		case IMSG_NETWORK_DEL:
670 			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
671 				log_warnx("rde_dispatch_parent: "
672 				    "wrong imsg len");
673 				break;
674 			}
675 			memcpy(&rr, imsg.data, sizeof(rr));
676 
677 			if ((lsa = rde_asext_put(&rr)) != NULL) {
678 				v = lsa_find(NULL, lsa->hdr.type,
679 				    lsa->hdr.ls_id, lsa->hdr.adv_rtr);
680 
681 				/*
682 				 * if v == NULL no LSA is in the table and
683 				 * nothing has to be done.
684 				 */
685 				if (v)
686 					lsa_merge(nbrself, lsa, v);
687 			}
688 			break;
689 		case IMSG_KROUTE_GET:
690 			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(kr)) {
691 				log_warnx("rde_dispatch_parent: "
692 				    "wrong imsg len");
693 				break;
694 			}
695 			memcpy(&kr, imsg.data, sizeof(kr));
696 
697 			if ((rn = rt_find(&kr.prefix, kr.prefixlen,
698 			    DT_NET)) != NULL)
699 				rde_send_change_kroute(rn);
700 			else
701 				/* should not happen */
702 				imsg_compose(ibuf_main, IMSG_KROUTE_DELETE, 0,
703 				    0, &kr, sizeof(kr));
704 			break;
705 		case IMSG_IFADD:
706 			if ((niface = malloc(sizeof(struct iface))) == NULL)
707 				fatal(NULL);
708 			memcpy(niface, imsg.data, sizeof(struct iface));
709 
710 			LIST_INIT(&niface->nbr_list);
711 			TAILQ_INIT(&niface->ls_ack_list);
712 			RB_INIT(&niface->lsa_tree);
713 
714 			narea = area_find(rdeconf, niface->area_id);
			if (narea == NULL)
				fatalx("rde_dispatch_parent: unknown area");
715 			LIST_INSERT_HEAD(&narea->iface_list, niface, entry);
716 			break;
717 		case IMSG_IFDELETE:
718 			if (imsg.hdr.len != IMSG_HEADER_SIZE +
719 			    sizeof(ifindex))
720 				fatalx("IFDELETE imsg with wrong len");
721 
722 			memcpy(&ifindex, imsg.data, sizeof(ifindex));
723 			iface = if_find(ifindex);
724 			if (iface == NULL)
725 				fatalx("interface lost in ospfe");
726 
727 			LIST_REMOVE(iface, entry);
728 			if_del(iface);
729 			break;
730 		case IMSG_RECONF_CONF:
731 			if ((nconf = malloc(sizeof(struct ospfd_conf))) ==
732 			    NULL)
733 				fatal(NULL);
734 			memcpy(nconf, imsg.data, sizeof(struct ospfd_conf));
735 
736 			LIST_INIT(&nconf->area_list);
737 			LIST_INIT(&nconf->cand_list);
738 			break;
739 		case IMSG_RECONF_AREA:
740 			if ((narea = area_new()) == NULL)
741 				fatal(NULL);
742 			memcpy(narea, imsg.data, sizeof(struct area));
743 
744 			LIST_INIT(&narea->iface_list);
745 			LIST_INIT(&narea->nbr_list);
746 			RB_INIT(&narea->lsa_tree);
747 
748 			LIST_INSERT_HEAD(&nconf->area_list, narea, entry);
749 			break;
750 		case IMSG_RECONF_END:
751 			merge_config(rdeconf, nconf);
752 			nconf = NULL;
753 			break;
754 		default:
755 			log_debug("rde_dispatch_parent: unexpected imsg %d",
756 			    imsg.hdr.type);
757 			break;
758 		}
759 		imsg_free(&imsg);
760 	}
761 	if (!shut)
762 		imsg_event_add(ibuf);
763 	else {
764 		/* this pipe is dead, so remove the event handler */
765 		event_del(&ibuf->ev);
766 		event_loopexit(NULL);
767 	}
768 }
769 
770 void
771 rde_dump_area(struct area *area, int imsg_type, pid_t pid)
772 {
773 	struct iface	*iface;
774 
775 	/* dump header */
776 	imsg_compose(ibuf_ospfe, IMSG_CTL_AREA, 0, pid, area, sizeof(*area));
777 
778 	/* dump link local lsa */
779 	LIST_FOREACH(iface, &area->iface_list, entry) {
780 		imsg_compose(ibuf_ospfe, IMSG_CTL_IFACE,
781 		    0, pid, iface, sizeof(*iface));
782 		lsa_dump(&iface->lsa_tree, imsg_type, pid);
783 	}
784 
785 	/* dump area lsa */
786 	lsa_dump(&area->lsa_tree, imsg_type, pid);
787 }
788 
789 u_int32_t
790 rde_router_id(void)
791 {
792 	return (rdeconf->rtr_id.s_addr);
793 }
794 
795 void
796 rde_send_change_kroute(struct rt_node *r)
797 {
798 	struct kroute		 kr;
799 	struct rt_nexthop	*rn;
800 
801 	TAILQ_FOREACH(rn, &r->nexthop, entry) {
802 		if (!rn->invalid)
803 			break;
804 	}
805 	if (!rn)
806 		fatalx("rde_send_change_kroute: no valid nexthop found");
807 
808 	bzero(&kr, sizeof(kr));
809 	kr.prefix = r->prefix;
810 	kr.nexthop = rn->nexthop;
811 	kr.prefixlen = r->prefixlen;
812 	kr.ext_tag = r->ext_tag;
813 
814 	imsg_compose(ibuf_main, IMSG_KROUTE_CHANGE, 0, 0, &kr, sizeof(kr));
815 }
816 
817 void
818 rde_send_delete_kroute(struct rt_node *r)
819 {
820 	struct kroute	 kr;
821 
822 	bzero(&kr, sizeof(kr));
823 	kr.prefix = r->prefix;
824 	kr.prefixlen = r->prefixlen;
825 
826 	imsg_compose(ibuf_main, IMSG_KROUTE_DELETE, 0, 0, &kr, sizeof(kr));
827 }
828 
829 void
830 rde_send_summary(pid_t pid)
831 {
832 	static struct ctl_sum	 sumctl;
833 	struct timeval		 now;
834 	struct area		*area;
835 	struct vertex		*v;
836 
837 	bzero(&sumctl, sizeof(struct ctl_sum));
838 
839 	sumctl.rtr_id.s_addr = rde_router_id();
840 	sumctl.spf_delay = rdeconf->spf_delay;
841 	sumctl.spf_hold_time = rdeconf->spf_hold_time;
842 
843 	LIST_FOREACH(area, &rdeconf->area_list, entry)
844 		sumctl.num_area++;
845 
846 	RB_FOREACH(v, lsa_tree, &asext_tree)
847 		sumctl.num_ext_lsa++;
848 
849 	gettimeofday(&now, NULL);
850 	if (rdeconf->uptime < now.tv_sec)
851 		sumctl.uptime = now.tv_sec - rdeconf->uptime;
852 	else
853 		sumctl.uptime = 0;
854 
855 	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM, 0, pid, &sumctl,
856 	    sizeof(sumctl));
857 }
858 
859 void
860 rde_send_summary_area(struct area *area, pid_t pid)
861 {
862 	static struct ctl_sum_area	 sumareactl;
863 	struct iface			*iface;
864 	struct rde_nbr			*nbr;
865 	struct lsa_tree			*tree = &area->lsa_tree;
866 	struct vertex			*v;
867 
868 	bzero(&sumareactl, sizeof(struct ctl_sum_area));
869 
870 	sumareactl.area.s_addr = area->id.s_addr;
871 	sumareactl.num_spf_calc = area->num_spf_calc;
872 
873 	LIST_FOREACH(iface, &area->iface_list, entry)
874 		sumareactl.num_iface++;
875 
876 	LIST_FOREACH(nbr, &area->nbr_list, entry)
877 		if (nbr->state == NBR_STA_FULL && !nbr->self)
878 			sumareactl.num_adj_nbr++;
879 
880 	RB_FOREACH(v, lsa_tree, tree)
881 		sumareactl.num_lsa++;
882 
883 	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM_AREA, 0, pid, &sumareactl,
884 	    sizeof(sumareactl));
885 }
886 
887 LIST_HEAD(rde_nbr_head, rde_nbr);
888 
889 struct nbr_table {
890 	struct rde_nbr_head	*hashtbl;
891 	u_int32_t		 hashmask;
892 } rdenbrtable;
893 
894 #define RDE_NBR_HASH(x)		\
895 	&rdenbrtable.hashtbl[(x) & rdenbrtable.hashmask]
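/*
 * rde_nbr_init() rounds the table size up to a power of two, so masking
 * with hashmask (size - 1) is the same as reducing the peer ID modulo the
 * table size; a 128-bucket table, for example, maps peerid to bucket
 * peerid & 0x7f.
 */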
896 
897 void
898 rde_nbr_init(u_int32_t hashsize)
899 {
900 	struct rde_nbr_head	*head;
901 	u_int32_t		 hs, i;
902 
903 	for (hs = 1; hs < hashsize; hs <<= 1)
904 		;
905 	rdenbrtable.hashtbl = calloc(hs, sizeof(struct rde_nbr_head));
906 	if (rdenbrtable.hashtbl == NULL)
907 		fatal("rde_nbr_init");
908 
909 	for (i = 0; i < hs; i++)
910 		LIST_INIT(&rdenbrtable.hashtbl[i]);
911 
912 	rdenbrtable.hashmask = hs - 1;
913 
914 	if ((nbrself = calloc(1, sizeof(*nbrself))) == NULL)
915 		fatal("rde_nbr_init");
916 
917 	nbrself->id.s_addr = rde_router_id();
918 	nbrself->peerid = NBR_IDSELF;
919 	nbrself->state = NBR_STA_DOWN;
920 	nbrself->self = 1;
921 	head = RDE_NBR_HASH(NBR_IDSELF);
922 	LIST_INSERT_HEAD(head, nbrself, hash);
923 }
924 
925 void
926 rde_nbr_free(void)
927 {
928 	free(nbrself);
929 	free(rdenbrtable.hashtbl);
930 }
931 
932 struct rde_nbr *
933 rde_nbr_find(u_int32_t peerid)
934 {
935 	struct rde_nbr_head	*head;
936 	struct rde_nbr		*nbr;
937 
938 	head = RDE_NBR_HASH(peerid);
939 
940 	LIST_FOREACH(nbr, head, hash) {
941 		if (nbr->peerid == peerid)
942 			return (nbr);
943 	}
944 
945 	return (NULL);
946 }
947 
948 struct rde_nbr *
949 rde_nbr_new(u_int32_t peerid, struct rde_nbr *new)
950 {
951 	struct rde_nbr_head	*head;
952 	struct rde_nbr		*nbr;
953 	struct area		*area;
954 	struct iface		*iface;
955 
956 	if (rde_nbr_find(peerid))
957 		return (NULL);
958 	if ((area = area_find(rdeconf, new->area_id)) == NULL)
959 		fatalx("rde_nbr_new: unknown area");
960 
961 	LIST_FOREACH(iface, &area->iface_list, entry) {
962 		if (iface->ifindex == new->ifindex)
963 			break;
964 	}
965 	if (iface == NULL)
966 		fatalx("rde_nbr_new: unknown interface");
967 
968 	if ((nbr = calloc(1, sizeof(*nbr))) == NULL)
969 		fatal("rde_nbr_new");
970 
971 	memcpy(nbr, new, sizeof(*nbr));
972 	nbr->peerid = peerid;
973 	nbr->area = area;
974 	nbr->iface = iface;
975 
976 	TAILQ_INIT(&nbr->req_list);
977 
978 	head = RDE_NBR_HASH(peerid);
979 	LIST_INSERT_HEAD(head, nbr, hash);
980 	LIST_INSERT_HEAD(&area->nbr_list, nbr, entry);
981 
982 	return (nbr);
983 }
984 
985 void
986 rde_nbr_del(struct rde_nbr *nbr)
987 {
988 	if (nbr == NULL)
989 		return;
990 
991 	rde_req_list_free(nbr);
992 
993 	LIST_REMOVE(nbr, entry);
994 	LIST_REMOVE(nbr, hash);
995 
996 	free(nbr);
997 }
998 
999 int
1000 rde_nbr_loading(struct area *area)
1001 {
1002 	struct rde_nbr		*nbr;
1003 	int			 checkall = 0;
1004 
1005 	if (area == NULL) {
1006 		area = LIST_FIRST(&rdeconf->area_list);
1007 		checkall = 1;
1008 	}
1009 
1010 	while (area != NULL) {
1011 		LIST_FOREACH(nbr, &area->nbr_list, entry) {
1012 			if (nbr->self)
1013 				continue;
1014 			if (nbr->state & NBR_STA_XCHNG ||
1015 			    nbr->state & NBR_STA_LOAD)
1016 				return (1);
1017 		}
1018 		if (!checkall)
1019 			break;
1020 		area = LIST_NEXT(area, entry);
1021 	}
1022 
1023 	return (0);
1024 }
1025 
1026 struct rde_nbr *
1027 rde_nbr_self(struct area *area)
1028 {
1029 	struct rde_nbr		*nbr;
1030 
1031 	LIST_FOREACH(nbr, &area->nbr_list, entry)
1032 		if (nbr->self)
1033 			return (nbr);
1034 
1035 	/* this must never happen */
1036 	fatalx("rde_nbr_self: area without self");
1037 	return (NULL);
1038 }
1039 
1040 /*
1041  * LSA req list
1042  */
1043 void
1044 rde_req_list_add(struct rde_nbr *nbr, struct lsa_hdr *lsa)
1045 {
1046 	struct rde_req_entry	*le;
1047 
1048 	if ((le = calloc(1, sizeof(*le))) == NULL)
1049 		fatal("rde_req_list_add");
1050 
1051 	TAILQ_INSERT_TAIL(&nbr->req_list, le, entry);
1052 	le->type = lsa->type;
1053 	le->ls_id = lsa->ls_id;
1054 	le->adv_rtr = lsa->adv_rtr;
1055 }
1056 
1057 int
1058 rde_req_list_exists(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
1059 {
1060 	struct rde_req_entry	*le;
1061 
1062 	TAILQ_FOREACH(le, &nbr->req_list, entry) {
1063 		if ((lsa_hdr->type == le->type) &&
1064 		    (lsa_hdr->ls_id == le->ls_id) &&
1065 		    (lsa_hdr->adv_rtr == le->adv_rtr))
1066 			return (1);
1067 	}
1068 	return (0);
1069 }
1070 
1071 void
1072 rde_req_list_del(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
1073 {
1074 	struct rde_req_entry	*le;
1075 
1076 	TAILQ_FOREACH(le, &nbr->req_list, entry) {
1077 		if ((lsa_hdr->type == le->type) &&
1078 		    (lsa_hdr->ls_id == le->ls_id) &&
1079 		    (lsa_hdr->adv_rtr == le->adv_rtr)) {
1080 			TAILQ_REMOVE(&nbr->req_list, le, entry);
1081 			free(le);
1082 			return;
1083 		}
1084 	}
1085 }
1086 
1087 void
1088 rde_req_list_free(struct rde_nbr *nbr)
1089 {
1090 	struct rde_req_entry	*le;
1091 
1092 	while ((le = TAILQ_FIRST(&nbr->req_list)) != NULL) {
1093 		TAILQ_REMOVE(&nbr->req_list, le, entry);
1094 		free(le);
1095 	}
1096 }
1097 
1098 /*
1099  * as-external LSA handling
1100  */
1101 struct lsa *
1102 rde_asext_get(struct rroute *rr)
1103 {
1104 #if 0
1105 	struct area	*area;
1106 	struct iface	*iface;
1107 XXX
1108 	LIST_FOREACH(area, &rdeconf->area_list, entry)
1109 		LIST_FOREACH(iface, &area->iface_list, entry) {
1110 			if ((iface->addr.s_addr & iface->mask.s_addr) ==
1111 			    rr->kr.prefix.s_addr && iface->mask.s_addr ==
1112 			    prefixlen2mask(rr->kr.prefixlen)) {
1113 				/* already announced as (stub) net LSA */
1114 				log_debug("rde_asext_get: %s/%d is net LSA",
1115 				    inet_ntoa(rr->kr.prefix), rr->kr.prefixlen);
1116 				return (NULL);
1117 			}
1118 		}
1119 #endif
1120 	/* update of seqnum is done by lsa_merge */
1121 	return (orig_asext_lsa(rr, DEFAULT_AGE));
1122 }
1123 
1124 struct lsa *
1125 rde_asext_put(struct rroute *rr)
1126 {
1127 	/*
1128 	 * just try to remove the LSA. If the prefix is announced as
1129 	 * stub net LSA lsa_find() will fail later and nothing will happen.
1130 	 */
1131 
1132 	/* remove by reflooding with MAX_AGE */
1133 	return (orig_asext_lsa(rr, MAX_AGE));
1134 }
1135 
1136 /*
1137  * summary LSA stuff
1138  */
1139 void
1140 rde_summary_update(struct rt_node *rte, struct area *area)
1141 {
1142 	struct vertex		*v = NULL;
1143 /* XXX	struct lsa		*lsa; */
1144 	u_int16_t		 type = 0;
1145 
1146 	/* first check if we actually need to announce this route */
1147 	if (!(rte->d_type == DT_NET || rte->flags & OSPF_RTR_E))
1148 		return;
1149 	/* never create summaries for as-ext LSA */
1150 	if (rte->p_type == PT_TYPE1_EXT || rte->p_type == PT_TYPE2_EXT)
1151 		return;
1152 	/* no need for summary LSA in the originating area */
1153 	if (rte->area.s_addr == area->id.s_addr)
1154 		return;
1155 	/* no need to originate inter-area routes to the backbone */
1156 	if (rte->p_type == PT_INTER_AREA && area->id.s_addr == INADDR_ANY)
1157 		return;
1158 	/* TODO nexthop check, nexthop part of area -> no summary */
1159 	if (rte->cost >= LS_INFINITY)
1160 		return;
1161 	/* TODO AS border router specific checks */
1162 	/* TODO inter-area network route stuff */
1163 	/* TODO intra-area stuff -- condense LSA ??? */
1164 
1165 	if (rte->d_type == DT_NET) {
1166 		type = LSA_TYPE_INTER_A_PREFIX;
1167 	} else if (rte->d_type == DT_RTR) {
1168 		type = LSA_TYPE_INTER_A_ROUTER;
1169 	} else
		/* neither a network nor a router route, nothing to announce */
		return;
1170 
1171 #if 0 /* XXX a lot todo */
1172 	/* update lsa but only if it was changed */
1173 	v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id());
1174 	lsa = orig_sum_lsa(rte, area, type, rte->invalid);
1175 	lsa_merge(rde_nbr_self(area), lsa, v);
1176 
1177 	if (v == NULL)
1178 		v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id());
1179 #endif
1180 
1181 	/* suppressed/deleted routes are not found in the second lsa_find */
1182 	if (v)
1183 		v->cost = rte->cost;
1184 }
1185 
1186 /*
1187  * Functions for self-originated LSAs
1188  */
1189 
1190 struct lsa *
1191 orig_intra_lsa_net(struct iface *iface, struct vertex *old)
1192 {
1193 	struct lsa		*lsa;
1194 	struct vertex		*v;
1195 	struct area		*area;
1196 	struct prefix_node	*node;
1197 	struct prefix_tree	 tree;
1198 	u_int16_t		 len;
1199 	u_int16_t		 numprefix;
1200 
1201 	if ((area = area_find(rdeconf, iface->area_id)) == NULL)
1202 		fatalx("interface lost area");
1203 
1204 	log_debug("orig_intra_lsa_net: area %s, interface %s",
1205 	    inet_ntoa(area->id), iface->name);
1206 
1207 	RB_INIT(&tree);
1208 
1209 	if (iface->state & IF_STA_DR) {
1210 		RB_FOREACH(v, lsa_tree, &iface->lsa_tree) {
1211 			if (v->type != LSA_TYPE_LINK)
1212 				continue;
1213 			if (link_lsa_from_full_nbr(v->lsa, iface))
1214 				prefix_tree_add(&tree, &v->lsa->data.link);
1215 		}
1216 		if (RB_EMPTY(&tree)) {
1217 			/* There are no adjacent neighbors on link.
1218 			 * If a copy of this LSA already exists in DB,
1219 			 * it needs to be flushed. orig_intra_lsa_rtr()
1220 			 * will take care of prefixes configured on
1221 			 * this interface. */
1222 			if (!old)
1223 				return NULL;
1224 		} else {
1225 			/* Add our own prefixes configured for this link. */
1226 			v = lsa_find(iface, htons(LSA_TYPE_LINK),
1227 			    htonl(iface->ifindex), rde_router_id());
1228 			if (v)
1229 				prefix_tree_add(&tree, &v->lsa->data.link);
1230 		}
1231 	/* Continue only if a copy of this LSA already exists in DB.
1232 	 * It needs to be flushed. */
1233 	} else if (!old)
1234 		return NULL;
1235 
1236 	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_intra_prefix);
1237 	if ((lsa = calloc(1, len)) == NULL)
1238 		fatal("orig_intra_lsa_net");
1239 
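	/*
	 * The prefixes collected below are advertised on behalf of the
	 * transit network, so the Referenced LSA fields point at our own
	 * network LSA for this link: reference type network LSA, LS ID equal
	 * to the DR's interface index (ours, since we only get here as DR or
	 * to flush a stale copy) and our router ID as advertising router.
	 */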
1240 	lsa->data.pref_intra.ref_type = htons(LSA_TYPE_NETWORK);
1241 	lsa->data.pref_intra.ref_lsid = htonl(iface->ifindex);
1242 	lsa->data.pref_intra.ref_adv_rtr = rde_router_id();
1243 
1244 	numprefix = 0;
1245 	RB_FOREACH(node, prefix_tree, &tree) {
1246 		append_prefix_lsa(&lsa, &len, node->prefix);
1247 		numprefix++;
1248 	}
1249 
1250 	lsa->data.pref_intra.numprefix = htons(numprefix);
1251 
1252 	while (!RB_EMPTY(&tree))
1253 		free(RB_REMOVE(prefix_tree, &tree, RB_ROOT(&tree)));
1254 
1255 	/* LSA header */
1256 	/* If numprefix is zero, originate with MAX_AGE to flush LSA. */
1257 	lsa->hdr.age = numprefix == 0 ? htons(MAX_AGE) : htons(DEFAULT_AGE);
1258 	lsa->hdr.type = htons(LSA_TYPE_INTRA_A_PREFIX);
1259 	lsa->hdr.ls_id = htonl(iface->ifindex);
1260 	lsa->hdr.adv_rtr = rde_router_id();
1261 	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
1262 	lsa->hdr.len = htons(len);
1263 	lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));
1264 
1265 	return lsa;
1266 }
1267 
1268 /* Prefix LSAs have variable size. We have to be careful to copy the right
1269  * amount of bytes, and to realloc() the right amount of memory. */
1270 void
1271 append_prefix_lsa(struct lsa **lsa, u_int16_t *len, struct lsa_prefix *prefix)
1272 {
1273 	struct lsa_prefix	*copy;
1274 	unsigned int		 lsa_prefix_len;
1275 	unsigned int		 new_len;
1276 	char  			*new_lsa;
1277 
1278 	lsa_prefix_len = sizeof(struct lsa_prefix)
1279 	    + LSA_PREFIXSIZE(prefix->prefixlen);
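	/*
	 * LSA_PREFIXSIZE() is assumed to follow the OSPFv3 encoding and
	 * round the address part up to whole 32-bit words, roughly
	 * ((prefixlen + 31) / 32) * 4 bytes: a /64 prefix then carries
	 * 8 bytes of address data, a /0 default route none at all.
	 */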
1280 
1281 	new_len = *len + lsa_prefix_len;
1282 
1283 	/* Make sure we have enough space for this prefix. */
1284 	if ((new_lsa = realloc(*lsa, new_len)) == NULL)
1285 		fatal("append_prefix_lsa");
1286 
1287 	/* Append prefix to LSA. */
1288 	copy = (struct lsa_prefix *)(new_lsa + *len);
1289 	memcpy(copy, prefix, lsa_prefix_len);
1290 	copy->metric = 0;
1291 
1292 	*lsa = (struct lsa *)new_lsa;
1293 	*len = new_len;
1294 }
1295 
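/*
 * Ordering for the prefix RB tree: compare the address bytes both prefixes
 * actually carry, then fall back to the prefix length.  Identical prefixes
 * compare equal, which is what lets RB_INSERT() in prefix_tree_add() detect
 * and merge duplicates announced by several neighbors.
 */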
1296 int
1297 prefix_compare(struct prefix_node *a, struct prefix_node *b)
1298 {
1299 	struct lsa_prefix	*p;
1300 	struct lsa_prefix	*q;
1301 	int		 	 i;
1302 	int			 len;
1303 
1304 	p = a->prefix;
1305 	q = b->prefix;
1306 
1307 	len = MIN(LSA_PREFIXSIZE(p->prefixlen), LSA_PREFIXSIZE(q->prefixlen));
1308 
1309 	i = memcmp(p + 1, q + 1, len);
1310 	if (i)
1311 		return (i);
1312 	if (p->prefixlen < q->prefixlen)
1313 		return (-1);
1314 	if (p->prefixlen > q->prefixlen)
1315 		return (1);
1316 	return (0);
1317 }
1318 
1319 void
1320 prefix_tree_add(struct prefix_tree *tree, struct lsa_link *lsa)
1321 {
1322 	struct prefix_node	*old;
1323 	struct prefix_node	*new;
1324 	struct in6_addr		 addr;
1325 	unsigned int		 len;
1326 	unsigned int		 i;
1327 	char			*cur_prefix;
1328 
1329 	cur_prefix = (char *)(lsa + 1);
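	/*
	 * The prefixes follow the fixed lsa_link header back to back, each
	 * entry being sizeof(struct lsa_prefix) plus
	 * LSA_PREFIXSIZE(prefixlen) bytes long; cur_prefix is advanced by
	 * exactly that amount per iteration.
	 */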
1330 
1331 	for (i = 0; i < ntohl(lsa->numprefix); i++) {
1332 		if ((new = calloc(1, sizeof(*new))) == NULL)
			fatal("prefix_tree_add");
1333 		new->prefix = (struct lsa_prefix *)cur_prefix;
1334 
1335 		len = sizeof(*new->prefix)
1336 		    + LSA_PREFIXSIZE(new->prefix->prefixlen);
1337 
1338 		bzero(&addr, sizeof(addr));
1339 		memcpy(&addr, new->prefix + 1,
1340 		    LSA_PREFIXSIZE(new->prefix->prefixlen));
1341 
1342 		if (!(IN6_IS_ADDR_LINKLOCAL(&addr)) &&
1343 		    (new->prefix->options & OSPF_PREFIX_NU) == 0 &&
1344 		    (new->prefix->options & OSPF_PREFIX_LA) == 0) {
1345 			old = RB_INSERT(prefix_tree, tree, new);
1346 			if (old != NULL) {
1347 				old->prefix->options |= new->prefix->options;
1348 				free(new);
1349 			}
1350 		}
1351 
1352 		cur_prefix = cur_prefix + len;
1353 	}
1354 }
1355 
1356 RB_GENERATE(prefix_tree, prefix_node, entry, prefix_compare)
1357 
1358 /* Return non-zero if Link LSA was originated from an adjacent neighbor. */
1359 int
1360 link_lsa_from_full_nbr(struct lsa *lsa, struct iface *iface)
1361 {
1362 	struct rde_nbr	*nbr;
1363 	struct area	*area;
1364 
1365 	if ((area = area_find(rdeconf, iface->area_id)) == NULL)
1366 		fatalx("interface lost area");
1367 
1368 	LIST_FOREACH(nbr, &area->nbr_list, entry) {
1369 		if (nbr->self || nbr->iface->ifindex != iface->ifindex)
1370 			continue;
1371 		if (lsa->hdr.adv_rtr == nbr->id.s_addr)
1372 			break;
1373 	}
1374 	if (!nbr)
1375 		return 0;
1376 
1377 	if (nbr->state & NBR_STA_FULL &&
1378 	    ntohl(lsa->hdr.ls_id) == nbr->iface_id)
1379 		return 1;
1380 
1381 	return 0;
1382 }
1383 
1384 struct lsa *
1385 orig_intra_lsa_rtr(struct area *area, struct vertex *old)
1386 {
1387 	struct lsa		*lsa;
1388 	struct lsa_prefix	*lsa_prefix;
1389 	struct in6_addr		*prefix;
1390 	struct iface		*iface;
1391 	struct iface_addr	*ia;
1392 	struct rde_nbr		*nbr;
1393 	u_int16_t		 len;
1394 	u_int16_t		 numprefix;
1395 
1396 	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_intra_prefix);
1397 	if ((lsa = calloc(1, len)) == NULL)
1398 		fatal("orig_intra_lsa_rtr");
1399 
1400 	lsa->data.pref_intra.ref_type = htons(LSA_TYPE_ROUTER);
1401 	lsa->data.pref_intra.ref_lsid = 0;
1402 	lsa->data.pref_intra.ref_adv_rtr = rde_router_id();
1403 
1404 	log_debug("orig_intra_lsa_rtr: area %s", inet_ntoa(area->id));
1405 
1406 	numprefix = 0;
1407 	LIST_FOREACH(iface, &area->iface_list, entry) {
1408 		if (iface->state & IF_STA_DOWN)
1409 			continue;
1410 
1411 		/* Broadcast links with adjacencies are handled
1412 		 * by orig_intra_lsa_net(), ignore. */
1413 		if (iface->type == IF_TYPE_BROADCAST ||
1414 		    iface->type == IF_TYPE_NBMA) {
1415 			if (iface->state & IF_STA_WAITING)
1416 				/* Skip, we're still waiting for
1417 				 * adjacencies to form. */
1418 				continue;
1419 
1420 			LIST_FOREACH(nbr, &area->nbr_list, entry)
1421 				if (!nbr->self &&
1422 				    nbr->iface->ifindex == iface->ifindex &&
1423 				    nbr->state & NBR_STA_FULL)
1424 					break;
1425 			if (nbr)
1426 				continue;
1427 		}
1428 
1429 		if ((lsa_prefix = calloc(sizeof(*lsa_prefix)
1430 		    + sizeof(struct in6_addr), 1)) == NULL)
1431 			fatal("orig_intra_lsa_rtr");
1432 
1433 		TAILQ_FOREACH(ia, &iface->ifa_list, entry) {
1434 			if (IN6_IS_ADDR_LINKLOCAL(&ia->addr))
1435 				continue;
1436 
1437 			bzero(lsa_prefix, sizeof(*lsa_prefix)
1438 			    + sizeof(struct in6_addr));
1439 
1440 			if (iface->type == IF_TYPE_POINTOMULTIPOINT ||
1441 			    iface->state & IF_STA_LOOPBACK) {
1442 				lsa_prefix->prefixlen = 128;
1443 			} else {
1444 				lsa_prefix->prefixlen = ia->prefixlen;
1445 				lsa_prefix->metric = htons(iface->metric);
1446 			}
1447 
1448 			if (lsa_prefix->prefixlen == 128)
1449 				lsa_prefix->options |= OSPF_PREFIX_LA;
1450 
1451 			prefix = (struct in6_addr *)(lsa_prefix + 1);
1452 			inet6applymask(prefix, &ia->addr,
1453 			    lsa_prefix->prefixlen);
1454 			append_prefix_lsa(&lsa, &len, lsa_prefix);
1455 			numprefix++;
1456 		}
1457 
1458 		free(lsa_prefix);
1459 
1460 		/* TODO: Add prefixes of directly attached hosts, too */
1461 		/* TODO: Add prefixes for virtual links */
1462 	}
1463 
1464 	/* If no prefixes were included, continue only if a copy of this
1465 	 * LSA already exists in DB. It needs to be flushed. */
1466 	if (numprefix == 0 && !old) {
1467 		free(lsa);
1468 		return NULL;
1469 	}
1470 
1471 	lsa->data.pref_intra.numprefix = htons(numprefix);
1472 
1473 	/* LSA header */
1474 	/* If numprefix is zero, originate with MAX_AGE to flush LSA. */
1475 	lsa->hdr.age = numprefix == 0 ? htons(MAX_AGE) : htons(DEFAULT_AGE);
1476 	lsa->hdr.type = htons(LSA_TYPE_INTRA_A_PREFIX);
1477 	lsa->hdr.ls_id = htonl(LS_ID_INTRA_RTR);
1478 	lsa->hdr.adv_rtr = rde_router_id();
1479 	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
1480 	lsa->hdr.len = htons(len);
1481 	lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));
1482 
1483 	return lsa;
1484 }
1485 
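/*
 * Re-originate the area's intra-area-prefix LSAs: one per broadcast/NBMA
 * interface, referencing the network LSA of that link, plus a single
 * router-wide LSA (LS ID LS_ID_INTRA_RTR) referencing the router LSA for
 * everything else.  Called whenever an interface or neighbor state change
 * may have altered the set of prefixes to announce.
 */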
1486 void
1487 orig_intra_area_prefix_lsas(struct area *area)
1488 {
1489 	struct lsa	*lsa;
1490 	struct vertex	*old;
1491 	struct iface	*iface;
1492 
1493 	LIST_FOREACH(iface, &area->iface_list, entry) {
1494 		if (iface->type == IF_TYPE_BROADCAST ||
1495 		    iface->type == IF_TYPE_NBMA) {
1496 			old = lsa_find(iface, htons(LSA_TYPE_INTRA_A_PREFIX),
1497 			    htonl(iface->ifindex), rde_router_id());
1498 			lsa = orig_intra_lsa_net(iface, old);
1499 			if (lsa)
1500 				lsa_merge(rde_nbr_self(area), lsa, old);
1501 		}
1502 	}
1503 
1504 	old = lsa_find_tree(&area->lsa_tree, htons(LSA_TYPE_INTRA_A_PREFIX),
1505 		htonl(LS_ID_INTRA_RTR), rde_router_id());
1506 	lsa = orig_intra_lsa_rtr(area, old);
1507 	if (lsa)
1508 		lsa_merge(rde_nbr_self(area), lsa, old);
1509 }
1510 
1511 struct lsa *
1512 orig_asext_lsa(struct rroute *rr, u_int16_t age)
1513 {
1514 #if 0 /* XXX a lot todo */
1515 	struct lsa	*lsa;
1516 	u_int16_t	 len;
1517 
1518 	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_asext);
1519 	if ((lsa = calloc(1, len)) == NULL)
1520 		fatal("orig_asext_lsa");
1521 
1522 	log_debug("orig_asext_lsa: %s/%d age %d",
1523 	    log_in6addr(&rr->kr.prefix), rr->kr.prefixlen, age);
1524 
1525 	/* LSA header */
1526 	lsa->hdr.age = htons(age);
1527 	lsa->hdr.type = LSA_TYPE_EXTERNAL;
1528 	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
1529 	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
1530 	lsa->hdr.len = htons(len);
1531 
1532 	/* prefix and mask */
1533 	/*
1534 	 * TODO ls_id must be unique, for overlapping routes this may
1535 	 * not be true. In this case a hack needs to be done to
1536 	 * make the ls_id unique.
1537 	 */
1538 	lsa->hdr.ls_id = rr->kr.prefix.s_addr;
1539 	lsa->data.asext.mask = prefixlen2mask(rr->kr.prefixlen);
1540 
1541 	/*
1542 	 * nexthop -- on connected routes we are the nexthop,
1543 	 * on all other cases we announce the true nexthop.
1544 	 * XXX this is wrong as the true nexthop may be outside
1545 	 * of the ospf cloud and so unreachable. For now we force
1546 	 * all traffic to be directed to us.
1547 	 */
1548 	lsa->data.asext.fw_addr = 0;
1549 
1550 	lsa->data.asext.metric = htonl(rr->metric);
1551 	lsa->data.asext.ext_tag = htonl(rr->kr.ext_tag);
1552 
1553 	lsa->hdr.ls_chksum = 0;
1554 	lsa->hdr.ls_chksum =
1555 	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));
1556 
1557 	return (lsa);
1558 #endif
1559 	return NULL;
1560 }
1561 
1562 struct lsa *
1563 orig_sum_lsa(struct rt_node *rte, struct area *area, u_int8_t type, int invalid)
1564 {
1565 #if 0 /* XXX a lot todo */
1566 	struct lsa	*lsa;
1567 	u_int16_t	 len;
1568 
1569 	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_sum);
1570 	if ((lsa = calloc(1, len)) == NULL)
1571 		fatal("orig_sum_lsa");
1572 
1573 	/* LSA header */
1574 	lsa->hdr.age = htons(invalid ? MAX_AGE : DEFAULT_AGE);
1575 	lsa->hdr.type = type;
1576 	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
1577 	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
1578 	lsa->hdr.len = htons(len);
1579 
1580 	/* prefix and mask */
1581 	/*
1582 	 * TODO ls_id must be unique, for overlapping routes this may
1583 	 * not be true. In this case a hack needs to be done to
1584 	 * make the ls_id unique.
1585 	 */
1586 	lsa->hdr.ls_id = rte->prefix.s_addr;
1587 	if (type == LSA_TYPE_SUM_NETWORK)
1588 		lsa->data.sum.mask = prefixlen2mask(rte->prefixlen);
1589 	else
1590 		lsa->data.sum.mask = 0;	/* must be zero per RFC */
1591 
1592 	lsa->data.sum.metric = htonl(rte->cost & LSA_METRIC_MASK);
1593 
1594 	lsa->hdr.ls_chksum = 0;
1595 	lsa->hdr.ls_chksum =
1596 	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));
1597 
1598 	return (lsa);
1599 #endif
1600 	return NULL;
1601 }
1602