xref: /openbsd-src/usr.sbin/ospf6d/rde.c (revision f2da64fbbbf1b03f09f390ab01267c93dfd77c4c)
1 /*	$OpenBSD: rde.c,v 1.68 2016/09/03 10:25:36 renato Exp $ */
2 
3 /*
4  * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
5  * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
6  * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #include <sys/types.h>
22 #include <sys/socket.h>
23 #include <sys/queue.h>
24 #include <net/if_types.h>
25 #include <netinet/in.h>
26 #include <arpa/inet.h>
27 #include <err.h>
28 #include <errno.h>
29 #include <stdlib.h>
30 #include <signal.h>
31 #include <string.h>
32 #include <pwd.h>
33 #include <unistd.h>
34 #include <event.h>
35 
36 #include "ospf6.h"
37 #include "ospf6d.h"
38 #include "ospfe.h"
39 #include "log.h"
40 #include "rde.h"
41 
42 #define MINIMUM(a, b)	(((a) < (b)) ? (a) : (b))
43 
44 void		 rde_sig_handler(int sig, short, void *);
45 __dead void	 rde_shutdown(void);
46 void		 rde_dispatch_imsg(int, short, void *);
47 void		 rde_dispatch_parent(int, short, void *);
48 void		 rde_dump_area(struct area *, int, pid_t);
49 
50 void		 rde_send_summary(pid_t);
51 void		 rde_send_summary_area(struct area *, pid_t);
52 void		 rde_nbr_init(u_int32_t);
53 void		 rde_nbr_free(void);
54 struct rde_nbr	*rde_nbr_new(u_int32_t, struct rde_nbr *);
55 void		 rde_nbr_del(struct rde_nbr *);
56 
57 void		 rde_req_list_add(struct rde_nbr *, struct lsa_hdr *);
58 int		 rde_req_list_exists(struct rde_nbr *, struct lsa_hdr *);
59 void		 rde_req_list_del(struct rde_nbr *, struct lsa_hdr *);
60 void		 rde_req_list_free(struct rde_nbr *);
61 
62 struct lsa	*rde_asext_get(struct rroute *);
63 struct lsa	*rde_asext_put(struct rroute *);
64 
65 int		 comp_asext(struct lsa *, struct lsa *);
66 struct lsa	*orig_asext_lsa(struct rroute *, u_int16_t);
67 struct lsa	*orig_sum_lsa(struct rt_node *, struct area *, u_int8_t, int);
68 struct lsa	*orig_intra_lsa_net(struct area *, struct iface *,
69 		 struct vertex *);
70 struct lsa	*orig_intra_lsa_rtr(struct area *, struct vertex *);
71 void		 append_prefix_lsa(struct lsa **, u_int16_t *,
72 		    struct lsa_prefix *);
73 
74 /* A 32-bit value != any ifindex.
75  * We assume ifindex is bound by [1, USHRT_MAX] inclusive. */
76 #define	LS_ID_INTRA_RTR	0x01000000
77 
78 /* Tree of prefixes with global scope on given a link,
79  * see orig_intra_lsa_*() */
80 struct prefix_node {
81 	RB_ENTRY(prefix_node)	 entry;
82 	struct lsa_prefix	*prefix;
83 };
84 RB_HEAD(prefix_tree, prefix_node);
85 RB_PROTOTYPE(prefix_tree, prefix_node, entry, prefix_compare);
86 int		 prefix_compare(struct prefix_node *, struct prefix_node *);
87 void		 prefix_tree_add(struct prefix_tree *, struct lsa_link *);
88 
89 struct ospfd_conf	*rdeconf = NULL, *nconf = NULL;
90 struct imsgev		*iev_ospfe;
91 struct imsgev		*iev_main;
92 struct rde_nbr		*nbrself;
93 struct lsa_tree		 asext_tree;
94 
95 /* ARGSUSED */
96 void
97 rde_sig_handler(int sig, short event, void *arg)
98 {
99 	/*
100 	 * signal handler rules don't apply, libevent decouples for us
101 	 */
102 
103 	switch (sig) {
104 	case SIGINT:
105 	case SIGTERM:
106 		rde_shutdown();
107 		/* NOTREACHED */
108 	default:
109 		fatalx("unexpected signal");
110 	}
111 }
112 
113 /* route decision engine */
114 pid_t
115 rde(struct ospfd_conf *xconf, int pipe_parent2rde[2], int pipe_ospfe2rde[2],
116     int pipe_parent2ospfe[2])
117 {
118 	struct event		 ev_sigint, ev_sigterm;
119 	struct timeval		 now;
120 	struct passwd		*pw;
121 	struct redistribute	*r;
122 	pid_t			 pid;
123 
124 	switch (pid = fork()) {
125 	case -1:
126 		fatal("cannot fork");
127 		/* NOTREACHED */
128 	case 0:
129 		break;
130 	default:
131 		return (pid);
132 	}
133 
134 	rdeconf = xconf;
135 
136 	if ((pw = getpwnam(OSPF6D_USER)) == NULL)
137 		fatal("getpwnam");
138 
139 	if (chroot(pw->pw_dir) == -1)
140 		fatal("chroot");
141 	if (chdir("/") == -1)
142 		fatal("chdir(\"/\")");
143 
144 	setproctitle("route decision engine");
145 	ospfd_process = PROC_RDE_ENGINE;
146 	log_procname = log_procnames[ospfd_process];
147 
148 	if (setgroups(1, &pw->pw_gid) ||
149 	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
150 	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
151 		fatal("can't drop privileges");
152 
153 	event_init();
154 	rde_nbr_init(NBR_HASHSIZE);
155 	lsa_init(&asext_tree);
156 
157 	/* setup signal handler */
158 	signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
159 	signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
160 	signal_add(&ev_sigint, NULL);
161 	signal_add(&ev_sigterm, NULL);
162 	signal(SIGPIPE, SIG_IGN);
163 	signal(SIGHUP, SIG_IGN);
164 
165 	/* setup pipes */
166 	close(pipe_ospfe2rde[0]);
167 	close(pipe_parent2rde[0]);
168 	close(pipe_parent2ospfe[0]);
169 	close(pipe_parent2ospfe[1]);
170 
171 	if ((iev_ospfe = malloc(sizeof(struct imsgev))) == NULL ||
172 	    (iev_main = malloc(sizeof(struct imsgev))) == NULL)
173 		fatal(NULL);
174 	imsg_init(&iev_ospfe->ibuf, pipe_ospfe2rde[1]);
175 	iev_ospfe->handler = rde_dispatch_imsg;
176 	imsg_init(&iev_main->ibuf, pipe_parent2rde[1]);
177 	iev_main->handler = rde_dispatch_parent;
178 
179 	/* setup event handler */
180 	iev_ospfe->events = EV_READ;
181 	event_set(&iev_ospfe->ev, iev_ospfe->ibuf.fd, iev_ospfe->events,
182 	    iev_ospfe->handler, iev_ospfe);
183 	event_add(&iev_ospfe->ev, NULL);
184 
185 	iev_main->events = EV_READ;
186 	event_set(&iev_main->ev, iev_main->ibuf.fd, iev_main->events,
187 	    iev_main->handler, iev_main);
188 	event_add(&iev_main->ev, NULL);
189 
190 	evtimer_set(&rdeconf->ev, spf_timer, rdeconf);
191 	cand_list_init();
192 	rt_init();
193 
194 	while ((r = SIMPLEQ_FIRST(&rdeconf->redist_list)) != NULL) {
195 		SIMPLEQ_REMOVE_HEAD(&rdeconf->redist_list, entry);
196 		free(r);
197 	}
198 
199 	gettimeofday(&now, NULL);
200 	rdeconf->uptime = now.tv_sec;
201 
202 	event_dispatch();
203 
204 	rde_shutdown();
205 	/* NOTREACHED */
206 
207 	return (0);
208 }
209 
210 __dead void
211 rde_shutdown(void)
212 {
213 	struct area	*a;
214 
215 	/* close pipes */
216 	msgbuf_clear(&iev_ospfe->ibuf.w);
217 	close(iev_ospfe->ibuf.fd);
218 	msgbuf_clear(&iev_main->ibuf.w);
219 	close(iev_main->ibuf.fd);
220 
221 	stop_spf_timer(rdeconf);
222 	cand_list_clr();
223 	rt_clear();
224 
225 	while ((a = LIST_FIRST(&rdeconf->area_list)) != NULL) {
226 		LIST_REMOVE(a, entry);
227 		area_del(a);
228 	}
229 	rde_nbr_free();
230 
231 	free(iev_ospfe);
232 	free(iev_main);
233 	free(rdeconf);
234 
235 	log_info("route decision engine exiting");
236 	_exit(0);
237 }
238 
239 int
240 rde_imsg_compose_ospfe(int type, u_int32_t peerid, pid_t pid, void *data,
241     u_int16_t datalen)
242 {
243 	return (imsg_compose_event(iev_ospfe, type, peerid, pid, -1,
244 	    data, datalen));
245 }
246 
247 /* ARGSUSED */
248 void
249 rde_dispatch_imsg(int fd, short event, void *bula)
250 {
251 	struct imsgev		*iev = bula;
252 	struct imsgbuf		*ibuf = &iev->ibuf;
253 	struct imsg		 imsg;
254 	struct in_addr		 aid;
255 	struct ls_req_hdr	 req_hdr;
256 	struct lsa_hdr		 lsa_hdr, *db_hdr;
257 	struct rde_nbr		 rn, *nbr;
258 	struct timespec		 tp;
259 	struct lsa		*lsa;
260 	struct area		*area;
261 	struct vertex		*v;
262 	char			*buf;
263 	ssize_t			 n;
264 	time_t			 now;
265 	int			 r, state, self, shut = 0, verbose;
266 	u_int16_t		 l;
267 
268 	if (event & EV_READ) {
269 		if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN)
270 			fatal("imsg_read error");
271 		if (n == 0)	/* connection closed */
272 			shut = 1;
273 	}
274 	if (event & EV_WRITE) {
275 		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
276 			fatal("msgbuf_write");
277 		if (n == 0)	/* connection closed */
278 			shut = 1;
279 	}
280 
281 	clock_gettime(CLOCK_MONOTONIC, &tp);
282 	now = tp.tv_sec;
283 
284 	for (;;) {
285 		if ((n = imsg_get(ibuf, &imsg)) == -1)
286 			fatal("rde_dispatch_imsg: imsg_get error");
287 		if (n == 0)
288 			break;
289 
290 		switch (imsg.hdr.type) {
291 		case IMSG_NEIGHBOR_UP:
292 			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rn))
293 				fatalx("invalid size of OE request");
294 			memcpy(&rn, imsg.data, sizeof(rn));
295 
296 			if (rde_nbr_new(imsg.hdr.peerid, &rn) == NULL)
297 				fatalx("rde_dispatch_imsg: "
298 				    "neighbor already exists");
299 			break;
300 		case IMSG_NEIGHBOR_DOWN:
301 			rde_nbr_del(rde_nbr_find(imsg.hdr.peerid));
302 			break;
303 		case IMSG_NEIGHBOR_CHANGE:
304 			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state))
305 				fatalx("invalid size of OE request");
306 			memcpy(&state, imsg.data, sizeof(state));
307 
308 			nbr = rde_nbr_find(imsg.hdr.peerid);
309 			if (nbr == NULL)
310 				break;
311 
312 			if (state != nbr->state &&
313 			    (nbr->state & NBR_STA_FULL ||
314 			    state & NBR_STA_FULL)) {
315 				nbr->state = state;
316 				area_track(nbr->area, state);
317 				orig_intra_area_prefix_lsas(nbr->area);
318 			}
319 
320 			nbr->state = state;
321 			if (nbr->state & NBR_STA_FULL)
322 				rde_req_list_free(nbr);
323 			break;
324 		case IMSG_DB_SNAPSHOT:
325 			nbr = rde_nbr_find(imsg.hdr.peerid);
326 			if (nbr == NULL)
327 				break;
328 
329 			lsa_snap(nbr, imsg.hdr.peerid);
330 
331 			imsg_compose_event(iev_ospfe, IMSG_DB_END, imsg.hdr.peerid,
332 			    0, -1, NULL, 0);
333 			break;
334 		case IMSG_DD:
335 			nbr = rde_nbr_find(imsg.hdr.peerid);
336 			if (nbr == NULL)
337 				break;
338 
339 			buf = imsg.data;
340 			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
341 			    l >= sizeof(lsa_hdr); l -= sizeof(lsa_hdr)) {
342 				memcpy(&lsa_hdr, buf, sizeof(lsa_hdr));
343 				buf += sizeof(lsa_hdr);
344 
345 				v = lsa_find(nbr->iface, lsa_hdr.type,
346 				    lsa_hdr.ls_id, lsa_hdr.adv_rtr);
347 				if (v == NULL)
348 					db_hdr = NULL;
349 				else
350 					db_hdr = &v->lsa->hdr;
351 
352 				if (lsa_newer(&lsa_hdr, db_hdr) > 0) {
353 					/*
354 					 * only request LSAs that are
355 					 * newer or missing
356 					 */
357 					rde_req_list_add(nbr, &lsa_hdr);
358 					imsg_compose_event(iev_ospfe, IMSG_DD,
359 					    imsg.hdr.peerid, 0, -1, &lsa_hdr,
360 					    sizeof(lsa_hdr));
361 				}
362 			}
363 			if (l != 0)
364 				log_warnx("rde_dispatch_imsg: peerid %u, "
365 				    "trailing garbage in Database Description "
366 				    "packet", imsg.hdr.peerid);
367 
368 			imsg_compose_event(iev_ospfe, IMSG_DD_END,
369 			    imsg.hdr.peerid, 0, -1, NULL, 0);
370 			break;
371 		case IMSG_LS_REQ:
372 			nbr = rde_nbr_find(imsg.hdr.peerid);
373 			if (nbr == NULL)
374 				break;
375 
376 			buf = imsg.data;
377 			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
378 			    l >= sizeof(req_hdr); l -= sizeof(req_hdr)) {
379 				memcpy(&req_hdr, buf, sizeof(req_hdr));
380 				buf += sizeof(req_hdr);
381 
382 				if ((v = lsa_find(nbr->iface,
383 				    req_hdr.type, req_hdr.ls_id,
384 				    req_hdr.adv_rtr)) == NULL) {
385 					imsg_compose_event(iev_ospfe,
386 					    IMSG_LS_BADREQ, imsg.hdr.peerid,
387 					    0, -1, NULL, 0);
388 					continue;
389 				}
390 				imsg_compose_event(iev_ospfe, IMSG_LS_UPD,
391 				    imsg.hdr.peerid, 0, -1, v->lsa,
392 				    ntohs(v->lsa->hdr.len));
393 			}
394 			if (l != 0)
395 				log_warnx("rde_dispatch_imsg: peerid %u, "
396 				    "trailing garbage in LS Request "
397 				    "packet", imsg.hdr.peerid);
398 			break;
399 		case IMSG_LS_UPD:
400 			nbr = rde_nbr_find(imsg.hdr.peerid);
401 			if (nbr == NULL)
402 				break;
403 
404 			lsa = malloc(imsg.hdr.len - IMSG_HEADER_SIZE);
405 			if (lsa == NULL)
406 				fatal(NULL);
407 			memcpy(lsa, imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
408 
409 			if (!lsa_check(nbr, lsa,
410 			    imsg.hdr.len - IMSG_HEADER_SIZE)) {
411 				free(lsa);
412 				break;
413 			}
414 
415 			v = lsa_find(nbr->iface, lsa->hdr.type, lsa->hdr.ls_id,
416 			    lsa->hdr.adv_rtr);
417 			if (v == NULL)
418 				db_hdr = NULL;
419 			else
420 				db_hdr = &v->lsa->hdr;
421 
422 			if (nbr->self) {
423 				lsa_merge(nbr, lsa, v);
424 				/* lsa_merge frees the right lsa */
425 				break;
426 			}
427 
428 			r = lsa_newer(&lsa->hdr, db_hdr);
429 			if (r > 0) {
430 				/* new LSA newer than DB */
431 				if (v && v->flooded &&
432 				    v->changed + MIN_LS_ARRIVAL >= now) {
433 					free(lsa);
434 					break;
435 				}
436 
437 				rde_req_list_del(nbr, &lsa->hdr);
438 
439 				self = lsa_self(lsa);
440 				if (self) {
441 					if (v == NULL)
442 						/* LSA is no longer announced,
443 						 * remove by premature aging. */
444 						lsa_flush(nbr, lsa);
445 					else
446 						lsa_reflood(v, lsa);
447 				} else if (lsa_add(nbr, lsa))
448 					/* delayed lsa, don't flood yet */
449 					break;
450 
451 				/* flood and perhaps ack LSA */
452 				imsg_compose_event(iev_ospfe, IMSG_LS_FLOOD,
453 				    imsg.hdr.peerid, 0, -1, lsa,
454 				    ntohs(lsa->hdr.len));
455 
456 				/* reflood self originated LSA */
457 				if (self && v)
458 					imsg_compose_event(iev_ospfe,
459 					    IMSG_LS_FLOOD, v->peerid, 0, -1,
460 					    v->lsa, ntohs(v->lsa->hdr.len));
461 				/* new LSA was not added so free it */
462 				if (self)
463 					free(lsa);
464 			} else if (r < 0) {
465 				/*
466 				 * point 6 of "The Flooding Procedure"
467 				 * We are violating the RFC here because
468 				 * it does not make sense to reset a session
469 				 * because an equal LSA is already in the table.
470 				 * Only if the LSA sent is older than the one
471 				 * in the table we should reset the session.
472 				 */
473 				if (rde_req_list_exists(nbr, &lsa->hdr)) {
474 					imsg_compose_event(iev_ospfe,
475 					    IMSG_LS_BADREQ, imsg.hdr.peerid,
476 					    0, -1, NULL, 0);
477 					free(lsa);
478 					break;
479 				}
480 
481 				/* lsa no longer needed */
482 				free(lsa);
483 
484 				/* new LSA older than DB */
485 				if (ntohl(db_hdr->seq_num) == MAX_SEQ_NUM &&
486 				    ntohs(db_hdr->age) == MAX_AGE)
487 					/* seq-num wrap */
488 					break;
489 
490 				if (v->changed + MIN_LS_ARRIVAL >= now)
491 					break;
492 
493 				/* directly send current LSA, no ack */
494 				imsg_compose_event(iev_ospfe, IMSG_LS_UPD,
495 				    imsg.hdr.peerid, 0, -1, v->lsa,
496 				    ntohs(v->lsa->hdr.len));
497 			} else {
498 				/* LSA equal send direct ack */
499 				imsg_compose_event(iev_ospfe, IMSG_LS_ACK,
500 				    imsg.hdr.peerid, 0, -1, &lsa->hdr,
501 				    sizeof(lsa->hdr));
502 				free(lsa);
503 			}
504 			break;
505 		case IMSG_LS_MAXAGE:
506 			nbr = rde_nbr_find(imsg.hdr.peerid);
507 			if (nbr == NULL)
508 				break;
509 
510 			if (imsg.hdr.len != IMSG_HEADER_SIZE +
511 			    sizeof(struct lsa_hdr))
512 				fatalx("invalid size of OE request");
513 			memcpy(&lsa_hdr, imsg.data, sizeof(lsa_hdr));
514 
515 			if (rde_nbr_loading(nbr->area))
516 				break;
517 
518 			v = lsa_find(nbr->iface, lsa_hdr.type, lsa_hdr.ls_id,
519 			    lsa_hdr.adv_rtr);
520 			if (v == NULL)
521 				db_hdr = NULL;
522 			else
523 				db_hdr = &v->lsa->hdr;
524 
525 			/*
526 			 * only delete LSA if the one in the db is not newer
527 			 */
528 			if (lsa_newer(db_hdr, &lsa_hdr) <= 0)
529 				lsa_del(nbr, &lsa_hdr);
530 			break;
531 		case IMSG_CTL_SHOW_DATABASE:
532 		case IMSG_CTL_SHOW_DB_EXT:
533 		case IMSG_CTL_SHOW_DB_LINK:
534 		case IMSG_CTL_SHOW_DB_NET:
535 		case IMSG_CTL_SHOW_DB_RTR:
536 		case IMSG_CTL_SHOW_DB_INTRA:
537 		case IMSG_CTL_SHOW_DB_SELF:
538 		case IMSG_CTL_SHOW_DB_SUM:
539 		case IMSG_CTL_SHOW_DB_ASBR:
540 			if (imsg.hdr.len != IMSG_HEADER_SIZE &&
541 			    imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(aid)) {
542 				log_warnx("rde_dispatch_imsg: wrong imsg len");
543 				break;
544 			}
545 			if (imsg.hdr.len == IMSG_HEADER_SIZE) {
546 				LIST_FOREACH(area, &rdeconf->area_list, entry) {
547 					rde_dump_area(area, imsg.hdr.type,
548 					    imsg.hdr.pid);
549 				}
550 				lsa_dump(&asext_tree, imsg.hdr.type,
551 				    imsg.hdr.pid);
552 			} else {
553 				memcpy(&aid, imsg.data, sizeof(aid));
554 				if ((area = area_find(rdeconf, aid)) != NULL) {
555 					rde_dump_area(area, imsg.hdr.type,
556 					    imsg.hdr.pid);
557 					if (!area->stub)
558 						lsa_dump(&asext_tree,
559 						    imsg.hdr.type,
560 						    imsg.hdr.pid);
561 				}
562 			}
563 			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
564 			    imsg.hdr.pid, -1, NULL, 0);
565 			break;
566 		case IMSG_CTL_SHOW_RIB:
567 			LIST_FOREACH(area, &rdeconf->area_list, entry) {
568 				imsg_compose_event(iev_ospfe, IMSG_CTL_AREA,
569 				    0, imsg.hdr.pid, -1, area, sizeof(*area));
570 
571 				rt_dump(area->id, imsg.hdr.pid, RIB_RTR);
572 				rt_dump(area->id, imsg.hdr.pid, RIB_NET);
573 			}
574 			aid.s_addr = 0;
575 			rt_dump(aid, imsg.hdr.pid, RIB_EXT);
576 
577 			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
578 			    imsg.hdr.pid, -1, NULL, 0);
579 			break;
580 		case IMSG_CTL_SHOW_SUM:
581 			rde_send_summary(imsg.hdr.pid);
582 			LIST_FOREACH(area, &rdeconf->area_list, entry)
583 				rde_send_summary_area(area, imsg.hdr.pid);
584 			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
585 			    imsg.hdr.pid, -1, NULL, 0);
586 			break;
587 		case IMSG_IFINFO:
588 			if (imsg.hdr.len != IMSG_HEADER_SIZE +
589 			    sizeof(int))
590 				fatalx("IFINFO imsg with wrong len");
591 
592 			nbr = rde_nbr_find(imsg.hdr.peerid);
593 			if (nbr == NULL)
594 				fatalx("IFINFO imsg with bad peerid");
595 			memcpy(&nbr->iface->state, imsg.data, sizeof(int));
596 
597 			/* Resend LSAs if interface state changes. */
598 			orig_intra_area_prefix_lsas(nbr->area);
599 			break;
600 		case IMSG_CTL_LOG_VERBOSE:
601 			/* already checked by ospfe */
602 			memcpy(&verbose, imsg.data, sizeof(verbose));
603 			log_verbose(verbose);
604 			break;
605 		default:
606 			log_debug("rde_dispatch_imsg: unexpected imsg %d",
607 			    imsg.hdr.type);
608 			break;
609 		}
610 		imsg_free(&imsg);
611 	}
612 	if (!shut)
613 		imsg_event_add(iev);
614 	else {
615 		/* this pipe is dead, so remove the event handler */
616 		event_del(&iev->ev);
617 		event_loopexit(NULL);
618 	}
619 }
620 
621 /* ARGSUSED */
622 void
623 rde_dispatch_parent(int fd, short event, void *bula)
624 {
625 	static struct area	*narea;
626 	struct area		*area;
627 	struct iface		*iface, *ifp;
628 	struct ifaddrchange	*ifc;
629 	struct iface_addr	*ia, *nia;
630 	struct imsg		 imsg;
631 	struct kroute		 kr;
632 	struct rroute		 rr;
633 	struct imsgev		*iev = bula;
634 	struct imsgbuf		*ibuf = &iev->ibuf;
635 	struct lsa		*lsa;
636 	struct vertex		*v;
637 	struct rt_node		*rn;
638 	ssize_t			 n;
639 	int			 shut = 0, wasvalid;
640 	unsigned int		 ifindex;
641 
642 	if (event & EV_READ) {
643 		if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN)
644 			fatal("imsg_read error");
645 		if (n == 0)	/* connection closed */
646 			shut = 1;
647 	}
648 	if (event & EV_WRITE) {
649 		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
650 			fatal("msgbuf_write");
651 		if (n == 0)	/* connection closed */
652 			shut = 1;
653 	}
654 
655 	for (;;) {
656 		if ((n = imsg_get(ibuf, &imsg)) == -1)
657 			fatal("rde_dispatch_parent: imsg_get error");
658 		if (n == 0)
659 			break;
660 
661 		switch (imsg.hdr.type) {
662 		case IMSG_NETWORK_ADD:
663 			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
664 				log_warnx("rde_dispatch_parent: "
665 				    "wrong imsg len");
666 				break;
667 			}
668 			memcpy(&rr, imsg.data, sizeof(rr));
669 
670 			if ((lsa = rde_asext_get(&rr)) != NULL) {
671 				v = lsa_find(NULL, lsa->hdr.type,
672 				    lsa->hdr.ls_id, lsa->hdr.adv_rtr);
673 
674 				lsa_merge(nbrself, lsa, v);
675 			}
676 			break;
677 		case IMSG_NETWORK_DEL:
678 			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
679 				log_warnx("rde_dispatch_parent: "
680 				    "wrong imsg len");
681 				break;
682 			}
683 			memcpy(&rr, imsg.data, sizeof(rr));
684 
685 			if ((lsa = rde_asext_put(&rr)) != NULL) {
686 				v = lsa_find(NULL, lsa->hdr.type,
687 				    lsa->hdr.ls_id, lsa->hdr.adv_rtr);
688 
689 				/*
690 				 * if v == NULL no LSA is in the table and
691 				 * nothing has to be done.
692 				 */
693 				if (v)
694 					lsa_merge(nbrself, lsa, v);
695 				else
696 					free(lsa);
697 			}
698 			break;
699 		case IMSG_KROUTE_GET:
700 			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(kr)) {
701 				log_warnx("rde_dispatch_parent: "
702 				    "wrong imsg len");
703 				break;
704 			}
705 			memcpy(&kr, imsg.data, sizeof(kr));
706 
707 			if ((rn = rt_find(&kr.prefix, kr.prefixlen,
708 			    DT_NET)) != NULL)
709 				rde_send_change_kroute(rn);
710 			else
711 				/* should not happen */
712 				imsg_compose_event(iev_main, IMSG_KROUTE_DELETE,
713 				    0, 0, -1, &kr, sizeof(kr));
714 			break;
715 		case IMSG_IFINFO:
716 			if (imsg.hdr.len != IMSG_HEADER_SIZE +
717 			    sizeof(struct iface))
718 				fatalx("IFINFO imsg with wrong len");
719 
720 			ifp = imsg.data;
721 			iface = if_find(ifp->ifindex);
722 			if (iface == NULL)
723 				fatalx("interface lost in rde");
724 
725 			wasvalid = (iface->flags & IFF_UP) &&
726 			    LINK_STATE_IS_UP(iface->linkstate);
727 
728 			if_update(iface, ifp->mtu, ifp->flags, ifp->if_type,
729 			    ifp->linkstate, ifp->baudrate);
730 
731 			/* Resend LSAs if interface state changes. */
732 			if (wasvalid != (iface->flags & IFF_UP) &&
733 			    LINK_STATE_IS_UP(iface->linkstate)) {
734 				area = area_find(rdeconf, iface->area_id);
735 				if (!area)
736 					fatalx("interface lost area");
737 				orig_intra_area_prefix_lsas(area);
738 			}
739 			break;
740 		case IMSG_IFADD:
741 			if ((iface = malloc(sizeof(struct iface))) == NULL)
742 				fatal(NULL);
743 			memcpy(iface, imsg.data, sizeof(struct iface));
744 
745 			LIST_INIT(&iface->nbr_list);
746 			TAILQ_INIT(&iface->ls_ack_list);
747 			RB_INIT(&iface->lsa_tree);
748 
749 			area = area_find(rdeconf, iface->area_id);
750 			LIST_INSERT_HEAD(&area->iface_list, iface, entry);
751 			break;
752 		case IMSG_IFDELETE:
753 			if (imsg.hdr.len != IMSG_HEADER_SIZE +
754 			    sizeof(ifindex))
755 				fatalx("IFDELETE imsg with wrong len");
756 
757 			memcpy(&ifindex, imsg.data, sizeof(ifindex));
758 			iface = if_find(ifindex);
759 			if (iface == NULL)
760 				fatalx("interface lost in rde");
761 
762 			LIST_REMOVE(iface, entry);
763 			if_del(iface);
764 			break;
765 		case IMSG_IFADDRNEW:
766 			if (imsg.hdr.len != IMSG_HEADER_SIZE +
767 			    sizeof(struct ifaddrchange))
768 				fatalx("IFADDRNEW imsg with wrong len");
769 			ifc = imsg.data;
770 
771 			iface = if_find(ifc->ifindex);
772 			if (iface == NULL)
773 				fatalx("IFADDRNEW interface lost in rde");
774 
775 			if ((ia = calloc(1, sizeof(struct iface_addr))) ==
776 			    NULL)
777 				fatal("rde_dispatch_parent IFADDRNEW");
778 			ia->addr = ifc->addr;
779 			ia->dstbrd = ifc->dstbrd;
780 			ia->prefixlen = ifc->prefixlen;
781 
782 			TAILQ_INSERT_TAIL(&iface->ifa_list, ia, entry);
783 			area = area_find(rdeconf, iface->area_id);
784 			if (area)
785 				orig_intra_area_prefix_lsas(area);
786 			break;
787 		case IMSG_IFADDRDEL:
788 			if (imsg.hdr.len != IMSG_HEADER_SIZE +
789 			    sizeof(struct ifaddrchange))
790 				fatalx("IFADDRDEL imsg with wrong len");
791 			ifc = imsg.data;
792 
793 			iface = if_find(ifc->ifindex);
794 			if (iface == NULL)
795 				fatalx("IFADDRDEL interface lost in rde");
796 
797 			for (ia = TAILQ_FIRST(&iface->ifa_list); ia != NULL;
798 			    ia = nia) {
799 				nia = TAILQ_NEXT(ia, entry);
800 
801 				if (IN6_ARE_ADDR_EQUAL(&ia->addr,
802 				    &ifc->addr)) {
803 					TAILQ_REMOVE(&iface->ifa_list, ia,
804 					    entry);
805 					free(ia);
806 					break;
807 				}
808 			}
809 			area = area_find(rdeconf, iface->area_id);
810 			if (area)
811 				orig_intra_area_prefix_lsas(area);
812 			break;
813 		case IMSG_RECONF_CONF:
814 			if ((nconf = malloc(sizeof(struct ospfd_conf))) ==
815 			    NULL)
816 				fatal(NULL);
817 			memcpy(nconf, imsg.data, sizeof(struct ospfd_conf));
818 
819 			LIST_INIT(&nconf->area_list);
820 			LIST_INIT(&nconf->cand_list);
821 			break;
822 		case IMSG_RECONF_AREA:
823 			if ((narea = area_new()) == NULL)
824 				fatal(NULL);
825 			memcpy(narea, imsg.data, sizeof(struct area));
826 
827 			LIST_INIT(&narea->iface_list);
828 			LIST_INIT(&narea->nbr_list);
829 			RB_INIT(&narea->lsa_tree);
830 
831 			LIST_INSERT_HEAD(&nconf->area_list, narea, entry);
832 			break;
833 		case IMSG_RECONF_END:
834 			merge_config(rdeconf, nconf);
835 			nconf = NULL;
836 			break;
837 		default:
838 			log_debug("rde_dispatch_parent: unexpected imsg %d",
839 			    imsg.hdr.type);
840 			break;
841 		}
842 		imsg_free(&imsg);
843 	}
844 	if (!shut)
845 		imsg_event_add(iev);
846 	else {
847 		/* this pipe is dead, so remove the event handler */
848 		event_del(&iev->ev);
849 		event_loopexit(NULL);
850 	}
851 }
852 
853 void
854 rde_dump_area(struct area *area, int imsg_type, pid_t pid)
855 {
856 	struct iface	*iface;
857 
858 	/* dump header */
859 	imsg_compose_event(iev_ospfe, IMSG_CTL_AREA, 0, pid, -1,
860 	    area, sizeof(*area));
861 
862 	/* dump link local lsa */
863 	LIST_FOREACH(iface, &area->iface_list, entry) {
864 		imsg_compose_event(iev_ospfe, IMSG_CTL_IFACE,
865 		    0, pid, -1, iface, sizeof(*iface));
866 		lsa_dump(&iface->lsa_tree, imsg_type, pid);
867 	}
868 
869 	/* dump area lsa */
870 	lsa_dump(&area->lsa_tree, imsg_type, pid);
871 }
872 
873 u_int32_t
874 rde_router_id(void)
875 {
876 	return (rdeconf->rtr_id.s_addr);
877 }
878 
879 void
880 rde_send_change_kroute(struct rt_node *r)
881 {
882 	struct kroute		 kr;
883 	struct rt_nexthop	*rn;
884 
885 	TAILQ_FOREACH(rn, &r->nexthop, entry) {
886 		if (!rn->invalid)
887 			break;
888 	}
889 	if (!rn)
890 		fatalx("rde_send_change_kroute: no valid nexthop found");
891 
892 	bzero(&kr, sizeof(kr));
893 	kr.prefix = r->prefix;
894 	kr.nexthop = rn->nexthop;
895 	if (IN6_IS_ADDR_LINKLOCAL(&rn->nexthop) ||
896 	    IN6_IS_ADDR_MC_LINKLOCAL(&rn->nexthop))
897 		kr.scope = rn->ifindex;
898 	kr.ifindex = rn->ifindex;
899 	kr.prefixlen = r->prefixlen;
900 	kr.ext_tag = r->ext_tag;
901 
902 	imsg_compose_event(iev_main, IMSG_KROUTE_CHANGE, 0, 0, -1,
903 	    &kr, sizeof(kr));
904 }
905 
906 void
907 rde_send_delete_kroute(struct rt_node *r)
908 {
909 	struct kroute	 kr;
910 
911 	bzero(&kr, sizeof(kr));
912 	kr.prefix = r->prefix;
913 	kr.prefixlen = r->prefixlen;
914 
915 	imsg_compose_event(iev_main, IMSG_KROUTE_DELETE, 0, 0, -1,
916 	    &kr, sizeof(kr));
917 }
918 
919 void
920 rde_send_summary(pid_t pid)
921 {
922 	static struct ctl_sum	 sumctl;
923 	struct timeval		 now;
924 	struct area		*area;
925 	struct vertex		*v;
926 
927 	bzero(&sumctl, sizeof(struct ctl_sum));
928 
929 	sumctl.rtr_id.s_addr = rde_router_id();
930 	sumctl.spf_delay = rdeconf->spf_delay;
931 	sumctl.spf_hold_time = rdeconf->spf_hold_time;
932 
933 	LIST_FOREACH(area, &rdeconf->area_list, entry)
934 		sumctl.num_area++;
935 
936 	RB_FOREACH(v, lsa_tree, &asext_tree)
937 		sumctl.num_ext_lsa++;
938 
939 	gettimeofday(&now, NULL);
940 	if (rdeconf->uptime < now.tv_sec)
941 		sumctl.uptime = now.tv_sec - rdeconf->uptime;
942 	else
943 		sumctl.uptime = 0;
944 
945 	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM, 0, pid, &sumctl,
946 	    sizeof(sumctl));
947 }
948 
949 void
950 rde_send_summary_area(struct area *area, pid_t pid)
951 {
952 	static struct ctl_sum_area	 sumareactl;
953 	struct iface			*iface;
954 	struct rde_nbr			*nbr;
955 	struct lsa_tree			*tree = &area->lsa_tree;
956 	struct vertex			*v;
957 
958 	bzero(&sumareactl, sizeof(struct ctl_sum_area));
959 
960 	sumareactl.area.s_addr = area->id.s_addr;
961 	sumareactl.num_spf_calc = area->num_spf_calc;
962 
963 	LIST_FOREACH(iface, &area->iface_list, entry)
964 		sumareactl.num_iface++;
965 
966 	LIST_FOREACH(nbr, &area->nbr_list, entry)
967 		if (nbr->state == NBR_STA_FULL && !nbr->self)
968 			sumareactl.num_adj_nbr++;
969 
970 	RB_FOREACH(v, lsa_tree, tree)
971 		sumareactl.num_lsa++;
972 
973 	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM_AREA, 0, pid, &sumareactl,
974 	    sizeof(sumareactl));
975 }
976 
977 LIST_HEAD(rde_nbr_head, rde_nbr);
978 
979 struct nbr_table {
980 	struct rde_nbr_head	*hashtbl;
981 	u_int32_t		 hashmask;
982 } rdenbrtable;
983 
984 #define RDE_NBR_HASH(x)		\
985 	&rdenbrtable.hashtbl[(x) & rdenbrtable.hashmask]
986 
987 void
988 rde_nbr_init(u_int32_t hashsize)
989 {
990 	struct rde_nbr_head	*head;
991 	u_int32_t		 hs, i;
992 
993 	for (hs = 1; hs < hashsize; hs <<= 1)
994 		;
995 	rdenbrtable.hashtbl = calloc(hs, sizeof(struct rde_nbr_head));
996 	if (rdenbrtable.hashtbl == NULL)
997 		fatal("rde_nbr_init");
998 
999 	for (i = 0; i < hs; i++)
1000 		LIST_INIT(&rdenbrtable.hashtbl[i]);
1001 
1002 	rdenbrtable.hashmask = hs - 1;
1003 
1004 	if ((nbrself = calloc(1, sizeof(*nbrself))) == NULL)
1005 		fatal("rde_nbr_init");
1006 
1007 	nbrself->id.s_addr = rde_router_id();
1008 	nbrself->peerid = NBR_IDSELF;
1009 	nbrself->state = NBR_STA_DOWN;
1010 	nbrself->self = 1;
1011 	head = RDE_NBR_HASH(NBR_IDSELF);
1012 	LIST_INSERT_HEAD(head, nbrself, hash);
1013 }
1014 
1015 void
1016 rde_nbr_free(void)
1017 {
1018 	free(nbrself);
1019 	free(rdenbrtable.hashtbl);
1020 }
1021 
1022 struct rde_nbr *
1023 rde_nbr_find(u_int32_t peerid)
1024 {
1025 	struct rde_nbr_head	*head;
1026 	struct rde_nbr		*nbr;
1027 
1028 	head = RDE_NBR_HASH(peerid);
1029 
1030 	LIST_FOREACH(nbr, head, hash) {
1031 		if (nbr->peerid == peerid)
1032 			return (nbr);
1033 	}
1034 
1035 	return (NULL);
1036 }
1037 
1038 struct rde_nbr *
1039 rde_nbr_new(u_int32_t peerid, struct rde_nbr *new)
1040 {
1041 	struct rde_nbr_head	*head;
1042 	struct rde_nbr		*nbr;
1043 	struct area		*area;
1044 	struct iface		*iface;
1045 
1046 	if (rde_nbr_find(peerid))
1047 		return (NULL);
1048 	if ((area = area_find(rdeconf, new->area_id)) == NULL)
1049 		fatalx("rde_nbr_new: unknown area");
1050 
1051 	if ((iface = if_find(new->ifindex)) == NULL)
1052 		fatalx("rde_nbr_new: unknown interface");
1053 
1054 	if ((nbr = calloc(1, sizeof(*nbr))) == NULL)
1055 		fatal("rde_nbr_new");
1056 
1057 	memcpy(nbr, new, sizeof(*nbr));
1058 	nbr->peerid = peerid;
1059 	nbr->area = area;
1060 	nbr->iface = iface;
1061 
1062 	TAILQ_INIT(&nbr->req_list);
1063 
1064 	head = RDE_NBR_HASH(peerid);
1065 	LIST_INSERT_HEAD(head, nbr, hash);
1066 	LIST_INSERT_HEAD(&area->nbr_list, nbr, entry);
1067 
1068 	return (nbr);
1069 }
1070 
1071 void
1072 rde_nbr_del(struct rde_nbr *nbr)
1073 {
1074 	if (nbr == NULL)
1075 		return;
1076 
1077 	rde_req_list_free(nbr);
1078 
1079 	LIST_REMOVE(nbr, entry);
1080 	LIST_REMOVE(nbr, hash);
1081 
1082 	free(nbr);
1083 }
1084 
1085 int
1086 rde_nbr_loading(struct area *area)
1087 {
1088 	struct rde_nbr		*nbr;
1089 	int			 checkall = 0;
1090 
1091 	if (area == NULL) {
1092 		area = LIST_FIRST(&rdeconf->area_list);
1093 		checkall = 1;
1094 	}
1095 
1096 	while (area != NULL) {
1097 		LIST_FOREACH(nbr, &area->nbr_list, entry) {
1098 			if (nbr->self)
1099 				continue;
1100 			if (nbr->state & NBR_STA_XCHNG ||
1101 			    nbr->state & NBR_STA_LOAD)
1102 				return (1);
1103 		}
1104 		if (!checkall)
1105 			break;
1106 		area = LIST_NEXT(area, entry);
1107 	}
1108 
1109 	return (0);
1110 }
1111 
1112 struct rde_nbr *
1113 rde_nbr_self(struct area *area)
1114 {
1115 	struct rde_nbr		*nbr;
1116 
1117 	LIST_FOREACH(nbr, &area->nbr_list, entry)
1118 		if (nbr->self)
1119 			return (nbr);
1120 
1121 	/* this may not happen */
1122 	fatalx("rde_nbr_self: area without self");
1123 	return (NULL);
1124 }
1125 
1126 /*
1127  * LSA req list
1128  */
1129 void
1130 rde_req_list_add(struct rde_nbr *nbr, struct lsa_hdr *lsa)
1131 {
1132 	struct rde_req_entry	*le;
1133 
1134 	if ((le = calloc(1, sizeof(*le))) == NULL)
1135 		fatal("rde_req_list_add");
1136 
1137 	TAILQ_INSERT_TAIL(&nbr->req_list, le, entry);
1138 	le->type = lsa->type;
1139 	le->ls_id = lsa->ls_id;
1140 	le->adv_rtr = lsa->adv_rtr;
1141 }
1142 
1143 int
1144 rde_req_list_exists(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
1145 {
1146 	struct rde_req_entry	*le;
1147 
1148 	TAILQ_FOREACH(le, &nbr->req_list, entry) {
1149 		if ((lsa_hdr->type == le->type) &&
1150 		    (lsa_hdr->ls_id == le->ls_id) &&
1151 		    (lsa_hdr->adv_rtr == le->adv_rtr))
1152 			return (1);
1153 	}
1154 	return (0);
1155 }
1156 
1157 void
1158 rde_req_list_del(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
1159 {
1160 	struct rde_req_entry	*le;
1161 
1162 	TAILQ_FOREACH(le, &nbr->req_list, entry) {
1163 		if ((lsa_hdr->type == le->type) &&
1164 		    (lsa_hdr->ls_id == le->ls_id) &&
1165 		    (lsa_hdr->adv_rtr == le->adv_rtr)) {
1166 			TAILQ_REMOVE(&nbr->req_list, le, entry);
1167 			free(le);
1168 			return;
1169 		}
1170 	}
1171 }
1172 
1173 void
1174 rde_req_list_free(struct rde_nbr *nbr)
1175 {
1176 	struct rde_req_entry	*le;
1177 
1178 	while ((le = TAILQ_FIRST(&nbr->req_list)) != NULL) {
1179 		TAILQ_REMOVE(&nbr->req_list, le, entry);
1180 		free(le);
1181 	}
1182 }
1183 
1184 /*
1185  * as-external LSA handling
1186  */
1187 struct lsa *
1188 rde_asext_get(struct rroute *rr)
1189 {
1190 	struct area		*area;
1191 	struct iface		*iface;
1192 	struct iface_addr	*ia;
1193 	struct in6_addr		 addr;
1194 
1195 	LIST_FOREACH(area, &rdeconf->area_list, entry)
1196 		LIST_FOREACH(iface, &area->iface_list, entry)
1197 			TAILQ_FOREACH(ia, &iface->ifa_list, entry) {
1198 				if (IN6_IS_ADDR_LINKLOCAL(&ia->addr))
1199 					continue;
1200 
1201 				inet6applymask(&addr, &ia->addr,
1202 				    rr->kr.prefixlen);
1203 				if (!memcmp(&addr, &rr->kr.prefix,
1204 				    sizeof(addr)) && rr->kr.prefixlen ==
1205 				    ia->prefixlen) {
1206 					/* already announced as Prefix LSA */
1207 					log_debug("rde_asext_get: %s/%d is "
1208 					    "part of prefix LSA",
1209 					    log_in6addr(&rr->kr.prefix),
1210 					    rr->kr.prefixlen);
1211 					return (NULL);
1212 				}
1213 			}
1214 
1215 	/* update of seqnum is done by lsa_merge */
1216 	return (orig_asext_lsa(rr, DEFAULT_AGE));
1217 }
1218 
1219 struct lsa *
1220 rde_asext_put(struct rroute *rr)
1221 {
1222 	/*
1223 	 * just try to remove the LSA. If the prefix is announced as
1224 	 * stub net LSA lsa_find() will fail later and nothing will happen.
1225 	 */
1226 
1227 	/* remove by reflooding with MAX_AGE */
1228 	return (orig_asext_lsa(rr, MAX_AGE));
1229 }
1230 
1231 /*
1232  * summary LSA stuff
1233  */
1234 void
1235 rde_summary_update(struct rt_node *rte, struct area *area)
1236 {
1237 	struct vertex		*v = NULL;
1238 //XXX	struct lsa		*lsa;
1239 	u_int16_t		 type = 0;
1240 
1241 	/* first check if we actually need to announce this route */
1242 	if (!(rte->d_type == DT_NET || rte->flags & OSPF_RTR_E))
1243 		return;
1244 	/* never create summaries for as-ext LSA */
1245 	if (rte->p_type == PT_TYPE1_EXT || rte->p_type == PT_TYPE2_EXT)
1246 		return;
1247 	/* no need for summary LSA in the originating area */
1248 	if (rte->area.s_addr == area->id.s_addr)
1249 		return;
1250 	/* no need to originate inter-area routes to the backbone */
1251 	if (rte->p_type == PT_INTER_AREA && area->id.s_addr == INADDR_ANY)
1252 		return;
1253 	/* TODO nexthop check, nexthop part of area -> no summary */
1254 	if (rte->cost >= LS_INFINITY)
1255 		return;
1256 	/* TODO AS border router specific checks */
1257 	/* TODO inter-area network route stuff */
1258 	/* TODO intra-area stuff -- condense LSA ??? */
1259 
1260 	if (rte->d_type == DT_NET) {
1261 		type = LSA_TYPE_INTER_A_PREFIX;
1262 	} else if (rte->d_type == DT_RTR) {
1263 		type = LSA_TYPE_INTER_A_ROUTER;
1264 	} else
1265 
1266 #if 0 /* XXX a lot todo */
1267 	/* update lsa but only if it was changed */
1268 	v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id());
1269 	lsa = orig_sum_lsa(rte, area, type, rte->invalid);
1270 	lsa_merge(rde_nbr_self(area), lsa, v);
1271 
1272 	if (v == NULL)
1273 		v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id());
1274 #endif
1275 
1276 	/* suppressed/deleted routes are not found in the second lsa_find */
1277 	if (v)
1278 		v->cost = rte->cost;
1279 }
1280 
1281 /*
1282  * Functions for self-originated LSAs
1283  */
1284 
1285 /* Prefix LSAs have variable size. We have to be careful to copy the right
1286  * amount of bytes, and to realloc() the right amount of memory. */
1287 void
1288 append_prefix_lsa(struct lsa **lsa, u_int16_t *len, struct lsa_prefix *prefix)
1289 {
1290 	struct lsa_prefix	*copy;
1291 	unsigned int		 lsa_prefix_len;
1292 	unsigned int		 new_len;
1293 	char			*new_lsa;
1294 
1295 	lsa_prefix_len = sizeof(struct lsa_prefix)
1296 	    + LSA_PREFIXSIZE(prefix->prefixlen);
1297 
1298 	new_len = *len + lsa_prefix_len;
1299 
1300 	/* Make sure we have enough space for this prefix. */
1301 	if ((new_lsa = realloc(*lsa, new_len)) == NULL)
1302 		fatalx("append_prefix_lsa");
1303 
1304 	/* Append prefix to LSA. */
1305 	copy = (struct lsa_prefix *)(new_lsa + *len);
1306 	memcpy(copy, prefix, lsa_prefix_len);
1307 	copy->metric = 0;
1308 
1309 	*lsa = (struct lsa *)new_lsa;
1310 	*len = new_len;
1311 }
1312 
1313 int
1314 prefix_compare(struct prefix_node *a, struct prefix_node *b)
1315 {
1316 	struct lsa_prefix	*p;
1317 	struct lsa_prefix	*q;
1318 	int			 i;
1319 	int			 len;
1320 
1321 	p = a->prefix;
1322 	q = b->prefix;
1323 
1324 	len = MINIMUM(LSA_PREFIXSIZE(p->prefixlen), LSA_PREFIXSIZE(q->prefixlen));
1325 
1326 	i = memcmp(p + 1, q + 1, len);
1327 	if (i)
1328 		return (i);
1329 	if (p->prefixlen < q->prefixlen)
1330 		return (-1);
1331 	if (p->prefixlen > q->prefixlen)
1332 		return (1);
1333 	return (0);
1334 }
1335 
1336 void
1337 prefix_tree_add(struct prefix_tree *tree, struct lsa_link *lsa)
1338 {
1339 	struct prefix_node	*old;
1340 	struct prefix_node	*new;
1341 	struct in6_addr		 addr;
1342 	unsigned int		 len;
1343 	unsigned int		 i;
1344 	char			*cur_prefix;
1345 
1346 	cur_prefix = (char *)(lsa + 1);
1347 
1348 	for (i = 0; i < ntohl(lsa->numprefix); i++) {
1349 		if ((new = calloc(1, sizeof(*new))) == NULL)
1350 			fatal("prefix_tree_add");
1351 		new->prefix = (struct lsa_prefix *)cur_prefix;
1352 
1353 		len = sizeof(*new->prefix)
1354 		    + LSA_PREFIXSIZE(new->prefix->prefixlen);
1355 
1356 		bzero(&addr, sizeof(addr));
1357 		memcpy(&addr, new->prefix + 1,
1358 		    LSA_PREFIXSIZE(new->prefix->prefixlen));
1359 
1360 		if (!(IN6_IS_ADDR_LINKLOCAL(&addr)) &&
1361 		    (new->prefix->options & OSPF_PREFIX_NU) == 0 &&
1362 		    (new->prefix->options & OSPF_PREFIX_LA) == 0) {
1363 			old = RB_INSERT(prefix_tree, tree, new);
1364 			if (old != NULL) {
1365 				old->prefix->options |= new->prefix->options;
1366 				free(new);
1367 			}
1368 		}
1369 
1370 		cur_prefix = cur_prefix + len;
1371 	}
1372 }
1373 
1374 RB_GENERATE(prefix_tree, prefix_node, entry, prefix_compare)
1375 
1376 struct lsa *
1377 orig_intra_lsa_net(struct area *area, struct iface *iface, struct vertex *old)
1378 {
1379 	struct lsa		*lsa;
1380 	struct vertex		*v;
1381 	struct rde_nbr		*nbr;
1382 	struct prefix_node	*node;
1383 	struct prefix_tree	 tree;
1384 	int			 num_full_nbr;
1385 	u_int16_t		 len;
1386 	u_int16_t		 numprefix;
1387 
1388 	log_debug("orig_intra_lsa_net: area %s, interface %s",
1389 	    inet_ntoa(area->id), iface->name);
1390 
1391 	RB_INIT(&tree);
1392 
1393 	if (iface->state & IF_STA_DR) {
1394 		num_full_nbr = 0;
1395 		LIST_FOREACH(nbr, &area->nbr_list, entry) {
1396 			if (nbr->self ||
1397 			    nbr->iface->ifindex != iface->ifindex ||
1398 			    (nbr->state & NBR_STA_FULL) == 0)
1399 				continue;
1400 			num_full_nbr++;
1401 			v = lsa_find(iface, htons(LSA_TYPE_LINK),
1402 			    htonl(nbr->iface_id), nbr->id.s_addr);
1403 			if (v)
1404 				prefix_tree_add(&tree, &v->lsa->data.link);
1405 		}
1406 		if (num_full_nbr == 0) {
1407 			/* There are no adjacent neighbors on link.
1408 			 * If a copy of this LSA already exists in DB,
1409 			 * it needs to be flushed. orig_intra_lsa_rtr()
1410 			 * will take care of prefixes configured on
1411 			 * this interface. */
1412 			if (!old)
1413 				return NULL;
1414 		} else {
1415 			/* Add our own prefixes configured for this link. */
1416 			v = lsa_find(iface, htons(LSA_TYPE_LINK),
1417 			    htonl(iface->ifindex), rde_router_id());
1418 			if (v)
1419 				prefix_tree_add(&tree, &v->lsa->data.link);
1420 		}
1421 	/* Continue only if a copy of this LSA already exists in DB.
1422 	 * It needs to be flushed. */
1423 	} else if (!old)
1424 		return NULL;
1425 
1426 	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_intra_prefix);
1427 	if ((lsa = calloc(1, len)) == NULL)
1428 		fatal("orig_intra_lsa_net");
1429 
1430 	lsa->data.pref_intra.ref_type = htons(LSA_TYPE_NETWORK);
1431 	lsa->data.pref_intra.ref_ls_id = htonl(iface->ifindex);
1432 	lsa->data.pref_intra.ref_adv_rtr = rde_router_id();
1433 
1434 	numprefix = 0;
1435 	RB_FOREACH(node, prefix_tree, &tree) {
1436 		append_prefix_lsa(&lsa, &len, node->prefix);
1437 		numprefix++;
1438 	}
1439 
1440 	lsa->data.pref_intra.numprefix = htons(numprefix);
1441 
1442 	while (!RB_EMPTY(&tree))
1443 		free(RB_REMOVE(prefix_tree, &tree, RB_ROOT(&tree)));
1444 
1445 	/* LSA header */
1446 	/* If numprefix is zero, originate with MAX_AGE to flush LSA. */
1447 	lsa->hdr.age = numprefix == 0 ? htons(MAX_AGE) : htons(DEFAULT_AGE);
1448 	lsa->hdr.type = htons(LSA_TYPE_INTRA_A_PREFIX);
1449 	lsa->hdr.ls_id = htonl(iface->ifindex);
1450 	lsa->hdr.adv_rtr = rde_router_id();
1451 	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
1452 	lsa->hdr.len = htons(len);
1453 	lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));
1454 
1455 	return lsa;
1456 }
1457 
1458 struct lsa *
1459 orig_intra_lsa_rtr(struct area *area, struct vertex *old)
1460 {
1461 	char			lsa_prefix_buf[sizeof(struct lsa_prefix)
1462 				    + sizeof(struct in6_addr)];
1463 	struct lsa		*lsa;
1464 	struct lsa_prefix	*lsa_prefix;
1465 	struct in6_addr		*prefix;
1466 	struct iface		*iface;
1467 	struct iface_addr	*ia;
1468 	struct rde_nbr		*nbr;
1469 	u_int16_t		 len;
1470 	u_int16_t		 numprefix;
1471 
1472 	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_intra_prefix);
1473 	if ((lsa = calloc(1, len)) == NULL)
1474 		fatal("orig_intra_lsa_rtr");
1475 
1476 	lsa->data.pref_intra.ref_type = htons(LSA_TYPE_ROUTER);
1477 	lsa->data.pref_intra.ref_ls_id = 0;
1478 	lsa->data.pref_intra.ref_adv_rtr = rde_router_id();
1479 
1480 	numprefix = 0;
1481 	LIST_FOREACH(iface, &area->iface_list, entry) {
1482 		if (!((iface->flags & IFF_UP) &&
1483 		    LINK_STATE_IS_UP(iface->linkstate)))
1484 			/* interface or link state down */
1485 			continue;
1486 		if ((iface->state & IF_STA_DOWN) &&
1487 		    !(iface->cflags & F_IFACE_PASSIVE))
1488 			/* passive interfaces stay in state DOWN */
1489 			continue;
1490 
1491 		/* Broadcast links with adjacencies are handled
1492 		 * by orig_intra_lsa_net(), ignore. */
1493 		if (iface->type == IF_TYPE_BROADCAST ||
1494 		    iface->type == IF_TYPE_NBMA) {
1495 			if (iface->state & IF_STA_WAITING)
1496 				/* Skip, we're still waiting for
1497 				 * adjacencies to form. */
1498 				continue;
1499 
1500 			LIST_FOREACH(nbr, &area->nbr_list, entry)
1501 				if (!nbr->self &&
1502 				    nbr->iface->ifindex == iface->ifindex &&
1503 				    nbr->state & NBR_STA_FULL)
1504 					break;
1505 			if (nbr)
1506 				continue;
1507 		}
1508 
1509 		lsa_prefix = (struct lsa_prefix *)lsa_prefix_buf;
1510 
1511 		TAILQ_FOREACH(ia, &iface->ifa_list, entry) {
1512 			if (IN6_IS_ADDR_LINKLOCAL(&ia->addr))
1513 				continue;
1514 
1515 			bzero(lsa_prefix_buf, sizeof(lsa_prefix_buf));
1516 
1517 			if (iface->type == IF_TYPE_POINTOMULTIPOINT ||
1518 			    iface->state & IF_STA_LOOPBACK) {
1519 				lsa_prefix->prefixlen = 128;
1520 			} else {
1521 				lsa_prefix->prefixlen = ia->prefixlen;
1522 				lsa_prefix->metric = htons(iface->metric);
1523 			}
1524 
1525 			if (lsa_prefix->prefixlen == 128)
1526 				lsa_prefix->options |= OSPF_PREFIX_LA;
1527 
1528 			log_debug("orig_intra_lsa_rtr: area %s, interface %s: "
1529 			    "%s/%d", inet_ntoa(area->id),
1530 			    iface->name, log_in6addr(&ia->addr),
1531 			    lsa_prefix->prefixlen);
1532 
1533 			prefix = (struct in6_addr *)(lsa_prefix + 1);
1534 			inet6applymask(prefix, &ia->addr,
1535 			    lsa_prefix->prefixlen);
1536 			append_prefix_lsa(&lsa, &len, lsa_prefix);
1537 			numprefix++;
1538 		}
1539 
1540 		/* TOD: Add prefixes of directly attached hosts, too */
1541 		/* TOD: Add prefixes for virtual links */
1542 	}
1543 
1544 	/* If no prefixes were included, continue only if a copy of this
1545 	 * LSA already exists in DB. It needs to be flushed. */
1546 	if (numprefix == 0 && !old) {
1547 		free(lsa);
1548 		return NULL;
1549 	}
1550 
1551 	lsa->data.pref_intra.numprefix = htons(numprefix);
1552 
1553 	/* LSA header */
1554 	/* If numprefix is zero, originate with MAX_AGE to flush LSA. */
1555 	lsa->hdr.age = numprefix == 0 ? htons(MAX_AGE) : htons(DEFAULT_AGE);
1556 	lsa->hdr.type = htons(LSA_TYPE_INTRA_A_PREFIX);
1557 	lsa->hdr.ls_id = htonl(LS_ID_INTRA_RTR);
1558 	lsa->hdr.adv_rtr = rde_router_id();
1559 	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
1560 	lsa->hdr.len = htons(len);
1561 	lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));
1562 
1563 	return lsa;
1564 }
1565 
1566 void
1567 orig_intra_area_prefix_lsas(struct area *area)
1568 {
1569 	struct lsa	*lsa;
1570 	struct vertex	*old;
1571 	struct iface	*iface;
1572 
1573 	LIST_FOREACH(iface, &area->iface_list, entry) {
1574 		if (iface->type == IF_TYPE_BROADCAST ||
1575 		    iface->type == IF_TYPE_NBMA) {
1576 			old = lsa_find(iface, htons(LSA_TYPE_INTRA_A_PREFIX),
1577 			    htonl(iface->ifindex), rde_router_id());
1578 			lsa = orig_intra_lsa_net(area, iface, old);
1579 			if (lsa)
1580 				lsa_merge(rde_nbr_self(area), lsa, old);
1581 		}
1582 	}
1583 
1584 	old = lsa_find_tree(&area->lsa_tree, htons(LSA_TYPE_INTRA_A_PREFIX),
1585 		htonl(LS_ID_INTRA_RTR), rde_router_id());
1586 	lsa = orig_intra_lsa_rtr(area, old);
1587 	if (lsa)
1588 		lsa_merge(rde_nbr_self(area), lsa, old);
1589 }
1590 
1591 int
1592 comp_asext(struct lsa *a, struct lsa *b)
1593 {
1594 	/* compare prefixes, if they are equal or not */
1595 	if (a->data.asext.prefix.prefixlen != b->data.asext.prefix.prefixlen)
1596 		return (-1);
1597 	return (memcmp(
1598 	    (char *)a + sizeof(struct lsa_hdr) + sizeof(struct lsa_asext),
1599 	    (char *)b + sizeof(struct lsa_hdr) + sizeof(struct lsa_asext),
1600 	    LSA_PREFIXSIZE(a->data.asext.prefix.prefixlen)));
1601 }
1602 
1603 struct lsa *
1604 orig_asext_lsa(struct rroute *rr, u_int16_t age)
1605 {
1606 	struct lsa	*lsa;
1607 	u_int32_t	 ext_tag;
1608 	u_int16_t	 len, ext_off;
1609 
1610 	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_asext) +
1611 	    LSA_PREFIXSIZE(rr->kr.prefixlen);
1612 
1613 	/*
1614 	 * nexthop -- on connected routes we are the nexthop,
1615 	 * on all other cases we should announce the true nexthop
1616 	 * unless that nexthop is outside of the ospf cloud.
1617 	 * XXX for now we don't do this.
1618 	 */
1619 
1620 	ext_off = len;
1621 	if (rr->kr.ext_tag) {
1622 		len += sizeof(ext_tag);
1623 	}
1624 	if ((lsa = calloc(1, len)) == NULL)
1625 		fatal("orig_asext_lsa");
1626 
1627 	log_debug("orig_asext_lsa: %s/%d age %d",
1628 	    log_in6addr(&rr->kr.prefix), rr->kr.prefixlen, age);
1629 
1630 	/* LSA header */
1631 	lsa->hdr.age = htons(age);
1632 	lsa->hdr.type = htons(LSA_TYPE_EXTERNAL);
1633 	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
1634 	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
1635 	lsa->hdr.len = htons(len);
1636 
1637 	lsa->data.asext.prefix.prefixlen = rr->kr.prefixlen;
1638 	memcpy((char *)lsa + sizeof(struct lsa_hdr) + sizeof(struct lsa_asext),
1639 	    &rr->kr.prefix, LSA_PREFIXSIZE(rr->kr.prefixlen));
1640 
1641 	lsa->hdr.ls_id = lsa_find_lsid(&asext_tree, lsa->hdr.type,
1642 	    lsa->hdr.adv_rtr, comp_asext, lsa);
1643 
1644 	if (age == MAX_AGE) {
1645 		/* inherit metric and ext_tag from the current LSA,
1646 		 * some routers don't like to get withdraws that are
1647 		 * different from what they have in their table.
1648 		 */
1649 		struct vertex *v;
1650 		v = lsa_find(NULL, lsa->hdr.type, lsa->hdr.ls_id,
1651 		    lsa->hdr.adv_rtr);
1652 		if (v != NULL) {
1653 			rr->metric = ntohl(v->lsa->data.asext.metric);
1654 			if (rr->metric & LSA_ASEXT_T_FLAG) {
1655 				memcpy(&ext_tag, (char *)v->lsa + ext_off,
1656 				    sizeof(ext_tag));
1657 				rr->kr.ext_tag = ntohl(ext_tag);
1658 			}
1659 			rr->metric &= LSA_METRIC_MASK;
1660 		}
1661 	}
1662 
1663 	if (rr->kr.ext_tag) {
1664 		lsa->data.asext.metric = htonl(rr->metric | LSA_ASEXT_T_FLAG);
1665 		ext_tag = htonl(rr->kr.ext_tag);
1666 		memcpy((char *)lsa + ext_off, &ext_tag, sizeof(ext_tag));
1667 	} else {
1668 		lsa->data.asext.metric = htonl(rr->metric);
1669 	}
1670 
1671 	lsa->hdr.ls_chksum = 0;
1672 	lsa->hdr.ls_chksum =
1673 	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));
1674 
1675 	return (lsa);
1676 }
1677 
1678 struct lsa *
1679 orig_sum_lsa(struct rt_node *rte, struct area *area, u_int8_t type, int invalid)
1680 {
1681 #if 0 /* XXX a lot todo */
1682 	struct lsa	*lsa;
1683 	u_int16_t	 len;
1684 
1685 	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_sum);
1686 	if ((lsa = calloc(1, len)) == NULL)
1687 		fatal("orig_sum_lsa");
1688 
1689 	/* LSA header */
1690 	lsa->hdr.age = htons(invalid ? MAX_AGE : DEFAULT_AGE);
1691 	lsa->hdr.type = type;
1692 	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
1693 	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
1694 	lsa->hdr.len = htons(len);
1695 
1696 	/* prefix and mask */
1697 	/*
1698 	 * TODO ls_id must be unique, for overlapping routes this may
1699 	 * not be true. In this case a hack needs to be done to
1700 	 * make the ls_id unique.
1701 	 */
1702 	lsa->hdr.ls_id = rte->prefix.s_addr;
1703 	if (type == LSA_TYPE_SUM_NETWORK)
1704 		lsa->data.sum.mask = prefixlen2mask(rte->prefixlen);
1705 	else
1706 		lsa->data.sum.mask = 0;	/* must be zero per RFC */
1707 
1708 	lsa->data.sum.metric = htonl(rte->cost & LSA_METRIC_MASK);
1709 
1710 	lsa->hdr.ls_chksum = 0;
1711 	lsa->hdr.ls_chksum =
1712 	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));
1713 
1714 	return (lsa);
1715 #endif
1716 	return NULL;
1717 }
1718