1 /*	$OpenBSD: rde.c,v 1.84 2020/02/17 08:12:22 denis Exp $ */
2 
3 /*
4  * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
5  * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
6  * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #include <sys/types.h>
22 #include <sys/socket.h>
23 #include <sys/queue.h>
24 #include <net/if_types.h>
25 #include <netinet/in.h>
26 #include <arpa/inet.h>
27 #include <err.h>
28 #include <errno.h>
29 #include <stdlib.h>
30 #include <signal.h>
31 #include <string.h>
32 #include <pwd.h>
33 #include <unistd.h>
34 #include <event.h>
35 
36 #include "ospf6.h"
37 #include "ospf6d.h"
38 #include "ospfe.h"
39 #include "log.h"
40 #include "rde.h"
41 
42 #define MINIMUM(a, b)	(((a) < (b)) ? (a) : (b))
43 
44 void		 rde_sig_handler(int sig, short, void *);
45 __dead void	 rde_shutdown(void);
46 void		 rde_dispatch_imsg(int, short, void *);
47 void		 rde_dispatch_parent(int, short, void *);
48 void		 rde_dump_area(struct area *, int, pid_t);
49 
50 void		 rde_send_summary(pid_t);
51 void		 rde_send_summary_area(struct area *, pid_t);
52 void		 rde_nbr_init(u_int32_t);
53 void		 rde_nbr_free(void);
54 struct rde_nbr	*rde_nbr_new(u_int32_t, struct rde_nbr *);
55 void		 rde_nbr_del(struct rde_nbr *);
56 
57 void		 rde_req_list_add(struct rde_nbr *, struct lsa_hdr *);
58 int		 rde_req_list_exists(struct rde_nbr *, struct lsa_hdr *);
59 void		 rde_req_list_del(struct rde_nbr *, struct lsa_hdr *);
60 void		 rde_req_list_free(struct rde_nbr *);
61 
62 struct lsa	*rde_asext_get(struct kroute *);
63 struct lsa	*rde_asext_put(struct kroute *);
64 
65 int		 comp_asext(struct lsa *, struct lsa *);
66 struct lsa	*orig_asext_lsa(struct kroute *, u_int16_t);
67 struct lsa	*orig_sum_lsa(struct rt_node *, struct area *, u_int8_t, int);
68 struct lsa	*orig_intra_lsa_net(struct area *, struct iface *,
69 		    struct vertex *);
70 struct lsa	*orig_intra_lsa_rtr(struct area *, struct vertex *);
71 void		 append_prefix_lsa(struct lsa **, u_int16_t *,
72 		    struct lsa_prefix *);
73 
74 /* A 32-bit value != any ifindex.
75  * We assume ifindex is bound by [1, USHRT_MAX] inclusive. */
76 #define	LS_ID_INTRA_RTR	0x01000000
77 
78 /* Tree of prefixes with global scope on a given link,
79  * see orig_intra_lsa_*() */
80 struct prefix_node {
81 	RB_ENTRY(prefix_node)	 entry;
82 	struct lsa_prefix	*prefix;
83 };
84 RB_HEAD(prefix_tree, prefix_node);
85 RB_PROTOTYPE(prefix_tree, prefix_node, entry, prefix_compare);
86 int		 prefix_compare(struct prefix_node *, struct prefix_node *);
87 void		 prefix_tree_add(struct prefix_tree *, struct lsa_link *);
88 
89 struct ospfd_conf	*rdeconf = NULL, *nconf = NULL;
90 struct imsgev		*iev_ospfe;
91 struct imsgev		*iev_main;
92 struct rde_nbr		*nbrself;
93 struct lsa_tree		 asext_tree;
94 
95 /* ARGSUSED */
96 void
97 rde_sig_handler(int sig, short event, void *arg)
98 {
99 	/*
100 	 * signal handler rules don't apply, libevent decouples for us
101 	 */
102 
103 	switch (sig) {
104 	case SIGINT:
105 	case SIGTERM:
106 		rde_shutdown();
107 		/* NOTREACHED */
108 	default:
109 		fatalx("unexpected signal");
110 	}
111 }
112 
113 /* route decision engine */
114 pid_t
115 rde(struct ospfd_conf *xconf, int pipe_parent2rde[2], int pipe_ospfe2rde[2],
116     int pipe_parent2ospfe[2])
117 {
118 	struct event		 ev_sigint, ev_sigterm;
119 	struct timeval		 now;
120 	struct passwd		*pw;
121 	pid_t			 pid;
122 
123 	switch (pid = fork()) {
124 	case -1:
125 		fatal("cannot fork");
126 		/* NOTREACHED */
127 	case 0:
128 		break;
129 	default:
130 		return (pid);
131 	}
132 
133 	rdeconf = xconf;
134 
135 	if ((pw = getpwnam(OSPF6D_USER)) == NULL)
136 		fatal("getpwnam");
137 
138 	if (chroot(pw->pw_dir) == -1)
139 		fatal("chroot");
140 	if (chdir("/") == -1)
141 		fatal("chdir(\"/\")");
142 
143 	setproctitle("route decision engine");
144 	/*
145 	 * XXX needed with fork+exec
146 	 * log_init(debug, LOG_DAEMON);
147 	 * log_setverbose(verbose);
148 	 */
149 
150 	ospfd_process = PROC_RDE_ENGINE;
151 	log_procinit(log_procnames[ospfd_process]);
152 
153 	if (setgroups(1, &pw->pw_gid) ||
154 	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
155 	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
156 		fatal("can't drop privileges");
157 
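	/*
	 * The RDE only talks over the imsg pipes set up below, so
	 * pledge "stdio" is sufficient for the rest of its lifetime.
	 */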
158 	if (pledge("stdio", NULL) == -1)
159 		fatal("pledge");
160 
161 	event_init();
162 	rde_nbr_init(NBR_HASHSIZE);
163 	lsa_init(&asext_tree);
164 
165 	/* setup signal handler */
166 	signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
167 	signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
168 	signal_add(&ev_sigint, NULL);
169 	signal_add(&ev_sigterm, NULL);
170 	signal(SIGPIPE, SIG_IGN);
171 	signal(SIGHUP, SIG_IGN);
172 
173 	/* setup pipes */
174 	close(pipe_ospfe2rde[0]);
175 	close(pipe_parent2rde[0]);
176 	close(pipe_parent2ospfe[0]);
177 	close(pipe_parent2ospfe[1]);
178 
179 	if ((iev_ospfe = malloc(sizeof(struct imsgev))) == NULL ||
180 	    (iev_main = malloc(sizeof(struct imsgev))) == NULL)
181 		fatal(NULL);
182 	imsg_init(&iev_ospfe->ibuf, pipe_ospfe2rde[1]);
183 	iev_ospfe->handler = rde_dispatch_imsg;
184 	imsg_init(&iev_main->ibuf, pipe_parent2rde[1]);
185 	iev_main->handler = rde_dispatch_parent;
186 
187 	/* setup event handler */
188 	iev_ospfe->events = EV_READ;
189 	event_set(&iev_ospfe->ev, iev_ospfe->ibuf.fd, iev_ospfe->events,
190 	    iev_ospfe->handler, iev_ospfe);
191 	event_add(&iev_ospfe->ev, NULL);
192 
193 	iev_main->events = EV_READ;
194 	event_set(&iev_main->ev, iev_main->ibuf.fd, iev_main->events,
195 	    iev_main->handler, iev_main);
196 	event_add(&iev_main->ev, NULL);
197 
198 	evtimer_set(&rdeconf->ev, spf_timer, rdeconf);
199 	cand_list_init();
200 	rt_init();
201 
202 	/* remove unneeded stuff from config */
203 	conf_clear_redist_list(&rdeconf->redist_list);
204 
205 	gettimeofday(&now, NULL);
206 	rdeconf->uptime = now.tv_sec;
207 
208 	event_dispatch();
209 
210 	rde_shutdown();
211 	/* NOTREACHED */
212 
213 	return (0);
214 }
215 
216 __dead void
217 rde_shutdown(void)
218 {
219 	struct area	*a;
220 
221 	/* close pipes */
222 	msgbuf_clear(&iev_ospfe->ibuf.w);
223 	close(iev_ospfe->ibuf.fd);
224 	msgbuf_clear(&iev_main->ibuf.w);
225 	close(iev_main->ibuf.fd);
226 
227 	stop_spf_timer(rdeconf);
228 	cand_list_clr();
229 	rt_clear();
230 
231 	while ((a = LIST_FIRST(&rdeconf->area_list)) != NULL) {
232 		LIST_REMOVE(a, entry);
233 		area_del(a);
234 	}
235 	rde_nbr_free();
236 
237 	free(iev_ospfe);
238 	free(iev_main);
239 	free(rdeconf);
240 
241 	log_info("route decision engine exiting");
242 	_exit(0);
243 }
244 
245 int
246 rde_imsg_compose_ospfe(int type, u_int32_t peerid, pid_t pid, void *data,
247     u_int16_t datalen)
248 {
249 	return (imsg_compose_event(iev_ospfe, type, peerid, pid, -1,
250 	    data, datalen));
251 }
252 
253 /* ARGSUSED */
254 void
255 rde_dispatch_imsg(int fd, short event, void *bula)
256 {
257 	struct imsgev		*iev = bula;
258 	struct imsgbuf		*ibuf = &iev->ibuf;
259 	struct imsg		 imsg;
260 	struct in_addr		 aid;
261 	struct ls_req_hdr	 req_hdr;
262 	struct lsa_hdr		 lsa_hdr, *db_hdr;
263 	struct rde_nbr		 rn, *nbr;
264 	struct timespec		 tp;
265 	struct lsa		*lsa;
266 	struct area		*area;
267 	struct vertex		*v;
268 	char			*buf;
269 	ssize_t			 n;
270 	time_t			 now;
271 	int			 r, state, self, shut = 0, verbose;
272 	u_int16_t		 l;
273 
274 	if (event & EV_READ) {
275 		if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN)
276 			fatal("imsg_read error");
277 		if (n == 0)	/* connection closed */
278 			shut = 1;
279 	}
280 	if (event & EV_WRITE) {
281 		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
282 			fatal("msgbuf_write");
283 		if (n == 0)	/* connection closed */
284 			shut = 1;
285 	}
286 
287 	clock_gettime(CLOCK_MONOTONIC, &tp);
288 	now = tp.tv_sec;
289 
290 	for (;;) {
291 		if ((n = imsg_get(ibuf, &imsg)) == -1)
292 			fatal("rde_dispatch_imsg: imsg_get error");
293 		if (n == 0)
294 			break;
295 
296 		switch (imsg.hdr.type) {
297 		case IMSG_NEIGHBOR_UP:
298 			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rn))
299 				fatalx("invalid size of OE request");
300 			memcpy(&rn, imsg.data, sizeof(rn));
301 
302 			if (rde_nbr_new(imsg.hdr.peerid, &rn) == NULL)
303 				fatalx("rde_dispatch_imsg: "
304 				    "neighbor already exists");
305 			break;
306 		case IMSG_NEIGHBOR_DOWN:
307 			rde_nbr_del(rde_nbr_find(imsg.hdr.peerid));
308 			break;
309 		case IMSG_NEIGHBOR_CHANGE:
310 			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state))
311 				fatalx("invalid size of OE request");
312 			memcpy(&state, imsg.data, sizeof(state));
313 
314 			nbr = rde_nbr_find(imsg.hdr.peerid);
315 			if (nbr == NULL)
316 				break;
317 
318 			if (state != nbr->state &&
319 			    (nbr->state & NBR_STA_FULL ||
320 			    state & NBR_STA_FULL)) {
321 				nbr->state = state;
322 				area_track(nbr->area);
323 				orig_intra_area_prefix_lsas(nbr->area);
324 			}
325 
326 			nbr->state = state;
327 			if (nbr->state & NBR_STA_FULL)
328 				rde_req_list_free(nbr);
329 			break;
330 		case IMSG_AREA_CHANGE:
331 			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state))
332 				fatalx("invalid size of OE request");
333 
334 			LIST_FOREACH(area, &rdeconf->area_list, entry) {
335 				if (area->id.s_addr == imsg.hdr.peerid)
336 					break;
337 			}
338 			if (area == NULL)
339 				break;
340 			memcpy(&state, imsg.data, sizeof(state));
341 			area->active = state;
342 			break;
343 		case IMSG_DB_SNAPSHOT:
344 			nbr = rde_nbr_find(imsg.hdr.peerid);
345 			if (nbr == NULL)
346 				break;
347 
348 			lsa_snap(nbr);
349 
350 			imsg_compose_event(iev_ospfe, IMSG_DB_END, imsg.hdr.peerid,
351 			    0, -1, NULL, 0);
352 			break;
353 		case IMSG_DD:
354 			nbr = rde_nbr_find(imsg.hdr.peerid);
355 			if (nbr == NULL)
356 				break;
357 
358 			buf = imsg.data;
359 			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
360 			    l >= sizeof(lsa_hdr); l -= sizeof(lsa_hdr)) {
361 				memcpy(&lsa_hdr, buf, sizeof(lsa_hdr));
362 				buf += sizeof(lsa_hdr);
363 
364 				v = lsa_find(nbr->iface, lsa_hdr.type,
365 				    lsa_hdr.ls_id, lsa_hdr.adv_rtr);
366 				if (v == NULL)
367 					db_hdr = NULL;
368 				else
369 					db_hdr = &v->lsa->hdr;
370 
371 				if (lsa_newer(&lsa_hdr, db_hdr) > 0) {
372 					/*
373 					 * only request LSAs that are
374 					 * newer or missing
375 					 */
376 					rde_req_list_add(nbr, &lsa_hdr);
377 					imsg_compose_event(iev_ospfe, IMSG_DD,
378 					    imsg.hdr.peerid, 0, -1, &lsa_hdr,
379 					    sizeof(lsa_hdr));
380 				}
381 			}
382 			if (l != 0)
383 				log_warnx("rde_dispatch_imsg: peerid %u, "
384 				    "trailing garbage in Database Description "
385 				    "packet", imsg.hdr.peerid);
386 
387 			imsg_compose_event(iev_ospfe, IMSG_DD_END,
388 			    imsg.hdr.peerid, 0, -1, NULL, 0);
389 			break;
390 		case IMSG_LS_REQ:
391 			nbr = rde_nbr_find(imsg.hdr.peerid);
392 			if (nbr == NULL)
393 				break;
394 
395 			buf = imsg.data;
396 			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
397 			    l >= sizeof(req_hdr); l -= sizeof(req_hdr)) {
398 				memcpy(&req_hdr, buf, sizeof(req_hdr));
399 				buf += sizeof(req_hdr);
400 
401 				if ((v = lsa_find(nbr->iface,
402 				    req_hdr.type, req_hdr.ls_id,
403 				    req_hdr.adv_rtr)) == NULL) {
404 					imsg_compose_event(iev_ospfe,
405 					    IMSG_LS_BADREQ, imsg.hdr.peerid,
406 					    0, -1, NULL, 0);
407 					continue;
408 				}
409 				imsg_compose_event(iev_ospfe, IMSG_LS_UPD,
410 				    imsg.hdr.peerid, 0, -1, v->lsa,
411 				    ntohs(v->lsa->hdr.len));
412 			}
413 			if (l != 0)
414 				log_warnx("rde_dispatch_imsg: peerid %u, "
415 				    "trailing garbage in LS Request "
416 				    "packet", imsg.hdr.peerid);
417 			break;
418 		case IMSG_LS_UPD:
419 			nbr = rde_nbr_find(imsg.hdr.peerid);
420 			if (nbr == NULL)
421 				break;
422 
423 			lsa = malloc(imsg.hdr.len - IMSG_HEADER_SIZE);
424 			if (lsa == NULL)
425 				fatal(NULL);
426 			memcpy(lsa, imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
427 
428 			if (!lsa_check(nbr, lsa,
429 			    imsg.hdr.len - IMSG_HEADER_SIZE)) {
430 				free(lsa);
431 				break;
432 			}
433 
434 			v = lsa_find(nbr->iface, lsa->hdr.type, lsa->hdr.ls_id,
435 			    lsa->hdr.adv_rtr);
436 			if (v == NULL)
437 				db_hdr = NULL;
438 			else
439 				db_hdr = &v->lsa->hdr;
440 
441 			if (nbr->self) {
442 				lsa_merge(nbr, lsa, v);
443 				/* lsa_merge frees the right lsa */
444 				break;
445 			}
446 
447 			r = lsa_newer(&lsa->hdr, db_hdr);
448 			if (r > 0) {
449 				/* new LSA newer than DB */
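				/*
				 * MinLSArrival: drop instances that arrive
				 * within MIN_LS_ARRIVAL of the copy we last
				 * flooded (see the flooding procedure).
				 */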
450 				if (v && v->flooded &&
451 				    v->changed + MIN_LS_ARRIVAL >= now) {
452 					free(lsa);
453 					break;
454 				}
455 
456 				rde_req_list_del(nbr, &lsa->hdr);
457 
458 				if (!(self = lsa_self(nbr, lsa, v)))
459 					if (lsa_add(nbr, lsa))
460 						/* delayed lsa */
461 						break;
462 
463 				/* flood and perhaps ack LSA */
464 				imsg_compose_event(iev_ospfe, IMSG_LS_FLOOD,
465 				    imsg.hdr.peerid, 0, -1, lsa,
466 				    ntohs(lsa->hdr.len));
467 
468 				/* reflood self originated LSA */
469 				if (self && v)
470 					imsg_compose_event(iev_ospfe,
471 					    IMSG_LS_FLOOD, v->peerid, 0, -1,
472 					    v->lsa, ntohs(v->lsa->hdr.len));
473 				/* new LSA was not added so free it */
474 				if (self)
475 					free(lsa);
476 			} else if (r < 0) {
477 				/*
478 				 * point 6 of "The Flooding Procedure"
479 				 * We are violating the RFC here because
480 				 * it does not make sense to reset a session
481 				 * because an equal LSA is already in the table.
482 				 * Only if the LSA sent is older than the one
483 				 * in the table should we reset the session.
484 				 */
485 				if (rde_req_list_exists(nbr, &lsa->hdr)) {
486 					imsg_compose_event(iev_ospfe,
487 					    IMSG_LS_BADREQ, imsg.hdr.peerid,
488 					    0, -1, NULL, 0);
489 					free(lsa);
490 					break;
491 				}
492 
493 				/* lsa no longer needed */
494 				free(lsa);
495 
496 				/* new LSA older than DB */
497 				if (ntohl(db_hdr->seq_num) == MAX_SEQ_NUM &&
498 				    ntohs(db_hdr->age) == MAX_AGE)
499 					/* seq-num wrap */
500 					break;
501 
502 				if (v->changed + MIN_LS_ARRIVAL >= now)
503 					break;
504 
505 				/* directly send current LSA, no ack */
506 				imsg_compose_event(iev_ospfe, IMSG_LS_UPD,
507 				    imsg.hdr.peerid, 0, -1, v->lsa,
508 				    ntohs(v->lsa->hdr.len));
509 			} else {
510 				/* LSA equal send direct ack */
511 				imsg_compose_event(iev_ospfe, IMSG_LS_ACK,
512 				    imsg.hdr.peerid, 0, -1, &lsa->hdr,
513 				    sizeof(lsa->hdr));
514 				free(lsa);
515 			}
516 			break;
517 		case IMSG_LS_MAXAGE:
518 			nbr = rde_nbr_find(imsg.hdr.peerid);
519 			if (nbr == NULL)
520 				break;
521 
522 			if (imsg.hdr.len != IMSG_HEADER_SIZE +
523 			    sizeof(struct lsa_hdr))
524 				fatalx("invalid size of OE request");
525 			memcpy(&lsa_hdr, imsg.data, sizeof(lsa_hdr));
526 
527 			if (rde_nbr_loading(nbr->area))
528 				break;
529 
530 			v = lsa_find(nbr->iface, lsa_hdr.type, lsa_hdr.ls_id,
531 			    lsa_hdr.adv_rtr);
532 			if (v == NULL)
533 				db_hdr = NULL;
534 			else
535 				db_hdr = &v->lsa->hdr;
536 
537 			/*
538 			 * only delete LSA if the one in the db is not newer
539 			 */
540 			if (lsa_newer(db_hdr, &lsa_hdr) <= 0)
541 				lsa_del(nbr, &lsa_hdr);
542 			break;
543 		case IMSG_CTL_SHOW_DATABASE:
544 		case IMSG_CTL_SHOW_DB_EXT:
545 		case IMSG_CTL_SHOW_DB_LINK:
546 		case IMSG_CTL_SHOW_DB_NET:
547 		case IMSG_CTL_SHOW_DB_RTR:
548 		case IMSG_CTL_SHOW_DB_INTRA:
549 		case IMSG_CTL_SHOW_DB_SELF:
550 		case IMSG_CTL_SHOW_DB_SUM:
551 		case IMSG_CTL_SHOW_DB_ASBR:
552 			if (imsg.hdr.len != IMSG_HEADER_SIZE &&
553 			    imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(aid)) {
554 				log_warnx("rde_dispatch_imsg: wrong imsg len");
555 				break;
556 			}
557 			if (imsg.hdr.len == IMSG_HEADER_SIZE) {
558 				LIST_FOREACH(area, &rdeconf->area_list, entry) {
559 					rde_dump_area(area, imsg.hdr.type,
560 					    imsg.hdr.pid);
561 				}
562 				lsa_dump(&asext_tree, imsg.hdr.type,
563 				    imsg.hdr.pid);
564 			} else {
565 				memcpy(&aid, imsg.data, sizeof(aid));
566 				if ((area = area_find(rdeconf, aid)) != NULL) {
567 					rde_dump_area(area, imsg.hdr.type,
568 					    imsg.hdr.pid);
569 					if (!area->stub)
570 						lsa_dump(&asext_tree,
571 						    imsg.hdr.type,
572 						    imsg.hdr.pid);
573 				}
574 			}
575 			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
576 			    imsg.hdr.pid, -1, NULL, 0);
577 			break;
578 		case IMSG_CTL_SHOW_RIB:
579 			LIST_FOREACH(area, &rdeconf->area_list, entry) {
580 				imsg_compose_event(iev_ospfe, IMSG_CTL_AREA,
581 				    0, imsg.hdr.pid, -1, area, sizeof(*area));
582 
583 				rt_dump(area->id, imsg.hdr.pid, RIB_RTR);
584 				rt_dump(area->id, imsg.hdr.pid, RIB_NET);
585 			}
586 			aid.s_addr = 0;
587 			rt_dump(aid, imsg.hdr.pid, RIB_EXT);
588 
589 			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
590 			    imsg.hdr.pid, -1, NULL, 0);
591 			break;
592 		case IMSG_CTL_SHOW_SUM:
593 			rde_send_summary(imsg.hdr.pid);
594 			LIST_FOREACH(area, &rdeconf->area_list, entry)
595 				rde_send_summary_area(area, imsg.hdr.pid);
596 			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
597 			    imsg.hdr.pid, -1, NULL, 0);
598 			break;
599 		case IMSG_IFINFO:
600 			if (imsg.hdr.len != IMSG_HEADER_SIZE +
601 			    sizeof(int))
602 				fatalx("IFINFO imsg with wrong len");
603 
604 			nbr = rde_nbr_find(imsg.hdr.peerid);
605 			if (nbr == NULL)
606 				fatalx("IFINFO imsg with bad peerid");
607 			memcpy(&nbr->iface->state, imsg.data, sizeof(int));
608 
609 			/* Resend LSAs if interface state changes. */
610 			orig_intra_area_prefix_lsas(nbr->area);
611 			break;
612 		case IMSG_CTL_LOG_VERBOSE:
613 			/* already checked by ospfe */
614 			memcpy(&verbose, imsg.data, sizeof(verbose));
615 			log_setverbose(verbose);
616 			break;
617 		default:
618 			log_debug("rde_dispatch_imsg: unexpected imsg %d",
619 			    imsg.hdr.type);
620 			break;
621 		}
622 		imsg_free(&imsg);
623 	}
624 	if (!shut)
625 		imsg_event_add(iev);
626 	else {
627 		/* this pipe is dead, so remove the event handler */
628 		event_del(&iev->ev);
629 		event_loopexit(NULL);
630 	}
631 }
632 
633 /* ARGSUSED */
634 void
635 rde_dispatch_parent(int fd, short event, void *bula)
636 {
637 	static struct area	*narea;
638 	struct area		*area;
639 	struct iface		*iface, *ifp, *i;
640 	struct ifaddrchange	*ifc;
641 	struct iface_addr	*ia, *nia;
642 	struct imsg		 imsg;
643 	struct kroute		 kr;
644 	struct imsgev		*iev = bula;
645 	struct imsgbuf		*ibuf = &iev->ibuf;
646 	struct lsa		*lsa;
647 	struct vertex		*v;
648 	ssize_t			 n;
649 	int			 shut = 0, link_ok, prev_link_ok, orig_lsa;
650 	unsigned int		 ifindex;
651 
652 	if (event & EV_READ) {
653 		if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN)
654 			fatal("imsg_read error");
655 		if (n == 0)	/* connection closed */
656 			shut = 1;
657 	}
658 	if (event & EV_WRITE) {
659 		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
660 			fatal("msgbuf_write");
661 		if (n == 0)	/* connection closed */
662 			shut = 1;
663 	}
664 
665 	for (;;) {
666 		if ((n = imsg_get(ibuf, &imsg)) == -1)
667 			fatal("rde_dispatch_parent: imsg_get error");
668 		if (n == 0)
669 			break;
670 
671 		switch (imsg.hdr.type) {
672 		case IMSG_NETWORK_ADD:
673 			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(kr)) {
674 				log_warnx("rde_dispatch_parent: "
675 				    "wrong imsg len");
676 				break;
677 			}
678 			memcpy(&kr, imsg.data, sizeof(kr));
679 
680 			if ((lsa = rde_asext_get(&kr)) != NULL) {
681 				v = lsa_find(NULL, lsa->hdr.type,
682 				    lsa->hdr.ls_id, lsa->hdr.adv_rtr);
683 
684 				lsa_merge(nbrself, lsa, v);
685 			}
686 			break;
687 		case IMSG_NETWORK_DEL:
688 			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(kr)) {
689 				log_warnx("rde_dispatch_parent: "
690 				    "wrong imsg len");
691 				break;
692 			}
693 			memcpy(&kr, imsg.data, sizeof(kr));
694 
695 			if ((lsa = rde_asext_put(&kr)) != NULL) {
696 				v = lsa_find(NULL, lsa->hdr.type,
697 				    lsa->hdr.ls_id, lsa->hdr.adv_rtr);
698 
699 				/*
700 				 * if v == NULL no LSA is in the table and
701 				 * nothing has to be done.
702 				 */
703 				if (v)
704 					lsa_merge(nbrself, lsa, v);
705 				else
706 					free(lsa);
707 			}
708 			break;
709 		case IMSG_IFINFO:
710 			if (imsg.hdr.len != IMSG_HEADER_SIZE +
711 			    sizeof(struct iface))
712 				fatalx("IFINFO imsg with wrong len");
713 
714 			ifp = imsg.data;
715 
716 			LIST_FOREACH(area, &rdeconf->area_list, entry) {
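			/*
			 * Update depend_ok on every interface whose
			 * "dependon" names the interface that changed and
			 * re-originate the area's intra-area-prefix LSAs
			 * if one of them is up.
			 */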
717 				orig_lsa = 0;
718 				LIST_FOREACH(i, &area->iface_list, entry) {
719 					if (strcmp(i->dependon,
720 					    ifp->name) == 0) {
721 						i->depend_ok =
722 						    ifstate_is_up(ifp);
723 						if (ifstate_is_up(i))
724 							orig_lsa = 1;
725 					}
726 				}
727 				if (orig_lsa)
728 					orig_intra_area_prefix_lsas(area);
729 			}
730 
731 			if (!(ifp->cflags & F_IFACE_CONFIGURED))
732 				break;
733 			iface = if_find(ifp->ifindex);
734 			if (iface == NULL)
735 				fatalx("interface lost in rde");
736 
737 			prev_link_ok = (iface->flags & IFF_UP) &&
738 			    LINK_STATE_IS_UP(iface->linkstate);
739 
740 			if_update(iface, ifp->mtu, ifp->flags, ifp->if_type,
741 			    ifp->linkstate, ifp->baudrate, ifp->rdomain);
742 
743 			/* Resend LSAs if interface state changes. */
744 			link_ok = (iface->flags & IFF_UP) &&
745 			          LINK_STATE_IS_UP(iface->linkstate);
746 			if (prev_link_ok == link_ok)
747 				break;
748 
749 			orig_intra_area_prefix_lsas(iface->area);
750 
751 			break;
752 		case IMSG_IFADD:
753 			if ((iface = malloc(sizeof(struct iface))) == NULL)
754 				fatal(NULL);
755 			memcpy(iface, imsg.data, sizeof(struct iface));
756 
757 			LIST_INIT(&iface->nbr_list);
758 			TAILQ_INIT(&iface->ls_ack_list);
759 			RB_INIT(&iface->lsa_tree);
760 
761 			LIST_INSERT_HEAD(&iface->area->iface_list, iface, entry);
762 			break;
763 		case IMSG_IFDELETE:
764 			if (imsg.hdr.len != IMSG_HEADER_SIZE +
765 			    sizeof(ifindex))
766 				fatalx("IFDELETE imsg with wrong len");
767 
768 			memcpy(&ifindex, imsg.data, sizeof(ifindex));
769 			iface = if_find(ifindex);
770 			if (iface == NULL)
771 				fatalx("interface lost in rde");
772 
773 			LIST_REMOVE(iface, entry);
774 			if_del(iface);
775 			break;
776 		case IMSG_IFADDRNEW:
777 			if (imsg.hdr.len != IMSG_HEADER_SIZE +
778 			    sizeof(struct ifaddrchange))
779 				fatalx("IFADDRNEW imsg with wrong len");
780 			ifc = imsg.data;
781 
782 			iface = if_find(ifc->ifindex);
783 			if (iface == NULL)
784 				fatalx("IFADDRNEW interface lost in rde");
785 
786 			if ((ia = calloc(1, sizeof(struct iface_addr))) ==
787 			    NULL)
788 				fatal("rde_dispatch_parent IFADDRNEW");
789 			ia->addr = ifc->addr;
790 			ia->dstbrd = ifc->dstbrd;
791 			ia->prefixlen = ifc->prefixlen;
792 
793 			TAILQ_INSERT_TAIL(&iface->ifa_list, ia, entry);
794 			if (iface->area)
795 				orig_intra_area_prefix_lsas(iface->area);
796 			break;
797 		case IMSG_IFADDRDEL:
798 			if (imsg.hdr.len != IMSG_HEADER_SIZE +
799 			    sizeof(struct ifaddrchange))
800 				fatalx("IFADDRDEL imsg with wrong len");
801 			ifc = imsg.data;
802 
803 			iface = if_find(ifc->ifindex);
804 			if (iface == NULL)
805 				fatalx("IFADDRDEL interface lost in rde");
806 
807 			for (ia = TAILQ_FIRST(&iface->ifa_list); ia != NULL;
808 			    ia = nia) {
809 				nia = TAILQ_NEXT(ia, entry);
810 
811 				if (IN6_ARE_ADDR_EQUAL(&ia->addr,
812 				    &ifc->addr)) {
813 					TAILQ_REMOVE(&iface->ifa_list, ia,
814 					    entry);
815 					free(ia);
816 					break;
817 				}
818 			}
819 			if (iface->area)
820 				orig_intra_area_prefix_lsas(iface->area);
821 			break;
822 		case IMSG_RECONF_CONF:
823 			if ((nconf = malloc(sizeof(struct ospfd_conf))) ==
824 			    NULL)
825 				fatal(NULL);
826 			memcpy(nconf, imsg.data, sizeof(struct ospfd_conf));
827 
828 			LIST_INIT(&nconf->area_list);
829 			LIST_INIT(&nconf->cand_list);
830 			break;
831 		case IMSG_RECONF_AREA:
832 			if ((narea = area_new()) == NULL)
833 				fatal(NULL);
834 			memcpy(narea, imsg.data, sizeof(struct area));
835 
836 			LIST_INIT(&narea->iface_list);
837 			LIST_INIT(&narea->nbr_list);
838 			RB_INIT(&narea->lsa_tree);
839 
840 			LIST_INSERT_HEAD(&nconf->area_list, narea, entry);
841 			break;
842 		case IMSG_RECONF_END:
843 			merge_config(rdeconf, nconf);
844 			nconf = NULL;
845 			break;
846 		default:
847 			log_debug("rde_dispatch_parent: unexpected imsg %d",
848 			    imsg.hdr.type);
849 			break;
850 		}
851 		imsg_free(&imsg);
852 	}
853 	if (!shut)
854 		imsg_event_add(iev);
855 	else {
856 		/* this pipe is dead, so remove the event handler */
857 		event_del(&iev->ev);
858 		event_loopexit(NULL);
859 	}
860 }
861 
862 void
863 rde_dump_area(struct area *area, int imsg_type, pid_t pid)
864 {
865 	struct iface	*iface;
866 
867 	/* dump header */
868 	imsg_compose_event(iev_ospfe, IMSG_CTL_AREA, 0, pid, -1,
869 	    area, sizeof(*area));
870 
871 	/* dump link local lsa */
872 	LIST_FOREACH(iface, &area->iface_list, entry) {
873 		imsg_compose_event(iev_ospfe, IMSG_CTL_IFACE,
874 		    0, pid, -1, iface, sizeof(*iface));
875 		lsa_dump(&iface->lsa_tree, imsg_type, pid);
876 	}
877 
878 	/* dump area lsa */
879 	lsa_dump(&area->lsa_tree, imsg_type, pid);
880 }
881 
882 u_int32_t
883 rde_router_id(void)
884 {
885 	return (rdeconf->rtr_id.s_addr);
886 }
887 
888 void
889 rde_send_change_kroute(struct rt_node *r)
890 {
891 	int			 krcount = 0;
892 	struct kroute		 kr;
893 	struct rt_nexthop	*rn;
894 	struct ibuf		*wbuf;
895 
896 	if ((wbuf = imsg_create(&iev_main->ibuf, IMSG_KROUTE_CHANGE, 0, 0,
897 	    sizeof(kr))) == NULL) {
898 		return;
899 	}
900 
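	/* add one kroute per valid nexthop so equal-cost paths go out in a
	 * single IMSG_KROUTE_CHANGE message */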
901 	TAILQ_FOREACH(rn, &r->nexthop, entry) {
902 		if (rn->invalid)
903 			continue;
904 		krcount++;
905 
906 		bzero(&kr, sizeof(kr));
907 		kr.prefix = r->prefix;
908 		kr.nexthop = rn->nexthop;
909 		if (IN6_IS_ADDR_LINKLOCAL(&rn->nexthop) ||
910 		    IN6_IS_ADDR_MC_LINKLOCAL(&rn->nexthop))
911 			kr.scope = rn->ifindex;
912 		kr.ifindex = rn->ifindex;
913 		kr.prefixlen = r->prefixlen;
914 		kr.ext_tag = r->ext_tag;
915 		imsg_add(wbuf, &kr, sizeof(kr));
916 	}
917 	if (krcount == 0)
918 		fatalx("rde_send_change_kroute: no valid nexthop found");
919 
920 	imsg_close(&iev_main->ibuf, wbuf);
921 	imsg_event_add(iev_main);
922 }
923 
924 void
925 rde_send_delete_kroute(struct rt_node *r)
926 {
927 	struct kroute	 kr;
928 
929 	bzero(&kr, sizeof(kr));
930 	kr.prefix = r->prefix;
931 	kr.prefixlen = r->prefixlen;
932 
933 	imsg_compose_event(iev_main, IMSG_KROUTE_DELETE, 0, 0, -1,
934 	    &kr, sizeof(kr));
935 }
936 
937 void
938 rde_send_summary(pid_t pid)
939 {
940 	static struct ctl_sum	 sumctl;
941 	struct timeval		 now;
942 	struct area		*area;
943 	struct vertex		*v;
944 
945 	bzero(&sumctl, sizeof(struct ctl_sum));
946 
947 	sumctl.rtr_id.s_addr = rde_router_id();
948 	sumctl.spf_delay = rdeconf->spf_delay;
949 	sumctl.spf_hold_time = rdeconf->spf_hold_time;
950 
951 	LIST_FOREACH(area, &rdeconf->area_list, entry)
952 		sumctl.num_area++;
953 
954 	RB_FOREACH(v, lsa_tree, &asext_tree)
955 		sumctl.num_ext_lsa++;
956 
957 	gettimeofday(&now, NULL);
958 	if (rdeconf->uptime < now.tv_sec)
959 		sumctl.uptime = now.tv_sec - rdeconf->uptime;
960 	else
961 		sumctl.uptime = 0;
962 
963 	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM, 0, pid, &sumctl,
964 	    sizeof(sumctl));
965 }
966 
967 void
968 rde_send_summary_area(struct area *area, pid_t pid)
969 {
970 	static struct ctl_sum_area	 sumareactl;
971 	struct iface			*iface;
972 	struct rde_nbr			*nbr;
973 	struct lsa_tree			*tree = &area->lsa_tree;
974 	struct vertex			*v;
975 
976 	bzero(&sumareactl, sizeof(struct ctl_sum_area));
977 
978 	sumareactl.area.s_addr = area->id.s_addr;
979 	sumareactl.num_spf_calc = area->num_spf_calc;
980 
981 	LIST_FOREACH(iface, &area->iface_list, entry)
982 		sumareactl.num_iface++;
983 
984 	LIST_FOREACH(nbr, &area->nbr_list, entry)
985 		if (nbr->state == NBR_STA_FULL && !nbr->self)
986 			sumareactl.num_adj_nbr++;
987 
988 	RB_FOREACH(v, lsa_tree, tree)
989 		sumareactl.num_lsa++;
990 
991 	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM_AREA, 0, pid, &sumareactl,
992 	    sizeof(sumareactl));
993 }
994 
995 LIST_HEAD(rde_nbr_head, rde_nbr);
996 
997 struct nbr_table {
998 	struct rde_nbr_head	*hashtbl;
999 	u_int32_t		 hashmask;
1000 } rdenbrtable;
1001 
1002 #define RDE_NBR_HASH(x)		\
1003 	&rdenbrtable.hashtbl[(x) & rdenbrtable.hashmask]
1004 
1005 void
1006 rde_nbr_init(u_int32_t hashsize)
1007 {
1008 	struct rde_nbr_head	*head;
1009 	u_int32_t		 hs, i;
1010 
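	/* round the requested size up to a power of two so the hash mask
	 * computed below selects a valid bucket */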
1011 	for (hs = 1; hs < hashsize; hs <<= 1)
1012 		;
1013 	rdenbrtable.hashtbl = calloc(hs, sizeof(struct rde_nbr_head));
1014 	if (rdenbrtable.hashtbl == NULL)
1015 		fatal("rde_nbr_init");
1016 
1017 	for (i = 0; i < hs; i++)
1018 		LIST_INIT(&rdenbrtable.hashtbl[i]);
1019 
1020 	rdenbrtable.hashmask = hs - 1;
1021 
1022 	if ((nbrself = calloc(1, sizeof(*nbrself))) == NULL)
1023 		fatal("rde_nbr_init");
1024 
1025 	nbrself->id.s_addr = rde_router_id();
1026 	nbrself->peerid = NBR_IDSELF;
1027 	nbrself->state = NBR_STA_DOWN;
1028 	nbrself->self = 1;
1029 	head = RDE_NBR_HASH(NBR_IDSELF);
1030 	LIST_INSERT_HEAD(head, nbrself, hash);
1031 }
1032 
1033 void
1034 rde_nbr_free(void)
1035 {
1036 	free(nbrself);
1037 	free(rdenbrtable.hashtbl);
1038 }
1039 
1040 struct rde_nbr *
1041 rde_nbr_find(u_int32_t peerid)
1042 {
1043 	struct rde_nbr_head	*head;
1044 	struct rde_nbr		*nbr;
1045 
1046 	head = RDE_NBR_HASH(peerid);
1047 
1048 	LIST_FOREACH(nbr, head, hash) {
1049 		if (nbr->peerid == peerid)
1050 			return (nbr);
1051 	}
1052 
1053 	return (NULL);
1054 }
1055 
1056 struct rde_nbr *
1057 rde_nbr_new(u_int32_t peerid, struct rde_nbr *new)
1058 {
1059 	struct rde_nbr_head	*head;
1060 	struct rde_nbr		*nbr;
1061 	struct area		*area;
1062 	struct iface		*iface;
1063 
1064 	if (rde_nbr_find(peerid))
1065 		return (NULL);
1066 	if ((area = area_find(rdeconf, new->area_id)) == NULL)
1067 		fatalx("rde_nbr_new: unknown area");
1068 
1069 	if ((iface = if_find(new->ifindex)) == NULL)
1070 		fatalx("rde_nbr_new: unknown interface");
1071 
1072 	if ((nbr = calloc(1, sizeof(*nbr))) == NULL)
1073 		fatal("rde_nbr_new");
1074 
1075 	memcpy(nbr, new, sizeof(*nbr));
1076 	nbr->peerid = peerid;
1077 	nbr->area = area;
1078 	nbr->iface = iface;
1079 
1080 	TAILQ_INIT(&nbr->req_list);
1081 
1082 	head = RDE_NBR_HASH(peerid);
1083 	LIST_INSERT_HEAD(head, nbr, hash);
1084 	LIST_INSERT_HEAD(&area->nbr_list, nbr, entry);
1085 
1086 	return (nbr);
1087 }
1088 
1089 void
1090 rde_nbr_del(struct rde_nbr *nbr)
1091 {
1092 	if (nbr == NULL)
1093 		return;
1094 
1095 	rde_req_list_free(nbr);
1096 
1097 	LIST_REMOVE(nbr, entry);
1098 	LIST_REMOVE(nbr, hash);
1099 
1100 	free(nbr);
1101 }
1102 
1103 int
1104 rde_nbr_loading(struct area *area)
1105 {
1106 	struct rde_nbr		*nbr;
1107 	int			 checkall = 0;
1108 
1109 	if (area == NULL) {
1110 		area = LIST_FIRST(&rdeconf->area_list);
1111 		checkall = 1;
1112 	}
1113 
1114 	while (area != NULL) {
1115 		LIST_FOREACH(nbr, &area->nbr_list, entry) {
1116 			if (nbr->self)
1117 				continue;
1118 			if (nbr->state & NBR_STA_XCHNG ||
1119 			    nbr->state & NBR_STA_LOAD)
1120 				return (1);
1121 		}
1122 		if (!checkall)
1123 			break;
1124 		area = LIST_NEXT(area, entry);
1125 	}
1126 
1127 	return (0);
1128 }
1129 
1130 struct rde_nbr *
1131 rde_nbr_self(struct area *area)
1132 {
1133 	struct rde_nbr		*nbr;
1134 
1135 	LIST_FOREACH(nbr, &area->nbr_list, entry)
1136 		if (nbr->self)
1137 			return (nbr);
1138 
1139 	/* this may not happen */
1140 	fatalx("rde_nbr_self: area without self");
1141 	return (NULL);
1142 }
1143 
1144 /*
1145  * LSA req list
1146  */
1147 void
1148 rde_req_list_add(struct rde_nbr *nbr, struct lsa_hdr *lsa)
1149 {
1150 	struct rde_req_entry	*le;
1151 
1152 	if ((le = calloc(1, sizeof(*le))) == NULL)
1153 		fatal("rde_req_list_add");
1154 
1155 	TAILQ_INSERT_TAIL(&nbr->req_list, le, entry);
1156 	le->type = lsa->type;
1157 	le->ls_id = lsa->ls_id;
1158 	le->adv_rtr = lsa->adv_rtr;
1159 }
1160 
1161 int
1162 rde_req_list_exists(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
1163 {
1164 	struct rde_req_entry	*le;
1165 
1166 	TAILQ_FOREACH(le, &nbr->req_list, entry) {
1167 		if ((lsa_hdr->type == le->type) &&
1168 		    (lsa_hdr->ls_id == le->ls_id) &&
1169 		    (lsa_hdr->adv_rtr == le->adv_rtr))
1170 			return (1);
1171 	}
1172 	return (0);
1173 }
1174 
1175 void
1176 rde_req_list_del(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
1177 {
1178 	struct rde_req_entry	*le;
1179 
1180 	TAILQ_FOREACH(le, &nbr->req_list, entry) {
1181 		if ((lsa_hdr->type == le->type) &&
1182 		    (lsa_hdr->ls_id == le->ls_id) &&
1183 		    (lsa_hdr->adv_rtr == le->adv_rtr)) {
1184 			TAILQ_REMOVE(&nbr->req_list, le, entry);
1185 			free(le);
1186 			return;
1187 		}
1188 	}
1189 }
1190 
1191 void
1192 rde_req_list_free(struct rde_nbr *nbr)
1193 {
1194 	struct rde_req_entry	*le;
1195 
1196 	while ((le = TAILQ_FIRST(&nbr->req_list)) != NULL) {
1197 		TAILQ_REMOVE(&nbr->req_list, le, entry);
1198 		free(le);
1199 	}
1200 }
1201 
1202 /*
1203  * as-external LSA handling
1204  */
1205 struct lsa *
1206 rde_asext_get(struct kroute *kr)
1207 {
1208 	struct area		*area;
1209 	struct iface		*iface;
1210 	struct iface_addr	*ia;
1211 	struct in6_addr		 addr;
1212 
1213 	LIST_FOREACH(area, &rdeconf->area_list, entry)
1214 		LIST_FOREACH(iface, &area->iface_list, entry)
1215 			TAILQ_FOREACH(ia, &iface->ifa_list, entry) {
1216 				if (IN6_IS_ADDR_LINKLOCAL(&ia->addr))
1217 					continue;
1218 
1219 				inet6applymask(&addr, &ia->addr,
1220 				    kr->prefixlen);
1221 				if (!memcmp(&addr, &kr->prefix,
1222 				    sizeof(addr)) && kr->prefixlen ==
1223 				    ia->prefixlen) {
1224 					/* already announced as Prefix LSA */
1225 					log_debug("rde_asext_get: %s/%d is "
1226 					    "part of prefix LSA",
1227 					    log_in6addr(&kr->prefix),
1228 					    kr->prefixlen);
1229 					return (NULL);
1230 				}
1231 			}
1232 
1233 	/* update of seqnum is done by lsa_merge */
1234 	return (orig_asext_lsa(kr, DEFAULT_AGE));
1235 }
1236 
1237 struct lsa *
1238 rde_asext_put(struct kroute *kr)
1239 {
1240 	/*
1241 	 * just try to remove the LSA. If the prefix is announced as
1242 	 * a stub net LSA, lsa_find() will fail later and nothing will happen.
1243 	 */
1244 
1245 	/* remove by reflooding with MAX_AGE */
1246 	return (orig_asext_lsa(kr, MAX_AGE));
1247 }
1248 
1249 /*
1250  * summary LSA stuff
1251  */
1252 void
1253 rde_summary_update(struct rt_node *rte, struct area *area)
1254 {
1255 	struct vertex		*v = NULL;
1256 //XXX	struct lsa		*lsa;
1257 	u_int16_t		 type = 0;
1258 
1259 	/* first check if we actually need to announce this route */
1260 	if (!(rte->d_type == DT_NET || rte->flags & OSPF_RTR_E))
1261 		return;
1262 	/* never create summaries for as-ext LSA */
1263 	if (rte->p_type == PT_TYPE1_EXT || rte->p_type == PT_TYPE2_EXT)
1264 		return;
1265 	/* no need for summary LSA in the originating area */
1266 	if (rte->area.s_addr == area->id.s_addr)
1267 		return;
1268 	/* no need to originate inter-area routes to the backbone */
1269 	if (rte->p_type == PT_INTER_AREA && area->id.s_addr == INADDR_ANY)
1270 		return;
1271 	/* TODO nexthop check, nexthop part of area -> no summary */
1272 	if (rte->cost >= LS_INFINITY)
1273 		return;
1274 	/* TODO AS border router specific checks */
1275 	/* TODO inter-area network route stuff */
1276 	/* TODO intra-area stuff -- condense LSA ??? */
1277 
1278 	if (rte->d_type == DT_NET) {
1279 		type = LSA_TYPE_INTER_A_PREFIX;
1280 	} else if (rte->d_type == DT_RTR) {
1281 		type = LSA_TYPE_INTER_A_ROUTER;
1282 	} else
1283 
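	/*
	 * Note: with the block below compiled out, the trailing "if (v)"
	 * becomes the body of the else above; since v is never set, the
	 * cost update never runs and summary origination is still a no-op.
	 */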
1284 #if 0 /* XXX a lot todo */
1285 	/* update lsa but only if it was changed */
1286 	v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id());
1287 	lsa = orig_sum_lsa(rte, area, type, rte->invalid);
1288 	lsa_merge(rde_nbr_self(area), lsa, v);
1289 
1290 	if (v == NULL)
1291 		v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id());
1292 #endif
1293 
1294 	/* suppressed/deleted routes are not found in the second lsa_find */
1295 	if (v)
1296 		v->cost = rte->cost;
1297 }
1298 
1299 /*
1300  * Functions for self-originated LSAs
1301  */
1302 
1303 /* Prefix LSAs have variable size. We have to be careful to copy the right
1304  * number of bytes, and to realloc() the right amount of memory. */
1305 void
1306 append_prefix_lsa(struct lsa **lsa, u_int16_t *len, struct lsa_prefix *prefix)
1307 {
1308 	struct lsa_prefix	*copy;
1309 	unsigned int		 lsa_prefix_len;
1310 	unsigned int		 new_len;
1311 	char			*new_lsa;
1312 
1313 	lsa_prefix_len = sizeof(struct lsa_prefix)
1314 	    + LSA_PREFIXSIZE(prefix->prefixlen);
1315 
1316 	new_len = *len + lsa_prefix_len;
1317 
1318 	/* Make sure we have enough space for this prefix. */
1319 	if ((new_lsa = realloc(*lsa, new_len)) == NULL)
1320 		fatalx("append_prefix_lsa");
1321 
1322 	/* Append prefix to LSA. */
1323 	copy = (struct lsa_prefix *)(new_lsa + *len);
1324 	memcpy(copy, prefix, lsa_prefix_len);
1325 
1326 	*lsa = (struct lsa *)new_lsa;
1327 	*len = new_len;
1328 }
1329 
1330 int
1331 prefix_compare(struct prefix_node *a, struct prefix_node *b)
1332 {
1333 	struct lsa_prefix	*p;
1334 	struct lsa_prefix	*q;
1335 	int			 i;
1336 	int			 len;
1337 
1338 	p = a->prefix;
1339 	q = b->prefix;
1340 
1341 	len = MINIMUM(LSA_PREFIXSIZE(p->prefixlen), LSA_PREFIXSIZE(q->prefixlen));
1342 
1343 	i = memcmp(p + 1, q + 1, len);
1344 	if (i)
1345 		return (i);
1346 	if (p->prefixlen < q->prefixlen)
1347 		return (-1);
1348 	if (p->prefixlen > q->prefixlen)
1349 		return (1);
1350 	return (0);
1351 }
1352 
1353 void
1354 prefix_tree_add(struct prefix_tree *tree, struct lsa_link *lsa)
1355 {
1356 	struct prefix_node	*old;
1357 	struct prefix_node	*new;
1358 	struct in6_addr		 addr;
1359 	unsigned int		 len;
1360 	unsigned int		 i;
1361 	char			*cur_prefix;
1362 
1363 	cur_prefix = (char *)(lsa + 1);
1364 
1365 	for (i = 0; i < ntohl(lsa->numprefix); i++) {
1366 		if ((new = calloc(1, sizeof(*new))) == NULL)
1367 			fatal("prefix_tree_add");
1368 		new->prefix = (struct lsa_prefix *)cur_prefix;
1369 
1370 		len = sizeof(*new->prefix)
1371 		    + LSA_PREFIXSIZE(new->prefix->prefixlen);
1372 
1373 		bzero(&addr, sizeof(addr));
1374 		memcpy(&addr, new->prefix + 1,
1375 		    LSA_PREFIXSIZE(new->prefix->prefixlen));
1376 
1377 		new->prefix->metric = 0;
1378 
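		/*
		 * Only prefixes usable in an intra-area-prefix LSA are kept:
		 * skip link-local addresses and prefixes with the NU
		 * (no unicast) or LA (local address) option bit set.
		 * Duplicates merge their option bits below.
		 */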
1379 		if (!(IN6_IS_ADDR_LINKLOCAL(&addr)) &&
1380 		    (new->prefix->options & OSPF_PREFIX_NU) == 0 &&
1381 		    (new->prefix->options & OSPF_PREFIX_LA) == 0) {
1382 			old = RB_INSERT(prefix_tree, tree, new);
1383 			if (old != NULL) {
1384 				old->prefix->options |= new->prefix->options;
1385 				free(new);
1386 			}
1387 		} else
1388 			free(new);
1389 
1390 		cur_prefix = cur_prefix + len;
1391 	}
1392 }
1393 
1394 RB_GENERATE(prefix_tree, prefix_node, entry, prefix_compare)
1395 
1396 struct lsa *
1397 orig_intra_lsa_net(struct area *area, struct iface *iface, struct vertex *old)
1398 {
1399 	struct lsa		*lsa;
1400 	struct vertex		*v;
1401 	struct rde_nbr		*nbr;
1402 	struct prefix_node	*node;
1403 	struct prefix_tree	 tree;
1404 	int			 num_full_nbr;
1405 	u_int16_t		 len;
1406 	u_int16_t		 numprefix;
1407 
1408 	log_debug("orig_intra_lsa_net: area %s, interface %s",
1409 	    inet_ntoa(area->id), iface->name);
1410 
1411 	RB_INIT(&tree);
1412 
1413 	if (iface->state & IF_STA_DR) {
1414 		num_full_nbr = 0;
1415 		LIST_FOREACH(nbr, &area->nbr_list, entry) {
1416 			if (nbr->self ||
1417 			    nbr->iface->ifindex != iface->ifindex ||
1418 			    (nbr->state & NBR_STA_FULL) == 0)
1419 				continue;
1420 			num_full_nbr++;
1421 			v = lsa_find(iface, htons(LSA_TYPE_LINK),
1422 			    htonl(nbr->iface_id), nbr->id.s_addr);
1423 			if (v)
1424 				prefix_tree_add(&tree, &v->lsa->data.link);
1425 		}
1426 		if (num_full_nbr == 0) {
1427 			/* There are no adjacent neighbors on link.
1428 			 * If a copy of this LSA already exists in DB,
1429 			 * it needs to be flushed. orig_intra_lsa_rtr()
1430 			 * will take care of prefixes configured on
1431 			 * this interface. */
1432 			if (!old)
1433 				return NULL;
1434 		} else {
1435 			/* Add our own prefixes configured for this link. */
1436 			v = lsa_find(iface, htons(LSA_TYPE_LINK),
1437 			    htonl(iface->ifindex), rde_router_id());
1438 			if (v)
1439 				prefix_tree_add(&tree, &v->lsa->data.link);
1440 		}
1441 	/* Continue only if a copy of this LSA already exists in DB.
1442 	 * It needs to be flushed. */
1443 	} else if (!old)
1444 		return NULL;
1445 
1446 	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_intra_prefix);
1447 	if ((lsa = calloc(1, len)) == NULL)
1448 		fatal("orig_intra_lsa_net");
1449 
1450 	lsa->data.pref_intra.ref_type = htons(LSA_TYPE_NETWORK);
1451 	lsa->data.pref_intra.ref_ls_id = htonl(iface->ifindex);
1452 	lsa->data.pref_intra.ref_adv_rtr = rde_router_id();
1453 
1454 	numprefix = 0;
1455 	RB_FOREACH(node, prefix_tree, &tree) {
1456 		append_prefix_lsa(&lsa, &len, node->prefix);
1457 		numprefix++;
1458 	}
1459 
1460 	lsa->data.pref_intra.numprefix = htons(numprefix);
1461 
1462 	while (!RB_EMPTY(&tree))
1463 		free(RB_REMOVE(prefix_tree, &tree, RB_ROOT(&tree)));
1464 
1465 	/* LSA header */
1466 	/* If numprefix is zero, originate with MAX_AGE to flush LSA. */
1467 	lsa->hdr.age = numprefix == 0 ? htons(MAX_AGE) : htons(DEFAULT_AGE);
1468 	lsa->hdr.type = htons(LSA_TYPE_INTRA_A_PREFIX);
1469 	lsa->hdr.ls_id = htonl(iface->ifindex);
1470 	lsa->hdr.adv_rtr = rde_router_id();
1471 	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
1472 	lsa->hdr.len = htons(len);
1473 	lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));
1474 
1475 	return lsa;
1476 }
1477 
1478 struct lsa *
1479 orig_intra_lsa_rtr(struct area *area, struct vertex *old)
1480 {
1481 	char			lsa_prefix_buf[sizeof(struct lsa_prefix)
1482 				    + sizeof(struct in6_addr)];
1483 	struct lsa		*lsa;
1484 	struct lsa_prefix	*lsa_prefix;
1485 	struct in6_addr		*prefix;
1486 	struct iface		*iface;
1487 	struct iface_addr	*ia;
1488 	struct rde_nbr		*nbr;
1489 	u_int16_t		 len;
1490 	u_int16_t		 numprefix;
1491 
1492 	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_intra_prefix);
1493 	if ((lsa = calloc(1, len)) == NULL)
1494 		fatal("orig_intra_lsa_rtr");
1495 
1496 	lsa->data.pref_intra.ref_type = htons(LSA_TYPE_ROUTER);
1497 	lsa->data.pref_intra.ref_ls_id = 0;
1498 	lsa->data.pref_intra.ref_adv_rtr = rde_router_id();
1499 
1500 	numprefix = 0;
1501 	LIST_FOREACH(iface, &area->iface_list, entry) {
1502 		if (!((iface->flags & IFF_UP) &&
1503 		    LINK_STATE_IS_UP(iface->linkstate)) &&
1504 		    !(iface->if_type == IFT_CARP))
1505 			/* interface or link state down
1506 			 * and not a carp interface */
1507 			continue;
1508 
1509 		if (iface->if_type == IFT_CARP &&
1510 		    (iface->linkstate == LINK_STATE_UNKNOWN ||
1511 		    iface->linkstate == LINK_STATE_INVALID))
1512 			/* carp interface in state invalid or unknown */
1513 			continue;
1514 
1515 		if ((iface->state & IF_STA_DOWN) &&
1516 		    !(iface->cflags & F_IFACE_PASSIVE))
1517 			/* passive interfaces stay in state DOWN */
1518 			continue;
1519 
1520 		/* Broadcast links with adjacencies are handled
1521 		 * by orig_intra_lsa_net(), ignore. */
1522 		if (iface->type == IF_TYPE_BROADCAST ||
1523 		    iface->type == IF_TYPE_NBMA) {
1524 			if (iface->state & IF_STA_WAITING)
1525 				/* Skip, we're still waiting for
1526 				 * adjacencies to form. */
1527 				continue;
1528 
1529 			LIST_FOREACH(nbr, &area->nbr_list, entry)
1530 				if (!nbr->self &&
1531 				    nbr->iface->ifindex == iface->ifindex &&
1532 				    nbr->state & NBR_STA_FULL)
1533 					break;
1534 			if (nbr)
1535 				continue;
1536 		}
1537 
1538 		lsa_prefix = (struct lsa_prefix *)lsa_prefix_buf;
1539 
1540 		TAILQ_FOREACH(ia, &iface->ifa_list, entry) {
1541 			if (IN6_IS_ADDR_LINKLOCAL(&ia->addr))
1542 				continue;
1543 
1544 			bzero(lsa_prefix_buf, sizeof(lsa_prefix_buf));
1545 
1546 			if (iface->type == IF_TYPE_POINTOMULTIPOINT ||
1547 			    iface->state & IF_STA_LOOPBACK) {
1548 				lsa_prefix->prefixlen = 128;
1549 				lsa_prefix->metric = 0;
1550 			} else if ((iface->if_type == IFT_CARP &&
1551 				   iface->linkstate == LINK_STATE_DOWN) ||
1552 				   !(iface->depend_ok)) {
1553 				/* carp interfaces in state backup are
1554 				 * announced with high metric for faster
1555 				 * failover. */
1556 				lsa_prefix->prefixlen = ia->prefixlen;
1557 				lsa_prefix->metric = MAX_METRIC;
1558 			} else {
1559 				lsa_prefix->prefixlen = ia->prefixlen;
1560 				lsa_prefix->metric = htons(iface->metric);
1561 			}
1562 
1563 			if (lsa_prefix->prefixlen == 128)
1564 				lsa_prefix->options |= OSPF_PREFIX_LA;
1565 
1566 			log_debug("orig_intra_lsa_rtr: area %s, interface %s: "
1567 			    "%s/%d, metric %d", inet_ntoa(area->id),
1568 			    iface->name, log_in6addr(&ia->addr),
1569 			    lsa_prefix->prefixlen, ntohs(lsa_prefix->metric));
1570 
1571 			prefix = (struct in6_addr *)(lsa_prefix + 1);
1572 			inet6applymask(prefix, &ia->addr,
1573 			    lsa_prefix->prefixlen);
1574 			append_prefix_lsa(&lsa, &len, lsa_prefix);
1575 			numprefix++;
1576 		}
1577 
1578 		/* TODO: Add prefixes of directly attached hosts, too */
1579 		/* TODO: Add prefixes for virtual links */
1580 	}
1581 
1582 	/* If no prefixes were included, continue only if a copy of this
1583 	 * LSA already exists in DB. It needs to be flushed. */
1584 	if (numprefix == 0 && !old) {
1585 		free(lsa);
1586 		return NULL;
1587 	}
1588 
1589 	lsa->data.pref_intra.numprefix = htons(numprefix);
1590 
1591 	/* LSA header */
1592 	/* If numprefix is zero, originate with MAX_AGE to flush LSA. */
1593 	lsa->hdr.age = numprefix == 0 ? htons(MAX_AGE) : htons(DEFAULT_AGE);
1594 	lsa->hdr.type = htons(LSA_TYPE_INTRA_A_PREFIX);
1595 	lsa->hdr.ls_id = htonl(LS_ID_INTRA_RTR);
1596 	lsa->hdr.adv_rtr = rde_router_id();
1597 	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
1598 	lsa->hdr.len = htons(len);
1599 	lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));
1600 
1601 	return lsa;
1602 }
1603 
1604 void
1605 orig_intra_area_prefix_lsas(struct area *area)
1606 {
1607 	struct lsa	*lsa;
1608 	struct vertex	*old;
1609 	struct iface	*iface;
1610 
1611 	LIST_FOREACH(iface, &area->iface_list, entry) {
1612 		if (iface->type == IF_TYPE_BROADCAST ||
1613 		    iface->type == IF_TYPE_NBMA) {
1614 			old = lsa_find(iface, htons(LSA_TYPE_INTRA_A_PREFIX),
1615 			    htonl(iface->ifindex), rde_router_id());
1616 			lsa = orig_intra_lsa_net(area, iface, old);
1617 			if (lsa)
1618 				lsa_merge(rde_nbr_self(area), lsa, old);
1619 		}
1620 	}
1621 
1622 	old = lsa_find_tree(&area->lsa_tree, htons(LSA_TYPE_INTRA_A_PREFIX),
1623 		htonl(LS_ID_INTRA_RTR), rde_router_id());
1624 	lsa = orig_intra_lsa_rtr(area, old);
1625 	if (lsa)
1626 		lsa_merge(rde_nbr_self(area), lsa, old);
1627 }
1628 
1629 int
1630 comp_asext(struct lsa *a, struct lsa *b)
1631 {
1632 	/* compare prefixes, if they are equal or not */
1633 	if (a->data.asext.prefix.prefixlen != b->data.asext.prefix.prefixlen)
1634 		return (-1);
1635 	return (memcmp(
1636 	    (char *)a + sizeof(struct lsa_hdr) + sizeof(struct lsa_asext),
1637 	    (char *)b + sizeof(struct lsa_hdr) + sizeof(struct lsa_asext),
1638 	    LSA_PREFIXSIZE(a->data.asext.prefix.prefixlen)));
1639 }
1640 
1641 struct lsa *
1642 orig_asext_lsa(struct kroute *kr, u_int16_t age)
1643 {
1644 	struct lsa	*lsa;
1645 	u_int32_t	 ext_tag;
1646 	u_int16_t	 len, ext_off;
1647 
1648 	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_asext) +
1649 	    LSA_PREFIXSIZE(kr->prefixlen);
1650 
1651 	/*
1652 	 * nexthop -- on connected routes we are the nexthop,
1653 	 * in all other cases we should announce the true nexthop
1654 	 * unless that nexthop is outside of the ospf cloud.
1655 	 * XXX for now we don't do this.
1656 	 */
1657 
1658 	ext_off = len;
1659 	if (kr->ext_tag) {
1660 		len += sizeof(ext_tag);
1661 	}
1662 	if ((lsa = calloc(1, len)) == NULL)
1663 		fatal("orig_asext_lsa");
1664 
1665 	log_debug("orig_asext_lsa: %s/%d age %d",
1666 	    log_in6addr(&kr->prefix), kr->prefixlen, age);
1667 
1668 	/* LSA header */
1669 	lsa->hdr.age = htons(age);
1670 	lsa->hdr.type = htons(LSA_TYPE_EXTERNAL);
1671 	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
1672 	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
1673 	lsa->hdr.len = htons(len);
1674 
1675 	lsa->data.asext.prefix.prefixlen = kr->prefixlen;
1676 	memcpy((char *)lsa + sizeof(struct lsa_hdr) + sizeof(struct lsa_asext),
1677 	    &kr->prefix, LSA_PREFIXSIZE(kr->prefixlen));
1678 
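	/* let lsa_find_lsid() pick the LS ID: reuse the ID of an LSA that
	 * already announces this prefix (matched via comp_asext) or an
	 * otherwise unused one */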
1679 	lsa->hdr.ls_id = lsa_find_lsid(&asext_tree, comp_asext, lsa);
1680 
1681 	if (age == MAX_AGE) {
1682 		/* inherit metric and ext_tag from the current LSA,
1683 		 * some routers don't like to get withdraws that are
1684 		 * different from what they have in their table.
1685 		 */
1686 		struct vertex *v;
1687 		v = lsa_find(NULL, lsa->hdr.type, lsa->hdr.ls_id,
1688 		    lsa->hdr.adv_rtr);
1689 		if (v != NULL) {
1690 			kr->metric = ntohl(v->lsa->data.asext.metric);
1691 			if (kr->metric & LSA_ASEXT_T_FLAG) {
1692 				memcpy(&ext_tag, (char *)v->lsa + ext_off,
1693 				    sizeof(ext_tag));
1694 				kr->ext_tag = ntohl(ext_tag);
1695 			}
1696 			kr->metric &= LSA_METRIC_MASK;
1697 		}
1698 	}
1699 
1700 	if (kr->ext_tag) {
1701 		lsa->data.asext.metric = htonl(kr->metric | LSA_ASEXT_T_FLAG);
1702 		ext_tag = htonl(kr->ext_tag);
1703 		memcpy((char *)lsa + ext_off, &ext_tag, sizeof(ext_tag));
1704 	} else {
1705 		lsa->data.asext.metric = htonl(kr->metric);
1706 	}
1707 
1708 	lsa->hdr.ls_chksum = 0;
1709 	lsa->hdr.ls_chksum =
1710 	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));
1711 
1712 	return (lsa);
1713 }
1714 
1715 struct lsa *
1716 orig_sum_lsa(struct rt_node *rte, struct area *area, u_int8_t type, int invalid)
1717 {
1718 #if 0 /* XXX a lot todo */
1719 	struct lsa	*lsa;
1720 	u_int16_t	 len;
1721 
1722 	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_sum);
1723 	if ((lsa = calloc(1, len)) == NULL)
1724 		fatal("orig_sum_lsa");
1725 
1726 	/* LSA header */
1727 	lsa->hdr.age = htons(invalid ? MAX_AGE : DEFAULT_AGE);
1728 	lsa->hdr.type = type;
1729 	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
1730 	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
1731 	lsa->hdr.len = htons(len);
1732 
1733 	/* prefix and mask */
1734 	/*
1735 	 * TODO ls_id must be unique, for overlapping routes this may
1736 	 * not be true. In this case a hack needs to be done to
1737 	 * make the ls_id unique.
1738 	 */
1739 	lsa->hdr.ls_id = rte->prefix.s_addr;
1740 	if (type == LSA_TYPE_SUM_NETWORK)
1741 		lsa->data.sum.mask = prefixlen2mask(rte->prefixlen);
1742 	else
1743 		lsa->data.sum.mask = 0;	/* must be zero per RFC */
1744 
1745 	lsa->data.sum.metric = htonl(rte->cost & LSA_METRIC_MASK);
1746 
1747 	lsa->hdr.ls_chksum = 0;
1748 	lsa->hdr.ls_chksum =
1749 	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));
1750 
1751 	return (lsa);
1752 #endif
1753 	return NULL;
1754 }
1755