/*	$OpenBSD: rde.c,v 1.110 2019/11/19 09:55:55 remi Exp $ */

/*
 * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
 * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <err.h>
#include <errno.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <pwd.h>
#include <unistd.h>
#include <event.h>

#include "ospf.h"
#include "ospfd.h"
#include "ospfe.h"
#include "log.h"
#include "rde.h"

void		 rde_sig_handler(int, short, void *);
__dead void	 rde_shutdown(void);
void		 rde_dispatch_imsg(int, short, void *);
void		 rde_dispatch_parent(int, short, void *);
void		 rde_dump_area(struct area *, int, pid_t);

void		 rde_send_summary(pid_t);
void		 rde_send_summary_area(struct area *, pid_t);
void		 rde_nbr_init(u_int32_t);
void		 rde_nbr_free(void);
struct rde_nbr	*rde_nbr_find(u_int32_t);
struct rde_nbr	*rde_nbr_new(u_int32_t, struct rde_nbr *);
void		 rde_nbr_del(struct rde_nbr *);

void		 rde_req_list_add(struct rde_nbr *, struct lsa_hdr *);
int		 rde_req_list_exists(struct rde_nbr *, struct lsa_hdr *);
void		 rde_req_list_del(struct rde_nbr *, struct lsa_hdr *);
void		 rde_req_list_free(struct rde_nbr *);

struct iface	*rde_asext_lookup(u_int32_t, int);
void		 rde_asext_get(struct kroute *);
void		 rde_asext_put(struct kroute *);
void		 rde_asext_free(void);
struct lsa	*orig_asext_lsa(struct kroute *, u_int32_t, u_int16_t);
struct lsa	*orig_sum_lsa(struct rt_node *, struct area *, u_int8_t, int);

struct ospfd_conf	*rdeconf = NULL, *nconf = NULL;
struct imsgev		*iev_ospfe;
struct imsgev		*iev_main;
struct rde_nbr		*nbrself;
struct lsa_tree		 asext_tree;

/* ARGSUSED */
void
rde_sig_handler(int sig, short event, void *arg)
{
	/*
	 * signal handler rules don't apply, libevent decouples for us
	 */

	switch (sig) {
	case SIGINT:
	case SIGTERM:
		rde_shutdown();
		/* NOTREACHED */
	default:
		fatalx("unexpected signal");
	}
}

/* route decision engine */
pid_t
rde(struct ospfd_conf *xconf, int pipe_parent2rde[2], int pipe_ospfe2rde[2],
    int pipe_parent2ospfe[2])
{
	struct event		 ev_sigint, ev_sigterm;
	struct timeval		 now;
	struct area		*area;
	struct iface		*iface;
	struct passwd		*pw;
	pid_t			 pid;

	switch (pid = fork()) {
	case -1:
		fatal("cannot fork");
		/* NOTREACHED */
	case 0:
		break;
	default:
		return (pid);
	}

	/* cleanup a bit */
	kif_clear();

	rdeconf = xconf;

	if ((pw = getpwnam(OSPFD_USER)) == NULL)
		fatal("getpwnam");

	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	setproctitle("route decision engine");
	/*
	 * XXX needed with fork+exec
	 * log_init(debug, LOG_DAEMON);
	 * log_setverbose(verbose);
	 */

	ospfd_process = PROC_RDE_ENGINE;
	log_procinit(log_procnames[ospfd_process]);

	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

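	/*
	 * Everything beyond this point happens over the already
	 * established imsg pipes, so pledge(2) can be reduced to
	 * "stdio".
	 */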
	if (pledge("stdio", NULL) == -1)
		fatal("pledge");

	event_init();
	rde_nbr_init(NBR_HASHSIZE);
	lsa_init(&asext_tree);

	/* setup signal handler */
	signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
	signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
	signal_add(&ev_sigint, NULL);
	signal_add(&ev_sigterm, NULL);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);

	/* setup pipes */
	close(pipe_ospfe2rde[0]);
	close(pipe_parent2rde[0]);
	close(pipe_parent2ospfe[0]);
	close(pipe_parent2ospfe[1]);

	if ((iev_ospfe = malloc(sizeof(struct imsgev))) == NULL ||
	    (iev_main = malloc(sizeof(struct imsgev))) == NULL)
		fatal(NULL);
	imsg_init(&iev_ospfe->ibuf, pipe_ospfe2rde[1]);
	iev_ospfe->handler = rde_dispatch_imsg;
	imsg_init(&iev_main->ibuf, pipe_parent2rde[1]);
	iev_main->handler = rde_dispatch_parent;

	/* setup event handler */
	iev_ospfe->events = EV_READ;
	event_set(&iev_ospfe->ev, iev_ospfe->ibuf.fd, iev_ospfe->events,
	    iev_ospfe->handler, iev_ospfe);
	event_add(&iev_ospfe->ev, NULL);

	iev_main->events = EV_READ;
	event_set(&iev_main->ev, iev_main->ibuf.fd, iev_main->events,
	    iev_main->handler, iev_main);
	event_add(&iev_main->ev, NULL);

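	/*
	 * SPF recalculation is driven by this timer (bounded by the
	 * configured spf_delay and spf_hold_time) rather than being
	 * run on every single LSA change.
	 */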
	evtimer_set(&rdeconf->ev, spf_timer, rdeconf);
	cand_list_init();
	rt_init();

	/* remove unneeded stuff from config */
	LIST_FOREACH(area, &rdeconf->area_list, entry)
		LIST_FOREACH(iface, &area->iface_list, entry)
			md_list_clr(&iface->auth_md_list);

	conf_clear_redist_list(&rdeconf->redist_list);

	gettimeofday(&now, NULL);
	rdeconf->uptime = now.tv_sec;

	event_dispatch();

	rde_shutdown();
	/* NOTREACHED */

	return (0);
}

__dead void
rde_shutdown(void)
{
	struct area	*a;
	struct vertex	*v, *nv;

	/* close pipes */
	msgbuf_clear(&iev_ospfe->ibuf.w);
	close(iev_ospfe->ibuf.fd);
	msgbuf_clear(&iev_main->ibuf.w);
	close(iev_main->ibuf.fd);

	stop_spf_timer(rdeconf);
	cand_list_clr();
	rt_clear();

	while ((a = LIST_FIRST(&rdeconf->area_list)) != NULL) {
		LIST_REMOVE(a, entry);
		area_del(a);
	}
	for (v = RB_MIN(lsa_tree, &asext_tree); v != NULL; v = nv) {
		nv = RB_NEXT(lsa_tree, &asext_tree, v);
		vertex_free(v);
	}
	rde_asext_free();
	rde_nbr_free();

	free(iev_ospfe);
	free(iev_main);
	free(rdeconf);

	log_info("route decision engine exiting");
	_exit(0);
}

int
rde_imsg_compose_ospfe(int type, u_int32_t peerid, pid_t pid, void *data,
    u_int16_t datalen)
{
	return (imsg_compose_event(iev_ospfe, type, peerid, pid, -1,
	    data, datalen));
}

/* ARGSUSED */
void
rde_dispatch_imsg(int fd, short event, void *bula)
{
	struct imsgev		*iev = bula;
	struct imsgbuf		*ibuf;
	struct imsg		 imsg;
	struct in_addr		 aid;
	struct ls_req_hdr	 req_hdr;
	struct lsa_hdr		 lsa_hdr, *db_hdr;
	struct rde_nbr		 rn, *nbr;
	struct timespec		 tp;
	struct lsa		*lsa;
	struct area		*area;
	struct in_addr		 addr;
	struct vertex		*v;
	char			*buf;
	ssize_t			 n;
	time_t			 now;
	int			 r, state, self, error, shut = 0, verbose;
	u_int16_t		 l;

	ibuf = &iev->ibuf;

	if (event & EV_READ) {
		if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
	}
	if (event & EV_WRITE) {
		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
			fatal("msgbuf_write");
		if (n == 0)	/* connection closed */
			shut = 1;
	}

	clock_gettime(CLOCK_MONOTONIC, &tp);
	now = tp.tv_sec;

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_imsg: imsg_get error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_NEIGHBOR_UP:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rn))
				fatalx("invalid size of OE request");
			memcpy(&rn, imsg.data, sizeof(rn));

			if (rde_nbr_new(imsg.hdr.peerid, &rn) == NULL)
				fatalx("rde_dispatch_imsg: "
				    "neighbor already exists");
			break;
		case IMSG_NEIGHBOR_DOWN:
			rde_nbr_del(rde_nbr_find(imsg.hdr.peerid));
			break;
		case IMSG_NEIGHBOR_ADDR:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(addr))
				fatalx("invalid size of OE request");
			memcpy(&addr, imsg.data, sizeof(addr));

			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			nbr->addr.s_addr = addr.s_addr;
			break;
		case IMSG_NEIGHBOR_CHANGE:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state))
				fatalx("invalid size of OE request");
			memcpy(&state, imsg.data, sizeof(state));

			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			nbr->state = state;
			if (nbr->state & NBR_STA_FULL)
				rde_req_list_free(nbr);
			break;
		case IMSG_NEIGHBOR_CAPA:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(u_int8_t))
				fatalx("invalid size of OE request");
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;
			nbr->capa_options = *(u_int8_t *)imsg.data;
			break;
		case IMSG_AREA_CHANGE:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state))
				fatalx("invalid size of OE request");

			LIST_FOREACH(area, &rdeconf->area_list, entry) {
				if (area->id.s_addr == imsg.hdr.peerid)
					break;
			}
			if (area == NULL)
				break;
			memcpy(&state, imsg.data, sizeof(state));
			area->active = state;
			break;
		case IMSG_DB_SNAPSHOT:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

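			/*
			 * Database Exchange: send the neighbor a snapshot
			 * of the LSA database; IMSG_DB_END tells the ospfe
			 * that the snapshot is complete.
			 */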
			lsa_snap(nbr);

			imsg_compose_event(iev_ospfe, IMSG_DB_END, imsg.hdr.peerid,
			    0, -1, NULL, 0);
			break;
		case IMSG_DD:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			buf = imsg.data;
			error = 0;
			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
			    l >= sizeof(lsa_hdr); l -= sizeof(lsa_hdr)) {
				memcpy(&lsa_hdr, buf, sizeof(lsa_hdr));
				buf += sizeof(lsa_hdr);

				if (lsa_hdr.type == LSA_TYPE_EXTERNAL &&
				    nbr->area->stub) {
					error = 1;
					break;
				}
				v = lsa_find(nbr->iface, lsa_hdr.type,
				    lsa_hdr.ls_id, lsa_hdr.adv_rtr);
				if (v == NULL)
					db_hdr = NULL;
				else
					db_hdr = &v->lsa->hdr;

				if (lsa_newer(&lsa_hdr, db_hdr) > 0) {
					/*
					 * only request LSAs that are
					 * newer or missing
					 */
					rde_req_list_add(nbr, &lsa_hdr);
					imsg_compose_event(iev_ospfe, IMSG_DD,
					    imsg.hdr.peerid, 0, -1, &lsa_hdr,
					    sizeof(lsa_hdr));
				}
			}
			if (l != 0 && !error)
				log_warnx("rde_dispatch_imsg: peerid %u, "
				    "trailing garbage in Database Description "
				    "packet", imsg.hdr.peerid);

			if (!error)
				imsg_compose_event(iev_ospfe, IMSG_DD_END,
				    imsg.hdr.peerid, 0, -1, NULL, 0);
			else
				imsg_compose_event(iev_ospfe, IMSG_DD_BADLSA,
				    imsg.hdr.peerid, 0, -1, NULL, 0);
			break;
		case IMSG_LS_REQ:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			buf = imsg.data;
			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
			    l >= sizeof(req_hdr); l -= sizeof(req_hdr)) {
				memcpy(&req_hdr, buf, sizeof(req_hdr));
				buf += sizeof(req_hdr);

				if ((v = lsa_find(nbr->iface,
				    ntohl(req_hdr.type), req_hdr.ls_id,
				    req_hdr.adv_rtr)) == NULL) {
					log_debug("rde_dispatch_imsg: "
					    "requested LSA not found");
					imsg_compose_event(iev_ospfe,
					    IMSG_LS_BADREQ, imsg.hdr.peerid,
					    0, -1, NULL, 0);
					continue;
				}
				imsg_compose_event(iev_ospfe, IMSG_LS_UPD,
				    imsg.hdr.peerid, 0, -1, v->lsa,
				    ntohs(v->lsa->hdr.len));
			}
			if (l != 0)
				log_warnx("rde_dispatch_imsg: peerid %u, "
				    "trailing garbage in LS Request "
				    "packet", imsg.hdr.peerid);
			break;
		case IMSG_LS_UPD:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			lsa = malloc(imsg.hdr.len - IMSG_HEADER_SIZE);
			if (lsa == NULL)
				fatal(NULL);
			memcpy(lsa, imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);

			if (!lsa_check(nbr, lsa,
			    imsg.hdr.len - IMSG_HEADER_SIZE)) {
				free(lsa);
				break;
			}

			v = lsa_find(nbr->iface, lsa->hdr.type, lsa->hdr.ls_id,
			    lsa->hdr.adv_rtr);
			if (v == NULL)
				db_hdr = NULL;
			else
				db_hdr = &v->lsa->hdr;

			if (nbr->self) {
				lsa_merge(nbr, lsa, v);
				/* lsa_merge frees the right lsa */
				break;
			}

			r = lsa_newer(&lsa->hdr, db_hdr);
			if (r > 0) {
				/* new LSA newer than DB */
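				/*
				 * Ignore an instance that arrives within
				 * MIN_LS_ARRIVAL of the previously flooded
				 * one (RFC 2328, section 13, step 5a).
				 */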
				if (v && v->flooded &&
				    v->changed + MIN_LS_ARRIVAL >= now) {
					free(lsa);
					break;
				}

				rde_req_list_del(nbr, &lsa->hdr);

				if (!(self = lsa_self(nbr, lsa, v)))
					if (lsa_add(nbr, lsa))
						/* delayed lsa */
						break;

				/* flood and perhaps ack LSA */
				imsg_compose_event(iev_ospfe, IMSG_LS_FLOOD,
				    imsg.hdr.peerid, 0, -1, lsa,
				    ntohs(lsa->hdr.len));

				/* reflood self originated LSA */
				if (self && v)
					imsg_compose_event(iev_ospfe,
					    IMSG_LS_FLOOD, v->peerid, 0, -1,
					    v->lsa, ntohs(v->lsa->hdr.len));
				/* new LSA was not added so free it */
				if (self)
					free(lsa);
			} else if (r < 0) {
				/*
				 * point 6 of "The Flooding Procedure"
				 * We deviate from the RFC here: it does not
				 * make sense to reset a session just because
				 * an equal LSA is already in the table.
				 * Only if the LSA sent is older than the one
				 * in the table should we reset the session.
				 */
				if (rde_req_list_exists(nbr, &lsa->hdr)) {
					imsg_compose_event(iev_ospfe,
					    IMSG_LS_BADREQ, imsg.hdr.peerid,
					    0, -1, NULL, 0);
					free(lsa);
					break;
				}

				/* lsa no longer needed */
				free(lsa);

				/* new LSA older than DB */
				if (ntohl(db_hdr->seq_num) == MAX_SEQ_NUM &&
				    ntohs(db_hdr->age) == MAX_AGE)
					/* seq-num wrap */
					break;

				if (v->changed + MIN_LS_ARRIVAL >= now)
					break;

				/* directly send current LSA, no ack */
				imsg_compose_event(iev_ospfe, IMSG_LS_UPD,
				    imsg.hdr.peerid, 0, -1, v->lsa,
				    ntohs(v->lsa->hdr.len));
			} else {
				/* LSA equal send direct ack */
				imsg_compose_event(iev_ospfe, IMSG_LS_ACK,
				    imsg.hdr.peerid, 0, -1, &lsa->hdr,
				    sizeof(lsa->hdr));
				free(lsa);
			}
			break;
		case IMSG_LS_MAXAGE:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct lsa_hdr))
				fatalx("invalid size of OE request");
			memcpy(&lsa_hdr, imsg.data, sizeof(lsa_hdr));

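			/*
			 * A MaxAge LSA may only be flushed from the
			 * database while no neighbor is in Exchange or
			 * Loading state (RFC 2328, section 14).
			 */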
			if (rde_nbr_loading(nbr->area))
				break;

			v = lsa_find(nbr->iface, lsa_hdr.type, lsa_hdr.ls_id,
			    lsa_hdr.adv_rtr);
			if (v == NULL)
				db_hdr = NULL;
			else
				db_hdr = &v->lsa->hdr;

			/*
			 * only delete LSA if the one in the db is not newer
			 */
			if (lsa_newer(db_hdr, &lsa_hdr) <= 0)
				lsa_del(nbr, &lsa_hdr);
			break;
		case IMSG_CTL_SHOW_DATABASE:
		case IMSG_CTL_SHOW_DB_EXT:
		case IMSG_CTL_SHOW_DB_NET:
		case IMSG_CTL_SHOW_DB_RTR:
		case IMSG_CTL_SHOW_DB_SELF:
		case IMSG_CTL_SHOW_DB_SUM:
		case IMSG_CTL_SHOW_DB_ASBR:
		case IMSG_CTL_SHOW_DB_OPAQ:
			if (imsg.hdr.len != IMSG_HEADER_SIZE &&
			    imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(aid)) {
				log_warnx("rde_dispatch_imsg: wrong imsg len");
				break;
			}
			if (imsg.hdr.len == IMSG_HEADER_SIZE) {
				LIST_FOREACH(area, &rdeconf->area_list, entry) {
					rde_dump_area(area, imsg.hdr.type,
					    imsg.hdr.pid);
				}
				lsa_dump(&asext_tree, imsg.hdr.type,
				    imsg.hdr.pid);
			} else {
				memcpy(&aid, imsg.data, sizeof(aid));
				if ((area = area_find(rdeconf, aid)) != NULL) {
					rde_dump_area(area, imsg.hdr.type,
					    imsg.hdr.pid);
					if (!area->stub)
						lsa_dump(&asext_tree,
						    imsg.hdr.type,
						    imsg.hdr.pid);
				}
			}
			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
			    imsg.hdr.pid, -1, NULL, 0);
			break;
		case IMSG_CTL_SHOW_RIB:
			LIST_FOREACH(area, &rdeconf->area_list, entry) {
				imsg_compose_event(iev_ospfe, IMSG_CTL_AREA,
				    0, imsg.hdr.pid, -1, area, sizeof(*area));

				rt_dump(area->id, imsg.hdr.pid, RIB_RTR);
				rt_dump(area->id, imsg.hdr.pid, RIB_NET);
			}
			aid.s_addr = 0;
			rt_dump(aid, imsg.hdr.pid, RIB_EXT);

			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
			    imsg.hdr.pid, -1, NULL, 0);
			break;
		case IMSG_CTL_SHOW_SUM:
			rde_send_summary(imsg.hdr.pid);
			LIST_FOREACH(area, &rdeconf->area_list, entry)
				rde_send_summary_area(area, imsg.hdr.pid);
			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
			    imsg.hdr.pid, -1, NULL, 0);
			break;
		case IMSG_CTL_LOG_VERBOSE:
			/* already checked by ospfe */
			memcpy(&verbose, imsg.data, sizeof(verbose));
			log_setverbose(verbose);
			break;
		default:
			log_debug("rde_dispatch_imsg: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(iev);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&iev->ev);
		event_loopexit(NULL);
	}
}

/* ARGSUSED */
void
rde_dispatch_parent(int fd, short event, void *bula)
{
	static struct area	*narea;
	struct iface		*niface;
	struct imsg		 imsg;
	struct kroute		 rr;
	struct imsgev		*iev = bula;
	struct imsgbuf		*ibuf;
	struct redistribute	*nred;
	ssize_t			 n;
	int			 shut = 0;

	ibuf = &iev->ibuf;

	if (event & EV_READ) {
		if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
	}
	if (event & EV_WRITE) {
		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
			fatal("msgbuf_write");
		if (n == 0)	/* connection closed */
			shut = 1;
	}

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_parent: imsg_get error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_NETWORK_ADD:
			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
				log_warnx("rde_dispatch_parent: "
				    "wrong imsg len");
				break;
			}
			memcpy(&rr, imsg.data, sizeof(rr));
			rde_asext_get(&rr);
			break;
		case IMSG_NETWORK_DEL:
			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
				log_warnx("rde_dispatch_parent: "
				    "wrong imsg len");
				break;
			}
			memcpy(&rr, imsg.data, sizeof(rr));
			rde_asext_put(&rr);
			break;
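		/*
		 * Reload: the parent streams the new configuration as a
		 * series of IMSG_RECONF_* messages which are assembled in
		 * nconf and merged into the running rdeconf once
		 * IMSG_RECONF_END arrives.
		 */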
		case IMSG_RECONF_CONF:
			if ((nconf = malloc(sizeof(struct ospfd_conf))) ==
			    NULL)
				fatal(NULL);
			memcpy(nconf, imsg.data, sizeof(struct ospfd_conf));

			LIST_INIT(&nconf->area_list);
			LIST_INIT(&nconf->cand_list);
			break;
		case IMSG_RECONF_AREA:
			if ((narea = area_new()) == NULL)
				fatal(NULL);
			memcpy(narea, imsg.data, sizeof(struct area));

			LIST_INIT(&narea->iface_list);
			LIST_INIT(&narea->nbr_list);
			RB_INIT(&narea->lsa_tree);
			SIMPLEQ_INIT(&narea->redist_list);

			LIST_INSERT_HEAD(&nconf->area_list, narea, entry);
			break;
		case IMSG_RECONF_REDIST:
			if ((nred = malloc(sizeof(struct redistribute))) == NULL)
				fatal(NULL);
			memcpy(nred, imsg.data, sizeof(struct redistribute));

			SIMPLEQ_INSERT_TAIL(&narea->redist_list, nred, entry);
			break;
		case IMSG_RECONF_IFACE:
			if ((niface = malloc(sizeof(struct iface))) == NULL)
				fatal(NULL);
			memcpy(niface, imsg.data, sizeof(struct iface));

			LIST_INIT(&niface->nbr_list);
			TAILQ_INIT(&niface->ls_ack_list);
			TAILQ_INIT(&niface->auth_md_list);
			RB_INIT(&niface->lsa_tree);

			niface->area = narea;
			LIST_INSERT_HEAD(&narea->iface_list, niface, entry);

			break;
		case IMSG_RECONF_END:
			merge_config(rdeconf, nconf);
			nconf = NULL;
			break;
		default:
			log_debug("rde_dispatch_parent: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(iev);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&iev->ev);
		event_loopexit(NULL);
	}
}

void
rde_dump_area(struct area *area, int imsg_type, pid_t pid)
{
	struct iface	*iface;

	/* dump header */
	imsg_compose_event(iev_ospfe, IMSG_CTL_AREA, 0, pid, -1,
	    area, sizeof(*area));

	/* dump link local lsa */
	LIST_FOREACH(iface, &area->iface_list, entry) {
		imsg_compose_event(iev_ospfe, IMSG_CTL_IFACE,
		    0, pid, -1, iface, sizeof(*iface));
		lsa_dump(&iface->lsa_tree, imsg_type, pid);
	}

	/* dump area lsa */
	lsa_dump(&area->lsa_tree, imsg_type, pid);
}

u_int32_t
rde_router_id(void)
{
	return (rdeconf->rtr_id.s_addr);
}

struct area *
rde_backbone_area(void)
{
	struct in_addr	id;

	id.s_addr = INADDR_ANY;

	return (area_find(rdeconf, id));
}

void
rde_send_change_kroute(struct rt_node *r)
{
	int			 krcount = 0;
	struct kroute		 kr;
	struct rt_nexthop	*rn;
	struct ibuf		*wbuf;

	if ((wbuf = imsg_create(&iev_main->ibuf, IMSG_KROUTE_CHANGE, 0, 0,
	    sizeof(kr))) == NULL) {
		return;
	}

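	/*
	 * Collect all valid, non-connected nexthops into a single
	 * IMSG_KROUTE_CHANGE message; the parent installs them as one
	 * kernel route (multipath when more than one nexthop remains).
	 */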
	TAILQ_FOREACH(rn, &r->nexthop, entry) {
		if (rn->invalid)
			continue;
		if (rn->connected)
			/* skip self-originated routes */
			continue;
		krcount++;

		bzero(&kr, sizeof(kr));
		kr.prefix.s_addr = r->prefix.s_addr;
		kr.nexthop.s_addr = rn->nexthop.s_addr;
		kr.prefixlen = r->prefixlen;
		kr.ext_tag = r->ext_tag;
		imsg_add(wbuf, &kr, sizeof(kr));
	}
	if (krcount == 0) {
		/* no valid nexthop or self originated, so remove */
		ibuf_free(wbuf);
		rde_send_delete_kroute(r);
		return;
	}
	imsg_close(&iev_main->ibuf, wbuf);
	imsg_event_add(iev_main);
}

void
rde_send_delete_kroute(struct rt_node *r)
{
	struct kroute	 kr;

	bzero(&kr, sizeof(kr));
	kr.prefix.s_addr = r->prefix.s_addr;
	kr.prefixlen = r->prefixlen;

	imsg_compose_event(iev_main, IMSG_KROUTE_DELETE, 0, 0, -1,
	    &kr, sizeof(kr));
}

void
rde_send_summary(pid_t pid)
{
	static struct ctl_sum	 sumctl;
	struct timeval		 now;
	struct area		*area;
	struct vertex		*v;

	bzero(&sumctl, sizeof(struct ctl_sum));

	sumctl.rtr_id.s_addr = rde_router_id();
	sumctl.spf_delay = rdeconf->spf_delay;
	sumctl.spf_hold_time = rdeconf->spf_hold_time;

	LIST_FOREACH(area, &rdeconf->area_list, entry)
		sumctl.num_area++;

	RB_FOREACH(v, lsa_tree, &asext_tree) {
		sumctl.num_ext_lsa++;
		sumctl.ext_lsa_cksum += ntohs(v->lsa->hdr.ls_chksum);
	}

	gettimeofday(&now, NULL);
	if (rdeconf->uptime < now.tv_sec)
		sumctl.uptime = now.tv_sec - rdeconf->uptime;
	else
		sumctl.uptime = 0;

	sumctl.rfc1583compat = rdeconf->rfc1583compat;

	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM, 0, pid, &sumctl,
	    sizeof(sumctl));
}

void
rde_send_summary_area(struct area *area, pid_t pid)
{
	static struct ctl_sum_area	 sumareactl;
	struct iface			*iface;
	struct rde_nbr			*nbr;
	struct lsa_tree			*tree = &area->lsa_tree;
	struct vertex			*v;

	bzero(&sumareactl, sizeof(struct ctl_sum_area));

	sumareactl.area.s_addr = area->id.s_addr;
	sumareactl.num_spf_calc = area->num_spf_calc;

	LIST_FOREACH(iface, &area->iface_list, entry)
		sumareactl.num_iface++;

	LIST_FOREACH(nbr, &area->nbr_list, entry)
		if (nbr->state == NBR_STA_FULL && !nbr->self)
			sumareactl.num_adj_nbr++;

	RB_FOREACH(v, lsa_tree, tree) {
		sumareactl.num_lsa++;
		sumareactl.lsa_cksum += ntohs(v->lsa->hdr.ls_chksum);
	}

	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM_AREA, 0, pid, &sumareactl,
	    sizeof(sumareactl));
}

LIST_HEAD(rde_nbr_head, rde_nbr);

struct nbr_table {
	struct rde_nbr_head	*hashtbl;
	u_int32_t		 hashmask;
} rdenbrtable;

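/*
 * The table size is rounded up to a power of two in rde_nbr_init(),
 * so masking with hashmask picks the bucket without a modulo.
 */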
#define RDE_NBR_HASH(x)		\
	&rdenbrtable.hashtbl[(x) & rdenbrtable.hashmask]

void
rde_nbr_init(u_int32_t hashsize)
{
	struct rde_nbr_head	*head;
	u_int32_t		 hs, i;

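	/* round hashsize up to the next power of two */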
	for (hs = 1; hs < hashsize; hs <<= 1)
		;
	rdenbrtable.hashtbl = calloc(hs, sizeof(struct rde_nbr_head));
	if (rdenbrtable.hashtbl == NULL)
		fatal("rde_nbr_init");

	for (i = 0; i < hs; i++)
		LIST_INIT(&rdenbrtable.hashtbl[i]);

	rdenbrtable.hashmask = hs - 1;

	if ((nbrself = calloc(1, sizeof(*nbrself))) == NULL)
		fatal("rde_nbr_init");

	nbrself->id.s_addr = rde_router_id();
	nbrself->peerid = NBR_IDSELF;
	nbrself->state = NBR_STA_DOWN;
	nbrself->self = 1;
	head = RDE_NBR_HASH(NBR_IDSELF);
	LIST_INSERT_HEAD(head, nbrself, hash);
}

void
rde_nbr_free(void)
{
	free(nbrself);
	free(rdenbrtable.hashtbl);
}

struct rde_nbr *
rde_nbr_find(u_int32_t peerid)
{
	struct rde_nbr_head	*head;
	struct rde_nbr		*nbr;

	head = RDE_NBR_HASH(peerid);

	LIST_FOREACH(nbr, head, hash) {
		if (nbr->peerid == peerid)
			return (nbr);
	}

	return (NULL);
}

struct rde_nbr *
rde_nbr_new(u_int32_t peerid, struct rde_nbr *new)
{
	struct rde_nbr_head	*head;
	struct rde_nbr		*nbr;
	struct area		*area;
	struct iface		*iface;

	if (rde_nbr_find(peerid))
		return (NULL);
	if ((area = area_find(rdeconf, new->area_id)) == NULL)
		fatalx("rde_nbr_new: unknown area");

	LIST_FOREACH(iface, &area->iface_list, entry) {
		if (iface->ifindex == new->ifindex)
			break;
	}
	if (iface == NULL)
		fatalx("rde_nbr_new: unknown interface");

	if ((nbr = calloc(1, sizeof(*nbr))) == NULL)
		fatal("rde_nbr_new");

	memcpy(nbr, new, sizeof(*nbr));
	nbr->peerid = peerid;
	nbr->area = area;
	nbr->iface = iface;

	TAILQ_INIT(&nbr->req_list);

	head = RDE_NBR_HASH(peerid);
	LIST_INSERT_HEAD(head, nbr, hash);
	LIST_INSERT_HEAD(&area->nbr_list, nbr, entry);

	return (nbr);
}

void
rde_nbr_iface_del(struct iface *iface)
{
	struct rde_nbr_head	*head;
	struct rde_nbr		*nbr, *xnbr;
	u_int32_t		 i;

	for (i = 0; i <= rdenbrtable.hashmask; i++) {
		head = &rdenbrtable.hashtbl[i];
		LIST_FOREACH_SAFE(nbr, head, hash, xnbr) {
			if (nbr->iface == iface)
				rde_nbr_del(nbr);
		}
	}
}

void
rde_nbr_del(struct rde_nbr *nbr)
{
	if (nbr == NULL)
		return;

	rde_req_list_free(nbr);

	LIST_REMOVE(nbr, entry);
	LIST_REMOVE(nbr, hash);

	free(nbr);
}

int
rde_nbr_loading(struct area *area)
{
	struct rde_nbr		*nbr;
	int			 checkall = 0;

	if (area == NULL) {
		area = LIST_FIRST(&rdeconf->area_list);
		checkall = 1;
	}

	while (area != NULL) {
		LIST_FOREACH(nbr, &area->nbr_list, entry) {
			if (nbr->self)
				continue;
			if (nbr->state & NBR_STA_XCHNG ||
			    nbr->state & NBR_STA_LOAD)
				return (1);
		}
		if (!checkall)
			break;
		area = LIST_NEXT(area, entry);
	}

	return (0);
}

struct rde_nbr *
rde_nbr_self(struct area *area)
{
	struct rde_nbr		*nbr;

	LIST_FOREACH(nbr, &area->nbr_list, entry)
		if (nbr->self)
			return (nbr);

	/* this must not happen */
	fatalx("rde_nbr_self: area without self");
	return (NULL);
}

/*
 * LSA req list
 */
void
rde_req_list_add(struct rde_nbr *nbr, struct lsa_hdr *lsa)
{
	struct rde_req_entry	*le;

	if ((le = calloc(1, sizeof(*le))) == NULL)
		fatal("rde_req_list_add");

	TAILQ_INSERT_TAIL(&nbr->req_list, le, entry);
	le->type = lsa->type;
	le->ls_id = lsa->ls_id;
	le->adv_rtr = lsa->adv_rtr;
}

int
rde_req_list_exists(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
{
	struct rde_req_entry	*le;

	TAILQ_FOREACH(le, &nbr->req_list, entry) {
		if ((lsa_hdr->type == le->type) &&
		    (lsa_hdr->ls_id == le->ls_id) &&
		    (lsa_hdr->adv_rtr == le->adv_rtr))
			return (1);
	}
	return (0);
}

void
rde_req_list_del(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
{
	struct rde_req_entry	*le;

	TAILQ_FOREACH(le, &nbr->req_list, entry) {
		if ((lsa_hdr->type == le->type) &&
		    (lsa_hdr->ls_id == le->ls_id) &&
		    (lsa_hdr->adv_rtr == le->adv_rtr)) {
			TAILQ_REMOVE(&nbr->req_list, le, entry);
			free(le);
			return;
		}
	}
}

void
rde_req_list_free(struct rde_nbr *nbr)
{
	struct rde_req_entry	*le;

	while ((le = TAILQ_FIRST(&nbr->req_list)) != NULL) {
		TAILQ_REMOVE(&nbr->req_list, le, entry);
		free(le);
	}
}

/*
 * as-external LSA handling
 */
struct asext_node {
	RB_ENTRY(asext_node)    entry;
	struct kroute		r;
	u_int32_t		ls_id;
};

static __inline int	asext_compare(struct asext_node *, struct asext_node *);
struct asext_node	*asext_find(u_int32_t, u_int8_t);

RB_HEAD(asext_tree, asext_node)		ast;
RB_PROTOTYPE(asext_tree, asext_node, entry, asext_compare)
RB_GENERATE(asext_tree, asext_node, entry, asext_compare)

static __inline int
asext_compare(struct asext_node *a, struct asext_node *b)
{
	if (ntohl(a->r.prefix.s_addr) < ntohl(b->r.prefix.s_addr))
		return (-1);
	if (ntohl(a->r.prefix.s_addr) > ntohl(b->r.prefix.s_addr))
		return (1);
	if (a->r.prefixlen < b->r.prefixlen)
		return (-1);
	if (a->r.prefixlen > b->r.prefixlen)
		return (1);
	return (0);
}

struct asext_node *
asext_find(u_int32_t addr, u_int8_t prefixlen)
{
	struct asext_node	a;

	a.r.prefix.s_addr = addr;
	a.r.prefixlen = prefixlen;

	return (RB_FIND(asext_tree, &ast, &a));
}

struct iface *
rde_asext_lookup(u_int32_t prefix, int plen)
{
	struct area	*area;
	struct iface	*iface;

	LIST_FOREACH(area, &rdeconf->area_list, entry) {
		LIST_FOREACH(iface, &area->iface_list, entry) {
			if ((iface->addr.s_addr & iface->mask.s_addr) ==
			    (prefix & iface->mask.s_addr) && (plen == -1 ||
			    iface->mask.s_addr == prefixlen2mask(plen)))
				return (iface);
		}
	}
	return (NULL);
}

void
rde_asext_get(struct kroute *kr)
{
	struct asext_node	*an, *oan;
	struct vertex		*v;
	struct lsa		*lsa;
	u_int32_t		 mask;

	if (rde_asext_lookup(kr->prefix.s_addr, kr->prefixlen)) {
		/* already announced as (stub) net LSA */
		log_debug("rde_asext_get: %s/%d is net LSA",
		    inet_ntoa(kr->prefix), kr->prefixlen);
		return;
	}

	an = asext_find(kr->prefix.s_addr, kr->prefixlen);
	if (an == NULL) {
		if ((an = calloc(1, sizeof(*an))) == NULL)
			fatal("rde_asext_get");
		bcopy(kr, &an->r, sizeof(*kr));
		an->ls_id = kr->prefix.s_addr;
		RB_INSERT(asext_tree, &ast, an);
	} else {
		/* the bcopy does not change the lookup key so it is safe */
		bcopy(kr, &an->r, sizeof(*kr));
	}

	/*
	 * ls_id must be unique, for overlapping routes this may
	 * not be true. In this case a unique ls_id needs to be found.
	 * The algorithm will change the ls_id of the less specific
	 * route. E.g. in the case of 10.0.0.0/16 and 10.0.0.0/24
	 * 10.0.0.0/24 will get the 10.0.0.0 ls_id and 10.0.0.0/16
	 * will change the ls_id to 10.0.255.255 and see if that is unique.
	 */
	oan = an;
	mask = prefixlen2mask(oan->r.prefixlen);
	v = lsa_find(NULL, LSA_TYPE_EXTERNAL, oan->ls_id,
	    rdeconf->rtr_id.s_addr);
	while (v && v->lsa->data.asext.mask != mask) {
		/* conflict needs to be resolved. change less specific lsa */
		if (ntohl(v->lsa->data.asext.mask) < ntohl(mask)) {
			/* lsa to insert is more specific, fix other lsa */
			mask = v->lsa->data.asext.mask;
			oan = asext_find(v->lsa->hdr.ls_id & mask,
			   mask2prefixlen(mask));
			if (oan == NULL)
				fatalx("as-ext LSA DB corrupted");
		}
		/* oan is less specific and needs new ls_id */
		if (oan->ls_id == oan->r.prefix.s_addr)
			oan->ls_id |= ~mask;
		else {
			u_int32_t	tmp = ntohl(oan->ls_id);
			oan->ls_id = htonl(tmp - 1);
			if (oan->ls_id == oan->r.prefix.s_addr) {
				log_warnx("prefix %s/%d cannot be "
				    "redistributed, no unique ls_id found.",
				    inet_ntoa(kr->prefix), kr->prefixlen);
				RB_REMOVE(asext_tree, &ast, an);
				free(an);
				return;
			}
		}
		mask = prefixlen2mask(oan->r.prefixlen);
		v = lsa_find(NULL, LSA_TYPE_EXTERNAL, oan->ls_id,
		    rdeconf->rtr_id.s_addr);
	}

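	/*
	 * (Re)originate the as-ext LSA for this route.  lsa_merge()
	 * takes ownership and frees whichever copy of the LSA is no
	 * longer needed.
	 */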
	v = lsa_find(NULL, LSA_TYPE_EXTERNAL, an->ls_id,
	    rdeconf->rtr_id.s_addr);
	lsa = orig_asext_lsa(kr, an->ls_id, DEFAULT_AGE);
	lsa_merge(nbrself, lsa, v);

	if (oan != an) {
		v = lsa_find(NULL, LSA_TYPE_EXTERNAL, oan->ls_id,
		    rdeconf->rtr_id.s_addr);
		lsa = orig_asext_lsa(&oan->r, oan->ls_id, DEFAULT_AGE);
		lsa_merge(nbrself, lsa, v);
	}
}

void
rde_asext_put(struct kroute *kr)
{
	struct asext_node	*an;
	struct vertex		*v;
	struct lsa		*lsa;

	/*
	 * just try to remove the LSA. If the prefix is announced as
	 * stub net LSA asext_find() will fail and nothing will happen.
	 */
	an = asext_find(kr->prefix.s_addr, kr->prefixlen);
	if (an == NULL) {
		log_debug("rde_asext_put: NO SUCH LSA %s/%d",
		    inet_ntoa(kr->prefix), kr->prefixlen);
		return;
	}

	/*
	 * inherit metric and ext_tag from the current LSA,
	 * some routers don't like to get withdraws that are
	 * different from what they have in their table.
	 */
	v = lsa_find(NULL, LSA_TYPE_EXTERNAL, an->ls_id,
	    rdeconf->rtr_id.s_addr);
	if (v != NULL) {
		kr->metric = ntohl(v->lsa->data.asext.metric);
		kr->ext_tag = ntohl(v->lsa->data.asext.ext_tag);
	}

	/* remove by reflooding with MAX_AGE */
	lsa = orig_asext_lsa(kr, an->ls_id, MAX_AGE);
	lsa_merge(nbrself, lsa, v);

	RB_REMOVE(asext_tree, &ast, an);
	free(an);
}

void
rde_asext_free(void)
{
	struct asext_node	*an, *nan;

	for (an = RB_MIN(asext_tree, &ast); an != NULL; an = nan) {
		nan = RB_NEXT(asext_tree, &ast, an);
		RB_REMOVE(asext_tree, &ast, an);
		free(an);
	}
}

struct lsa *
orig_asext_lsa(struct kroute *kr, u_int32_t ls_id, u_int16_t age)
{
	struct lsa	*lsa;
	struct iface	*iface;
	u_int16_t	 len;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_asext);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_asext_lsa");

	log_debug("orig_asext_lsa: %s/%d age %d",
	    inet_ntoa(kr->prefix), kr->prefixlen, age);

	/* LSA header */
	lsa->hdr.age = htons(age);
	lsa->hdr.opts = area_ospf_options(NULL);
	lsa->hdr.type = LSA_TYPE_EXTERNAL;
	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
	/* update of seqnum is done by lsa_merge */
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);

	/* prefix and mask */
	lsa->hdr.ls_id = ls_id;
	lsa->data.asext.mask = prefixlen2mask(kr->prefixlen);

	/*
	 * nexthop -- on connected routes we are the nexthop,
	 * in other cases we may announce the true nexthop if the
	 * nexthop is reachable via an OSPF enabled interface but only
	 * broadcast & NBMA interfaces are considered in that case.
	 * It does not make sense to announce the nexthop of a point-to-point
	 * link since the traffic has to go through this box anyway.
	 * Some implementations actually check that there are multiple
	 * neighbors on the particular segment, we skip that check.
	 */
	iface = rde_asext_lookup(kr->nexthop.s_addr, -1);
	if (kr->flags & F_CONNECTED)
		lsa->data.asext.fw_addr = 0;
	else if (iface && (iface->type == IF_TYPE_BROADCAST ||
	    iface->type == IF_TYPE_NBMA))
		lsa->data.asext.fw_addr = kr->nexthop.s_addr;
	else
		lsa->data.asext.fw_addr = 0;

	lsa->data.asext.metric = htonl(kr->metric);
	lsa->data.asext.ext_tag = htonl(kr->ext_tag);

	lsa->hdr.ls_chksum = 0;
	lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return (lsa);
}

/*
 * summary LSA stuff
 */
void
rde_summary_update(struct rt_node *rte, struct area *area)
{
	struct rt_nexthop	*rn;
	struct rt_node		*nr;
	struct vertex		*v = NULL;
	struct lsa		*lsa;
	u_int8_t		 type = 0;

	/* first check if we actually need to announce this route */
	if (!(rte->d_type == DT_NET || rte->flags & OSPF_RTR_E))
		return;
	/* route is invalid, lsa_remove_invalid_sums() will do the cleanup */
	if (rte->cost >= LS_INFINITY)
		return;
	/* never create summaries for as-ext LSA */
	if (rte->p_type == PT_TYPE1_EXT || rte->p_type == PT_TYPE2_EXT)
		return;
	/* no need for summary LSA in the originating area */
	if (rte->area.s_addr == area->id.s_addr)
		return;
	/* no need to originate inter-area routes to the backbone */
	if (rte->p_type == PT_INTER_AREA && area->id.s_addr == INADDR_ANY)
		return;
	/* nexthop check, nexthop part of area -> no summary */
	TAILQ_FOREACH(rn, &rte->nexthop, entry) {
		if (rn->invalid)
			continue;
		nr = rt_lookup(DT_NET, rn->nexthop.s_addr);
		if (nr && nr->area.s_addr == area->id.s_addr)
			continue;
		break;
	}
	if (rn == NULL)
		/* all nexthops belong to this area or are invalid */
		return;

	/* TODO AS border router specific checks */
	/* TODO inter-area network route stuff */
	/* TODO intra-area stuff -- condense LSA ??? */

	if (rte->d_type == DT_NET) {
		type = LSA_TYPE_SUM_NETWORK;
	} else if (rte->d_type == DT_RTR) {
		if (area->stub)
			/* do not redistribute type 4 LSA into stub areas */
			return;
		type = LSA_TYPE_SUM_ROUTER;
	} else
		fatalx("rde_summary_update: unknown route type");

	/* update lsa but only if it was changed */
	v = lsa_find_area(area, type, rte->prefix.s_addr, rde_router_id());
	lsa = orig_sum_lsa(rte, area, type, rte->invalid);
	lsa_merge(rde_nbr_self(area), lsa, v);

	if (v == NULL)
		v = lsa_find_area(area, type, rte->prefix.s_addr,
		    rde_router_id());

	/* suppressed/deleted routes are not found in the second lsa_find */
	if (v)
		v->cost = rte->cost;
}

struct lsa *
orig_sum_lsa(struct rt_node *rte, struct area *area, u_int8_t type, int invalid)
{
	struct lsa	*lsa;
	u_int16_t	 len;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_sum);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_sum_lsa");

	/* LSA header */
	lsa->hdr.age = htons(invalid ? MAX_AGE : DEFAULT_AGE);
	lsa->hdr.opts = area_ospf_options(area);
	lsa->hdr.type = type;
	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);

	/* prefix and mask */
	/*
	 * TODO ls_id must be unique, for overlapping routes this may
	 * not be true. In this case a hack needs to be done to
	 * make the ls_id unique.
	 */
	lsa->hdr.ls_id = rte->prefix.s_addr;
	if (type == LSA_TYPE_SUM_NETWORK)
		lsa->data.sum.mask = prefixlen2mask(rte->prefixlen);
	else
		lsa->data.sum.mask = 0;	/* must be zero per RFC */

	lsa->data.sum.metric = htonl(rte->cost & LSA_METRIC_MASK);

	lsa->hdr.ls_chksum = 0;
	lsa->hdr.ls_chksum =
	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return (lsa);
}
1482