1 /*	$OpenBSD: rde.c,v 1.96 2014/07/12 20:16:38 krw Exp $ */
2 
3 /*
4  * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
5  * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
6  * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #include <sys/types.h>
22 #include <sys/socket.h>
23 #include <sys/queue.h>
24 #include <netinet/in.h>
25 #include <arpa/inet.h>
26 #include <err.h>
27 #include <errno.h>
28 #include <stdlib.h>
29 #include <signal.h>
30 #include <string.h>
31 #include <pwd.h>
32 #include <unistd.h>
33 #include <event.h>
34 
35 #include "ospf.h"
36 #include "ospfd.h"
37 #include "ospfe.h"
38 #include "log.h"
39 #include "rde.h"
40 
41 void		 rde_sig_handler(int sig, short, void *);
42 void		 rde_shutdown(void);
43 void		 rde_dispatch_imsg(int, short, void *);
44 void		 rde_dispatch_parent(int, short, void *);
45 void		 rde_dump_area(struct area *, int, pid_t);
46 
47 void		 rde_send_summary(pid_t);
48 void		 rde_send_summary_area(struct area *, pid_t);
49 void		 rde_nbr_init(u_int32_t);
50 void		 rde_nbr_free(void);
51 struct rde_nbr	*rde_nbr_find(u_int32_t);
52 struct rde_nbr	*rde_nbr_new(u_int32_t, struct rde_nbr *);
53 void		 rde_nbr_del(struct rde_nbr *);
54 
55 void		 rde_req_list_add(struct rde_nbr *, struct lsa_hdr *);
56 int		 rde_req_list_exists(struct rde_nbr *, struct lsa_hdr *);
57 void		 rde_req_list_del(struct rde_nbr *, struct lsa_hdr *);
58 void		 rde_req_list_free(struct rde_nbr *);
59 
60 struct iface	*rde_asext_lookup(u_int32_t, int);
61 void		 rde_asext_get(struct kroute *);
62 void		 rde_asext_put(struct kroute *);
63 void		 rde_asext_free(void);
64 struct lsa	*orig_asext_lsa(struct kroute *, u_int32_t, u_int16_t);
65 struct lsa	*orig_sum_lsa(struct rt_node *, struct area *, u_int8_t, int);
66 
67 struct ospfd_conf	*rdeconf = NULL, *nconf = NULL;
68 struct imsgev		*iev_ospfe;
69 struct imsgev		*iev_main;
70 struct rde_nbr		*nbrself;
71 struct lsa_tree		 asext_tree;
72 
73 /* ARGSUSED */
74 void
75 rde_sig_handler(int sig, short event, void *arg)
76 {
77 	/*
78 	 * signal handler rules don't apply; libevent decouples for us
79 	 */
80 
81 	switch (sig) {
82 	case SIGINT:
83 	case SIGTERM:
84 		rde_shutdown();
85 		/* NOTREACHED */
86 	default:
87 		fatalx("unexpected signal");
88 	}
89 }
90 
91 /* route decision engine */
92 pid_t
93 rde(struct ospfd_conf *xconf, int pipe_parent2rde[2], int pipe_ospfe2rde[2],
94     int pipe_parent2ospfe[2])
95 {
96 	struct event		 ev_sigint, ev_sigterm;
97 	struct timeval		 now;
98 	struct area		*area;
99 	struct iface		*iface;
100 	struct passwd		*pw;
101 	struct redistribute	*r;
102 	pid_t			 pid;
103 
104 	switch (pid = fork()) {
105 	case -1:
106 		fatal("cannot fork");
107 		/* NOTREACHED */
108 	case 0:
109 		break;
110 	default:
111 		return (pid);
112 	}
113 
114 	rdeconf = xconf;
115 
116 	if ((pw = getpwnam(OSPFD_USER)) == NULL)
117 		fatal("getpwnam");
118 
119 	if (chroot(pw->pw_dir) == -1)
120 		fatal("chroot");
121 	if (chdir("/") == -1)
122 		fatal("chdir(\"/\")");
123 
124 	setproctitle("route decision engine");
125 	ospfd_process = PROC_RDE_ENGINE;
126 
127 	if (setgroups(1, &pw->pw_gid) ||
128 	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
129 	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
130 		fatal("can't drop privileges");
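	/*
	 * Note: the order above matters -- chroot() and chdir() still need
	 * root, so they run first; supplementary groups are dropped before
	 * the gid and the gid before the uid, after which privileges cannot
	 * be regained.
	 */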
131 
132 	event_init();
133 	rde_nbr_init(NBR_HASHSIZE);
134 	lsa_init(&asext_tree);
135 
136 	/* setup signal handler */
137 	signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
138 	signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
139 	signal_add(&ev_sigint, NULL);
140 	signal_add(&ev_sigterm, NULL);
141 	signal(SIGPIPE, SIG_IGN);
142 	signal(SIGHUP, SIG_IGN);
143 
144 	/* setup pipes */
145 	close(pipe_ospfe2rde[0]);
146 	close(pipe_parent2rde[0]);
147 	close(pipe_parent2ospfe[0]);
148 	close(pipe_parent2ospfe[1]);
149 
150 	if ((iev_ospfe = malloc(sizeof(struct imsgev))) == NULL ||
151 	    (iev_main = malloc(sizeof(struct imsgev))) == NULL)
152 		fatal(NULL);
153 	imsg_init(&iev_ospfe->ibuf, pipe_ospfe2rde[1]);
154 	iev_ospfe->handler = rde_dispatch_imsg;
155 	imsg_init(&iev_main->ibuf, pipe_parent2rde[1]);
156 	iev_main->handler = rde_dispatch_parent;
157 
158 	/* setup event handler */
159 	iev_ospfe->events = EV_READ;
160 	event_set(&iev_ospfe->ev, iev_ospfe->ibuf.fd, iev_ospfe->events,
161 	    iev_ospfe->handler, iev_ospfe);
162 	event_add(&iev_ospfe->ev, NULL);
163 
164 	iev_main->events = EV_READ;
165 	event_set(&iev_main->ev, iev_main->ibuf.fd, iev_main->events,
166 	    iev_main->handler, iev_main);
167 	event_add(&iev_main->ev, NULL);
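	/*
	 * Each pipe is wrapped in an imsgev: an imsgbuf for message framing
	 * plus a persistent EV_READ event.  imsg_event_add() re-adds the
	 * event with EV_WRITE set whenever the write buffer still holds
	 * pending imsgs, so replies are flushed from the same event loop.
	 */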
168 
169 	evtimer_set(&rdeconf->ev, spf_timer, rdeconf);
170 	cand_list_init();
171 	rt_init();
172 
173 	/* remove unneeded stuff from config */
174 	LIST_FOREACH(area, &rdeconf->area_list, entry)
175 		LIST_FOREACH(iface, &area->iface_list, entry)
176 			md_list_clr(&iface->auth_md_list);
177 
178 	while ((r = SIMPLEQ_FIRST(&rdeconf->redist_list)) != NULL) {
179 		SIMPLEQ_REMOVE_HEAD(&rdeconf->redist_list, entry);
180 		free(r);
181 	}
182 
183 	gettimeofday(&now, NULL);
184 	rdeconf->uptime = now.tv_sec;
185 
186 	event_dispatch();
187 
188 	rde_shutdown();
189 	/* NOTREACHED */
190 
191 	return (0);
192 }
193 
194 void
195 rde_shutdown(void)
196 {
197 	struct area	*a;
198 	struct vertex	*v, *nv;
199 
200 	stop_spf_timer(rdeconf);
201 	cand_list_clr();
202 	rt_clear();
203 
204 	while ((a = LIST_FIRST(&rdeconf->area_list)) != NULL) {
205 		LIST_REMOVE(a, entry);
206 		area_del(a);
207 	}
208 	for (v = RB_MIN(lsa_tree, &asext_tree); v != NULL; v = nv) {
209 		nv = RB_NEXT(lsa_tree, &asext_tree, v);
210 		vertex_free(v);
211 	}
212 	rde_asext_free();
213 	rde_nbr_free();
214 	kr_shutdown();
215 
216 	msgbuf_clear(&iev_ospfe->ibuf.w);
217 	free(iev_ospfe);
218 	msgbuf_clear(&iev_main->ibuf.w);
219 	free(iev_main);
220 	free(rdeconf);
221 
222 	log_info("route decision engine exiting");
223 	_exit(0);
224 }
225 
226 int
227 rde_imsg_compose_ospfe(int type, u_int32_t peerid, pid_t pid, void *data,
228     u_int16_t datalen)
229 {
230 	return (imsg_compose_event(iev_ospfe, type, peerid, pid, -1,
231 	    data, datalen));
232 }
233 
234 /* ARGSUSED */
235 void
236 rde_dispatch_imsg(int fd, short event, void *bula)
237 {
238 	struct imsgev		*iev = bula;
239 	struct imsgbuf		*ibuf;
240 	struct imsg		 imsg;
241 	struct in_addr		 aid;
242 	struct ls_req_hdr	 req_hdr;
243 	struct lsa_hdr		 lsa_hdr, *db_hdr;
244 	struct rde_nbr		 rn, *nbr;
245 	struct timespec		 tp;
246 	struct lsa		*lsa;
247 	struct area		*area;
248 	struct vertex		*v;
249 	char			*buf;
250 	ssize_t			 n;
251 	time_t			 now;
252 	int			 r, state, self, error, shut = 0, verbose;
253 	u_int16_t		 l;
254 
255 	ibuf = &iev->ibuf;
256 
257 	if (event & EV_READ) {
258 		if ((n = imsg_read(ibuf)) == -1)
259 			fatal("imsg_read error");
260 		if (n == 0)	/* connection closed */
261 			shut = 1;
262 	}
263 	if (event & EV_WRITE) {
264 		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
265 			fatal("msgbuf_write");
266 		if (n == 0)	/* connection closed */
267 			shut = 1;
268 	}
269 
270 	clock_gettime(CLOCK_MONOTONIC, &tp);
271 	now = tp.tv_sec;
272 
273 	for (;;) {
274 		if ((n = imsg_get(ibuf, &imsg)) == -1)
275 			fatal("rde_dispatch_imsg: imsg_read error");
276 		if (n == 0)
277 			break;
278 
279 		switch (imsg.hdr.type) {
280 		case IMSG_NEIGHBOR_UP:
281 			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rn))
282 				fatalx("invalid size of OE request");
283 			memcpy(&rn, imsg.data, sizeof(rn));
284 
285 			if (rde_nbr_new(imsg.hdr.peerid, &rn) == NULL)
286 				fatalx("rde_dispatch_imsg: "
287 				    "neighbor already exists");
288 			break;
289 		case IMSG_NEIGHBOR_DOWN:
290 			rde_nbr_del(rde_nbr_find(imsg.hdr.peerid));
291 			break;
292 		case IMSG_NEIGHBOR_CHANGE:
293 			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state))
294 				fatalx("invalid size of OE request");
295 			memcpy(&state, imsg.data, sizeof(state));
296 
297 			nbr = rde_nbr_find(imsg.hdr.peerid);
298 			if (nbr == NULL)
299 				break;
300 
301 			if (state != nbr->state &&
302 			    (nbr->state & NBR_STA_FULL ||
303 			    state & NBR_STA_FULL))
304 				area_track(nbr->area, state);
305 
306 			nbr->state = state;
307 			if (nbr->state & NBR_STA_FULL)
308 				rde_req_list_free(nbr);
309 			break;
310 		case IMSG_NEIGHBOR_CAPA:
311 			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(u_int8_t))
312 				fatalx("invalid size of OE request");
313 			nbr = rde_nbr_find(imsg.hdr.peerid);
314 			if (nbr == NULL)
315 				break;
316 			nbr->capa_options = *(u_int8_t *)imsg.data;
317 			break;
318 		case IMSG_DB_SNAPSHOT:
319 			nbr = rde_nbr_find(imsg.hdr.peerid);
320 			if (nbr == NULL)
321 				break;
322 
323 			lsa_snap(nbr);
324 
325 			imsg_compose_event(iev_ospfe, IMSG_DB_END, imsg.hdr.peerid,
326 			    0, -1, NULL, 0);
327 			break;
328 		case IMSG_DD:
329 			nbr = rde_nbr_find(imsg.hdr.peerid);
330 			if (nbr == NULL)
331 				break;
332 
333 			buf = imsg.data;
334 			error = 0;
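			/*
			 * A DD packet carries a list of LSA headers; walk
			 * them and ask for every LSA that is missing from,
			 * or newer than, our own database copy.
			 */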
335 			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
336 			    l >= sizeof(lsa_hdr); l -= sizeof(lsa_hdr)) {
337 				memcpy(&lsa_hdr, buf, sizeof(lsa_hdr));
338 				buf += sizeof(lsa_hdr);
339 
340 				if (lsa_hdr.type == LSA_TYPE_EXTERNAL &&
341 				    nbr->area->stub) {
342 					error = 1;
343 					break;
344 				}
345 				v = lsa_find(nbr->iface, lsa_hdr.type,
346 				    lsa_hdr.ls_id, lsa_hdr.adv_rtr);
347 				if (v == NULL)
348 					db_hdr = NULL;
349 				else
350 					db_hdr = &v->lsa->hdr;
351 
352 				if (lsa_newer(&lsa_hdr, db_hdr) > 0) {
353 					/*
354 					 * only request LSAs that are
355 					 * newer or missing
356 					 */
357 					rde_req_list_add(nbr, &lsa_hdr);
358 					imsg_compose_event(iev_ospfe, IMSG_DD,
359 					    imsg.hdr.peerid, 0, -1, &lsa_hdr,
360 					    sizeof(lsa_hdr));
361 				}
362 			}
363 			if (l != 0 && !error)
364 				log_warnx("rde_dispatch_imsg: peerid %u, "
365 				    "trailing garbage in Database Description "
366 				    "packet", imsg.hdr.peerid);
367 
368 			if (!error)
369 				imsg_compose_event(iev_ospfe, IMSG_DD_END,
370 				    imsg.hdr.peerid, 0, -1, NULL, 0);
371 			else
372 				imsg_compose_event(iev_ospfe, IMSG_DD_BADLSA,
373 				    imsg.hdr.peerid, 0, -1, NULL, 0);
374 			break;
375 		case IMSG_LS_REQ:
376 			nbr = rde_nbr_find(imsg.hdr.peerid);
377 			if (nbr == NULL)
378 				break;
379 
380 			buf = imsg.data;
381 			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
382 			    l >= sizeof(req_hdr); l -= sizeof(req_hdr)) {
383 				memcpy(&req_hdr, buf, sizeof(req_hdr));
384 				buf += sizeof(req_hdr);
385 
386 				if ((v = lsa_find(nbr->iface,
387 				    ntohl(req_hdr.type), req_hdr.ls_id,
388 				    req_hdr.adv_rtr)) == NULL) {
389 					log_debug("rde_dispatch_imsg: "
390 					    "requested LSA not found");
391 					imsg_compose_event(iev_ospfe,
392 					    IMSG_LS_BADREQ, imsg.hdr.peerid,
393 					    0, -1, NULL, 0);
394 					continue;
395 				}
396 				imsg_compose_event(iev_ospfe, IMSG_LS_UPD,
397 				    imsg.hdr.peerid, 0, -1, v->lsa,
398 				    ntohs(v->lsa->hdr.len));
399 			}
400 			if (l != 0)
401 				log_warnx("rde_dispatch_imsg: peerid %u, "
402 				    "trailing garbage in LS Request "
403 				    "packet", imsg.hdr.peerid);
404 			break;
405 		case IMSG_LS_UPD:
406 			nbr = rde_nbr_find(imsg.hdr.peerid);
407 			if (nbr == NULL)
408 				break;
409 
410 			lsa = malloc(imsg.hdr.len - IMSG_HEADER_SIZE);
411 			if (lsa == NULL)
412 				fatal(NULL);
413 			memcpy(lsa, imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
414 
415 			if (!lsa_check(nbr, lsa,
416 			    imsg.hdr.len - IMSG_HEADER_SIZE)) {
417 				free(lsa);
418 				break;
419 			}
420 
421 			v = lsa_find(nbr->iface, lsa->hdr.type, lsa->hdr.ls_id,
422 			    lsa->hdr.adv_rtr);
423 			if (v == NULL)
424 				db_hdr = NULL;
425 			else
426 				db_hdr = &v->lsa->hdr;
427 
428 			if (nbr->self) {
429 				lsa_merge(nbr, lsa, v);
430 				/* lsa_merge frees the right lsa */
431 				break;
432 			}
433 
434 			r = lsa_newer(&lsa->hdr, db_hdr);
435 			if (r > 0) {
436 				/* new LSA newer than DB */
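				/*
				 * Rate limit roughly per RFC 2328
				 * (MinLSArrival): if our copy was already
				 * flooded and changed less than
				 * MIN_LS_ARRIVAL seconds ago, ignore the
				 * update.
				 */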
437 				if (v && v->flooded &&
438 				    v->changed + MIN_LS_ARRIVAL >= now) {
439 					free(lsa);
440 					break;
441 				}
442 
443 				rde_req_list_del(nbr, &lsa->hdr);
444 
445 				if (!(self = lsa_self(nbr, lsa, v)))
446 					if (lsa_add(nbr, lsa))
447 						/* delayed lsa */
448 						break;
449 
450 				/* flood and perhaps ack LSA */
451 				imsg_compose_event(iev_ospfe, IMSG_LS_FLOOD,
452 				    imsg.hdr.peerid, 0, -1, lsa,
453 				    ntohs(lsa->hdr.len));
454 
455 				/* reflood self originated LSA */
456 				if (self && v)
457 					imsg_compose_event(iev_ospfe,
458 					    IMSG_LS_FLOOD, v->peerid, 0, -1,
459 					    v->lsa, ntohs(v->lsa->hdr.len));
460 				/* new LSA was not added so free it */
461 				if (self)
462 					free(lsa);
463 			} else if (r < 0) {
464 				/*
465 				 * point 6 of "The Flooding Procedure"
466 				 * We are violating the RFC here because it
467 				 * does not make sense to reset a session just
468 				 * because an equal LSA is already in the table.
469 				 * Only if the LSA sent is older than the one
470 				 * in the table should we reset the session.
471 				 */
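				/*
				 * If the LSA is still on the request list
				 * for this neighbor the database exchange
				 * went wrong; a BadLSReq event (signalled
				 * via IMSG_LS_BADREQ) makes ospfe restart
				 * the adjacency.
				 */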
472 				if (rde_req_list_exists(nbr, &lsa->hdr)) {
473 					imsg_compose_event(iev_ospfe,
474 					    IMSG_LS_BADREQ, imsg.hdr.peerid,
475 					    0, -1, NULL, 0);
476 					free(lsa);
477 					break;
478 				}
479 
480 				/* lsa no longer needed */
481 				free(lsa);
482 
483 				/* new LSA older than DB */
484 				if (ntohl(db_hdr->seq_num) == MAX_SEQ_NUM &&
485 				    ntohs(db_hdr->age) == MAX_AGE)
486 					/* seq-num wrap */
487 					break;
488 
489 				if (v->changed + MIN_LS_ARRIVAL >= now)
490 					break;
491 
492 				/* directly send current LSA, no ack */
493 				imsg_compose_event(iev_ospfe, IMSG_LS_UPD,
494 				    imsg.hdr.peerid, 0, -1, v->lsa,
495 				    ntohs(v->lsa->hdr.len));
496 			} else {
497 				/* LSAs are equal, send a direct ack */
498 				imsg_compose_event(iev_ospfe, IMSG_LS_ACK,
499 				    imsg.hdr.peerid, 0, -1, &lsa->hdr,
500 				    sizeof(lsa->hdr));
501 				free(lsa);
502 			}
503 			break;
504 		case IMSG_LS_MAXAGE:
505 			nbr = rde_nbr_find(imsg.hdr.peerid);
506 			if (nbr == NULL)
507 				break;
508 
509 			if (imsg.hdr.len != IMSG_HEADER_SIZE +
510 			    sizeof(struct lsa_hdr))
511 				fatalx("invalid size of OE request");
512 			memcpy(&lsa_hdr, imsg.data, sizeof(lsa_hdr));
513 
514 			if (rde_nbr_loading(nbr->area))
515 				break;
516 
517 			v = lsa_find(nbr->iface, lsa_hdr.type, lsa_hdr.ls_id,
518 			    lsa_hdr.adv_rtr);
519 			if (v == NULL)
520 				db_hdr = NULL;
521 			else
522 				db_hdr = &v->lsa->hdr;
523 
524 			/*
525 			 * only delete LSA if the one in the db is not newer
526 			 */
527 			if (lsa_newer(db_hdr, &lsa_hdr) <= 0)
528 				lsa_del(nbr, &lsa_hdr);
529 			break;
530 		case IMSG_CTL_SHOW_DATABASE:
531 		case IMSG_CTL_SHOW_DB_EXT:
532 		case IMSG_CTL_SHOW_DB_NET:
533 		case IMSG_CTL_SHOW_DB_RTR:
534 		case IMSG_CTL_SHOW_DB_SELF:
535 		case IMSG_CTL_SHOW_DB_SUM:
536 		case IMSG_CTL_SHOW_DB_ASBR:
537 		case IMSG_CTL_SHOW_DB_OPAQ:
538 			if (imsg.hdr.len != IMSG_HEADER_SIZE &&
539 			    imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(aid)) {
540 				log_warnx("rde_dispatch_imsg: wrong imsg len");
541 				break;
542 			}
543 			if (imsg.hdr.len == IMSG_HEADER_SIZE) {
544 				LIST_FOREACH(area, &rdeconf->area_list, entry) {
545 					rde_dump_area(area, imsg.hdr.type,
546 					    imsg.hdr.pid);
547 				}
548 				lsa_dump(&asext_tree, imsg.hdr.type,
549 				    imsg.hdr.pid);
550 			} else {
551 				memcpy(&aid, imsg.data, sizeof(aid));
552 				if ((area = area_find(rdeconf, aid)) != NULL) {
553 					rde_dump_area(area, imsg.hdr.type,
554 					    imsg.hdr.pid);
555 					if (!area->stub)
556 						lsa_dump(&asext_tree,
557 						    imsg.hdr.type,
558 						    imsg.hdr.pid);
559 				}
560 			}
561 			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
562 			    imsg.hdr.pid, -1, NULL, 0);
563 			break;
564 		case IMSG_CTL_SHOW_RIB:
565 			LIST_FOREACH(area, &rdeconf->area_list, entry) {
566 				imsg_compose_event(iev_ospfe, IMSG_CTL_AREA,
567 				    0, imsg.hdr.pid, -1, area, sizeof(*area));
568 
569 				rt_dump(area->id, imsg.hdr.pid, RIB_RTR);
570 				rt_dump(area->id, imsg.hdr.pid, RIB_NET);
571 			}
572 			aid.s_addr = 0;
573 			rt_dump(aid, imsg.hdr.pid, RIB_EXT);
574 
575 			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
576 			    imsg.hdr.pid, -1, NULL, 0);
577 			break;
578 		case IMSG_CTL_SHOW_SUM:
579 			rde_send_summary(imsg.hdr.pid);
580 			LIST_FOREACH(area, &rdeconf->area_list, entry)
581 				rde_send_summary_area(area, imsg.hdr.pid);
582 			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
583 			    imsg.hdr.pid, -1, NULL, 0);
584 			break;
585 		case IMSG_CTL_LOG_VERBOSE:
586 			/* already checked by ospfe */
587 			memcpy(&verbose, imsg.data, sizeof(verbose));
588 			log_verbose(verbose);
589 			break;
590 		default:
591 			log_debug("rde_dispatch_imsg: unexpected imsg %d",
592 			    imsg.hdr.type);
593 			break;
594 		}
595 		imsg_free(&imsg);
596 	}
597 	if (!shut)
598 		imsg_event_add(iev);
599 	else {
600 		/* this pipe is dead, so remove the event handler */
601 		event_del(&iev->ev);
602 		event_loopexit(NULL);
603 	}
604 }
605 
606 /* ARGSUSED */
607 void
608 rde_dispatch_parent(int fd, short event, void *bula)
609 {
610 	static struct area	*narea;
611 	struct iface		*niface;
612 	struct imsg		 imsg;
613 	struct kroute		 rr;
614 	struct imsgev		*iev = bula;
615 	struct imsgbuf		*ibuf;
616 	struct redistribute	*nred;
617 	ssize_t			 n;
618 	int			 shut = 0;
619 
620 	ibuf = &iev->ibuf;
621 
622 	if (event & EV_READ) {
623 		if ((n = imsg_read(ibuf)) == -1)
624 			fatal("imsg_read error");
625 		if (n == 0)	/* connection closed */
626 			shut = 1;
627 	}
628 	if (event & EV_WRITE) {
629 		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
630 			fatal("msgbuf_write");
631 		if (n == 0)	/* connection closed */
632 			shut = 1;
633 	}
634 
635 	for (;;) {
636 		if ((n = imsg_get(ibuf, &imsg)) == -1)
637 			fatal("rde_dispatch_parent: imsg_read error");
638 		if (n == 0)
639 			break;
640 
641 		switch (imsg.hdr.type) {
642 		case IMSG_NETWORK_ADD:
643 			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
644 				log_warnx("rde_dispatch_parent: "
645 				    "wrong imsg len");
646 				break;
647 			}
648 			memcpy(&rr, imsg.data, sizeof(rr));
649 			rde_asext_get(&rr);
650 			break;
651 		case IMSG_NETWORK_DEL:
652 			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
653 				log_warnx("rde_dispatch_parent: "
654 				    "wrong imsg len");
655 				break;
656 			}
657 			memcpy(&rr, imsg.data, sizeof(rr));
658 			rde_asext_put(&rr);
659 			break;
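		/*
		 * The parent streams a new configuration as a sequence of
		 * RECONF imsgs (conf, areas, redistribute entries, ifaces);
		 * it is assembled into nconf and swapped in by merge_config()
		 * once IMSG_RECONF_END arrives.
		 */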
660 		case IMSG_RECONF_CONF:
661 			if ((nconf = malloc(sizeof(struct ospfd_conf))) ==
662 			    NULL)
663 				fatal(NULL);
664 			memcpy(nconf, imsg.data, sizeof(struct ospfd_conf));
665 
666 			LIST_INIT(&nconf->area_list);
667 			LIST_INIT(&nconf->cand_list);
668 			break;
669 		case IMSG_RECONF_AREA:
670 			if ((narea = area_new()) == NULL)
671 				fatal(NULL);
672 			memcpy(narea, imsg.data, sizeof(struct area));
673 
674 			LIST_INIT(&narea->iface_list);
675 			LIST_INIT(&narea->nbr_list);
676 			RB_INIT(&narea->lsa_tree);
677 			SIMPLEQ_INIT(&narea->redist_list);
678 
679 			LIST_INSERT_HEAD(&nconf->area_list, narea, entry);
680 			break;
681 		case IMSG_RECONF_REDIST:
682 			if ((nred = malloc(sizeof(struct redistribute))) == NULL)
683 				fatal(NULL);
684 			memcpy(nred, imsg.data, sizeof(struct redistribute));
685 
686 			SIMPLEQ_INSERT_TAIL(&narea->redist_list, nred, entry);
687 			break;
688 		case IMSG_RECONF_IFACE:
689 			if ((niface = malloc(sizeof(struct iface))) == NULL)
690 				fatal(NULL);
691 			memcpy(niface, imsg.data, sizeof(struct iface));
692 
693 			LIST_INIT(&niface->nbr_list);
694 			TAILQ_INIT(&niface->ls_ack_list);
695 			TAILQ_INIT(&niface->auth_md_list);
696 			RB_INIT(&niface->lsa_tree);
697 
698 			niface->area = narea;
699 			LIST_INSERT_HEAD(&narea->iface_list, niface, entry);
700 
701 			break;
702 		case IMSG_RECONF_END:
703 			merge_config(rdeconf, nconf);
704 			nconf = NULL;
705 			break;
706 		default:
707 			log_debug("rde_dispatch_parent: unexpected imsg %d",
708 			    imsg.hdr.type);
709 			break;
710 		}
711 		imsg_free(&imsg);
712 	}
713 	if (!shut)
714 		imsg_event_add(iev);
715 	else {
716 		/* this pipe is dead, so remove the event handler */
717 		event_del(&iev->ev);
718 		event_loopexit(NULL);
719 	}
720 }
721 
722 void
723 rde_dump_area(struct area *area, int imsg_type, pid_t pid)
724 {
725 	struct iface	*iface;
726 
727 	/* dump header */
728 	imsg_compose_event(iev_ospfe, IMSG_CTL_AREA, 0, pid, -1,
729 	    area, sizeof(*area));
730 
731 	/* dump link local lsa */
732 	LIST_FOREACH(iface, &area->iface_list, entry) {
733 		imsg_compose_event(iev_ospfe, IMSG_CTL_IFACE,
734 		    0, pid, -1, iface, sizeof(*iface));
735 		lsa_dump(&iface->lsa_tree, imsg_type, pid);
736 	}
737 
738 	/* dump area lsa */
739 	lsa_dump(&area->lsa_tree, imsg_type, pid);
740 }
741 
742 u_int32_t
743 rde_router_id(void)
744 {
745 	return (rdeconf->rtr_id.s_addr);
746 }
747 
748 struct area *
749 rde_backbone_area(void)
750 {
751 	struct in_addr	id;
752 
753 	id.s_addr = INADDR_ANY;
754 
755 	return (area_find(rdeconf, id));
756 }
757 
758 void
759 rde_send_change_kroute(struct rt_node *r)
760 {
761 	int			 krcount = 0;
762 	struct kroute		 kr;
763 	struct rt_nexthop	*rn;
764 	struct ibuf		*wbuf;
765 
766 	if ((wbuf = imsg_create(&iev_main->ibuf, IMSG_KROUTE_CHANGE, 0, 0,
767 	    sizeof(kr))) == NULL) {
768 		return;
769 	}
770 
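	/*
	 * All valid nexthops are packed into a single IMSG_KROUTE_CHANGE
	 * message so the parent process can install the route with all of
	 * its equal-cost nexthops at once.
	 */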
771 	TAILQ_FOREACH(rn, &r->nexthop, entry) {
772 		if (rn->invalid)
773 			continue;
774 		krcount++;
775 
776 		bzero(&kr, sizeof(kr));
777 		kr.prefix.s_addr = r->prefix.s_addr;
778 		kr.nexthop.s_addr = rn->nexthop.s_addr;
779 		kr.prefixlen = r->prefixlen;
780 		kr.ext_tag = r->ext_tag;
781 		imsg_add(wbuf, &kr, sizeof(kr));
782 	}
783 	if (krcount == 0)
784 		fatalx("rde_send_change_kroute: no valid nexthop found");
785 	imsg_close(&iev_main->ibuf, wbuf);
786 	imsg_event_add(iev_main);
787 }
788 
789 void
790 rde_send_delete_kroute(struct rt_node *r)
791 {
792 	struct kroute	 kr;
793 
794 	bzero(&kr, sizeof(kr));
795 	kr.prefix.s_addr = r->prefix.s_addr;
796 	kr.prefixlen = r->prefixlen;
797 
798 	imsg_compose_event(iev_main, IMSG_KROUTE_DELETE, 0, 0, -1,
799 	    &kr, sizeof(kr));
800 }
801 
802 void
803 rde_send_summary(pid_t pid)
804 {
805 	static struct ctl_sum	 sumctl;
806 	struct timeval		 now;
807 	struct area		*area;
808 	struct vertex		*v;
809 
810 	bzero(&sumctl, sizeof(struct ctl_sum));
811 
812 	sumctl.rtr_id.s_addr = rde_router_id();
813 	sumctl.spf_delay = rdeconf->spf_delay;
814 	sumctl.spf_hold_time = rdeconf->spf_hold_time;
815 
816 	LIST_FOREACH(area, &rdeconf->area_list, entry)
817 		sumctl.num_area++;
818 
819 	RB_FOREACH(v, lsa_tree, &asext_tree) {
820 		sumctl.num_ext_lsa++;
821 		sumctl.ext_lsa_cksum += ntohs(v->lsa->hdr.ls_chksum);
822 
823 	}
824 
825 	gettimeofday(&now, NULL);
826 	if (rdeconf->uptime < now.tv_sec)
827 		sumctl.uptime = now.tv_sec - rdeconf->uptime;
828 	else
829 		sumctl.uptime = 0;
830 
831 	sumctl.rfc1583compat = rdeconf->rfc1583compat;
832 
833 	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM, 0, pid, &sumctl,
834 	    sizeof(sumctl));
835 }
836 
837 void
838 rde_send_summary_area(struct area *area, pid_t pid)
839 {
840 	static struct ctl_sum_area	 sumareactl;
841 	struct iface			*iface;
842 	struct rde_nbr			*nbr;
843 	struct lsa_tree			*tree = &area->lsa_tree;
844 	struct vertex			*v;
845 
846 	bzero(&sumareactl, sizeof(struct ctl_sum_area));
847 
848 	sumareactl.area.s_addr = area->id.s_addr;
849 	sumareactl.num_spf_calc = area->num_spf_calc;
850 
851 	LIST_FOREACH(iface, &area->iface_list, entry)
852 		sumareactl.num_iface++;
853 
854 	LIST_FOREACH(nbr, &area->nbr_list, entry)
855 		if (nbr->state == NBR_STA_FULL && !nbr->self)
856 			sumareactl.num_adj_nbr++;
857 
858 	RB_FOREACH(v, lsa_tree, tree) {
859 		sumareactl.num_lsa++;
860 		sumareactl.lsa_cksum += ntohs(v->lsa->hdr.ls_chksum);
861 	}
862 
863 	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM_AREA, 0, pid, &sumareactl,
864 	    sizeof(sumareactl));
865 }
866 
867 LIST_HEAD(rde_nbr_head, rde_nbr);
868 
869 struct nbr_table {
870 	struct rde_nbr_head	*hashtbl;
871 	u_int32_t		 hashmask;
872 } rdenbrtable;
873 
874 #define RDE_NBR_HASH(x)		\
875 	&rdenbrtable.hashtbl[(x) & rdenbrtable.hashmask]
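/*
 * rde_nbr_init() rounds the table size up to a power of two so the
 * bitwise AND above always selects a valid bucket, e.g. a requested
 * size of 128 gives 128 buckets and a hashmask of 0x7f.
 */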
876 
877 void
878 rde_nbr_init(u_int32_t hashsize)
879 {
880 	struct rde_nbr_head	*head;
881 	u_int32_t		 hs, i;
882 
883 	for (hs = 1; hs < hashsize; hs <<= 1)
884 		;
885 	rdenbrtable.hashtbl = calloc(hs, sizeof(struct rde_nbr_head));
886 	if (rdenbrtable.hashtbl == NULL)
887 		fatal("rde_nbr_init");
888 
889 	for (i = 0; i < hs; i++)
890 		LIST_INIT(&rdenbrtable.hashtbl[i]);
891 
892 	rdenbrtable.hashmask = hs - 1;
893 
894 	if ((nbrself = calloc(1, sizeof(*nbrself))) == NULL)
895 		fatal("rde_nbr_init");
896 
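	/*
	 * nbrself is a pseudo-neighbor representing this router itself,
	 * keyed by the reserved peerid NBR_IDSELF; self-originated LSAs
	 * are merged into the database through it (lsa_merge(nbrself, ...)).
	 */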
897 	nbrself->id.s_addr = rde_router_id();
898 	nbrself->peerid = NBR_IDSELF;
899 	nbrself->state = NBR_STA_DOWN;
900 	nbrself->self = 1;
901 	head = RDE_NBR_HASH(NBR_IDSELF);
902 	LIST_INSERT_HEAD(head, nbrself, hash);
903 }
904 
905 void
906 rde_nbr_free(void)
907 {
908 	free(nbrself);
909 	free(rdenbrtable.hashtbl);
910 }
911 
912 struct rde_nbr *
913 rde_nbr_find(u_int32_t peerid)
914 {
915 	struct rde_nbr_head	*head;
916 	struct rde_nbr		*nbr;
917 
918 	head = RDE_NBR_HASH(peerid);
919 
920 	LIST_FOREACH(nbr, head, hash) {
921 		if (nbr->peerid == peerid)
922 			return (nbr);
923 	}
924 
925 	return (NULL);
926 }
927 
928 struct rde_nbr *
929 rde_nbr_new(u_int32_t peerid, struct rde_nbr *new)
930 {
931 	struct rde_nbr_head	*head;
932 	struct rde_nbr		*nbr;
933 	struct area		*area;
934 	struct iface		*iface;
935 
936 	if (rde_nbr_find(peerid))
937 		return (NULL);
938 	if ((area = area_find(rdeconf, new->area_id)) == NULL)
939 		fatalx("rde_nbr_new: unknown area");
940 
941 	LIST_FOREACH(iface, &area->iface_list, entry) {
942 		if (iface->ifindex == new->ifindex)
943 			break;
944 	}
945 	if (iface == NULL)
946 		fatalx("rde_nbr_new: unknown interface");
947 
948 	if ((nbr = calloc(1, sizeof(*nbr))) == NULL)
949 		fatal("rde_nbr_new");
950 
951 	memcpy(nbr, new, sizeof(*nbr));
952 	nbr->peerid = peerid;
953 	nbr->area = area;
954 	nbr->iface = iface;
955 
956 	TAILQ_INIT(&nbr->req_list);
957 
958 	head = RDE_NBR_HASH(peerid);
959 	LIST_INSERT_HEAD(head, nbr, hash);
960 	LIST_INSERT_HEAD(&area->nbr_list, nbr, entry);
961 
962 	return (nbr);
963 }
964 
965 void
966 rde_nbr_del(struct rde_nbr *nbr)
967 {
968 	if (nbr == NULL)
969 		return;
970 
971 	rde_req_list_free(nbr);
972 
973 	LIST_REMOVE(nbr, entry);
974 	LIST_REMOVE(nbr, hash);
975 
976 	free(nbr);
977 }
978 
979 int
980 rde_nbr_loading(struct area *area)
981 {
982 	struct rde_nbr		*nbr;
983 	int			 checkall = 0;
984 
985 	if (area == NULL) {
986 		area = LIST_FIRST(&rdeconf->area_list);
987 		checkall = 1;
988 	}
989 
990 	while (area != NULL) {
991 		LIST_FOREACH(nbr, &area->nbr_list, entry) {
992 			if (nbr->self)
993 				continue;
994 			if (nbr->state & NBR_STA_XCHNG ||
995 			    nbr->state & NBR_STA_LOAD)
996 				return (1);
997 		}
998 		if (!checkall)
999 			break;
1000 		area = LIST_NEXT(area, entry);
1001 	}
1002 
1003 	return (0);
1004 }
1005 
1006 struct rde_nbr *
1007 rde_nbr_self(struct area *area)
1008 {
1009 	struct rde_nbr		*nbr;
1010 
1011 	LIST_FOREACH(nbr, &area->nbr_list, entry)
1012 		if (nbr->self)
1013 			return (nbr);
1014 
1015 	/* this must not happen */
1016 	fatalx("rde_nbr_self: area without self");
1017 	return (NULL);
1018 }
1019 
1020 /*
1021  * LSA req list
1022  */
1023 void
1024 rde_req_list_add(struct rde_nbr *nbr, struct lsa_hdr *lsa)
1025 {
1026 	struct rde_req_entry	*le;
1027 
1028 	if ((le = calloc(1, sizeof(*le))) == NULL)
1029 		fatal("rde_req_list_add");
1030 
1031 	TAILQ_INSERT_TAIL(&nbr->req_list, le, entry);
1032 	le->type = lsa->type;
1033 	le->ls_id = lsa->ls_id;
1034 	le->adv_rtr = lsa->adv_rtr;
1035 }
1036 
1037 int
1038 rde_req_list_exists(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
1039 {
1040 	struct rde_req_entry	*le;
1041 
1042 	TAILQ_FOREACH(le, &nbr->req_list, entry) {
1043 		if ((lsa_hdr->type == le->type) &&
1044 		    (lsa_hdr->ls_id == le->ls_id) &&
1045 		    (lsa_hdr->adv_rtr == le->adv_rtr))
1046 			return (1);
1047 	}
1048 	return (0);
1049 }
1050 
1051 void
1052 rde_req_list_del(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
1053 {
1054 	struct rde_req_entry	*le;
1055 
1056 	TAILQ_FOREACH(le, &nbr->req_list, entry) {
1057 		if ((lsa_hdr->type == le->type) &&
1058 		    (lsa_hdr->ls_id == le->ls_id) &&
1059 		    (lsa_hdr->adv_rtr == le->adv_rtr)) {
1060 			TAILQ_REMOVE(&nbr->req_list, le, entry);
1061 			free(le);
1062 			return;
1063 		}
1064 	}
1065 }
1066 
1067 void
1068 rde_req_list_free(struct rde_nbr *nbr)
1069 {
1070 	struct rde_req_entry	*le;
1071 
1072 	while ((le = TAILQ_FIRST(&nbr->req_list)) != NULL) {
1073 		TAILQ_REMOVE(&nbr->req_list, le, entry);
1074 		free(le);
1075 	}
1076 }
1077 
1078 /*
1079  * as-external LSA handling
1080  */
1081 struct asext_node {
1082 	RB_ENTRY(asext_node)    entry;
1083 	struct kroute		r;
1084 	u_int32_t		ls_id;
1085 };
1086 
1087 static __inline int	asext_compare(struct asext_node *, struct asext_node *);
1088 struct asext_node	*asext_find(u_int32_t, u_int8_t);
1089 
1090 RB_HEAD(asext_tree, asext_node)		ast;
1091 RB_PROTOTYPE(asext_tree, asext_node, entry, asext_compare)
1092 RB_GENERATE(asext_tree, asext_node, entry, asext_compare)
1093 
1094 static __inline int
1095 asext_compare(struct asext_node *a, struct asext_node *b)
1096 {
1097 	if (ntohl(a->r.prefix.s_addr) < ntohl(b->r.prefix.s_addr))
1098 		return (-1);
1099 	if (ntohl(a->r.prefix.s_addr) > ntohl(b->r.prefix.s_addr))
1100 		return (1);
1101 	if (a->r.prefixlen < b->r.prefixlen)
1102 		return (-1);
1103 	if (a->r.prefixlen > b->r.prefixlen)
1104 		return (1);
1105 	return (0);
1106 }
1107 
1108 struct asext_node *
1109 asext_find(u_int32_t addr, u_int8_t prefixlen)
1110 {
1111 	struct asext_node	a;
1112 
1113 	a.r.prefix.s_addr = addr;
1114 	a.r.prefixlen = prefixlen;
1115 
1116 	return (RB_FIND(asext_tree, &ast, &a));
1117 }
1118 
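/*
 * Return the OSPF interface whose connected network covers the given
 * prefix; plen == -1 matches any prefix length.  Used to suppress
 * as-ext LSAs for prefixes that are already announced as net/stub LSAs
 * and to pick the forwarding address in orig_asext_lsa().
 */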
1119 struct iface *
1120 rde_asext_lookup(u_int32_t prefix, int plen)
1121 {
1122 	struct area	*area;
1123 	struct iface	*iface;
1124 
1125 	LIST_FOREACH(area, &rdeconf->area_list, entry) {
1126 		LIST_FOREACH(iface, &area->iface_list, entry) {
1127 			if ((iface->addr.s_addr & iface->mask.s_addr) ==
1128 			    (prefix & iface->mask.s_addr) && (plen == -1 ||
1129 			    iface->mask.s_addr == prefixlen2mask(plen)))
1130 				return (iface);
1131 		}
1132 	}
1133 	return (NULL);
1134 }
1135 
1136 void
1137 rde_asext_get(struct kroute *rr)
1138 {
1139 	struct asext_node	*an, *oan;
1140 	struct vertex		*v;
1141 	struct lsa		*lsa;
1142 	u_int32_t		 mask;
1143 
1144 	if (rde_asext_lookup(rr->prefix.s_addr, rr->prefixlen)) {
1145 		/* already announced as (stub) net LSA */
1146 		log_debug("rde_asext_get: %s/%d is net LSA",
1147 		    inet_ntoa(rr->prefix), rr->prefixlen);
1148 		return;
1149 	}
1150 
1151 	an = asext_find(rr->prefix.s_addr, rr->prefixlen);
1152 	if (an == NULL) {
1153 		if ((an = calloc(1, sizeof(*an))) == NULL)
1154 			fatal("rde_asext_get");
1155 		bcopy(rr, &an->r, sizeof(*rr));
1156 		an->ls_id = rr->prefix.s_addr;
1157 		RB_INSERT(asext_tree, &ast, an);
1158 	} else {
1159 		/* the bcopy does not change the lookup key so it is safe */
1160 		bcopy(rr, &an->r, sizeof(*rr));
1161 	}
1162 
1163 	/*
1164 	 * ls_id must be unique, for overlapping routes this may
1165 	 * not be true. In this case a unique ls_id needs to be found.
1166 	 * The algorithm will change the ls_id of the less specific
1167 	 * route. E.g. in the case of 10.0.0.0/16 and 10.0.0.0/24
1168 	 * 10.0.0.0/24 will get the 10.0.0.0 ls_id and 10.0.0.0/16
1169 	 * will change the ls_id to 10.0.255.255 and see if that is unique.
1170 	 */
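	/*
	 * Sketch of the loop below: the less specific entry first tries
	 * prefix | ~mask (10.0.255.255 in the example above); if that is
	 * taken as well the ls_id is decremented until a free one is found,
	 * and if it wraps around to the prefix itself the route cannot be
	 * redistributed at all.
	 */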
1171 	oan = an;
1172 	mask = prefixlen2mask(oan->r.prefixlen);
1173 	v = lsa_find(NULL, LSA_TYPE_EXTERNAL, oan->ls_id,
1174 	    rdeconf->rtr_id.s_addr);
1175 	while (v && v->lsa->data.asext.mask != mask) {
1176 		/* conflict needs to be resolved. change less specific lsa */
1177 		if (ntohl(v->lsa->data.asext.mask) < ntohl(mask)) {
1178 			/* lsa to insert is more specific, fix other lsa */
1179 			mask = v->lsa->data.asext.mask;
1180 			oan = asext_find(v->lsa->hdr.ls_id & mask,
1181 			   mask2prefixlen(mask));
1182 			if (oan == NULL)
1183 				fatalx("as-ext LSA DB corrupted");
1184 		}
1185 		/* oan is less specific and needs new ls_id */
1186 		if (oan->ls_id == oan->r.prefix.s_addr)
1187 			oan->ls_id |= ~mask;
1188 		else {
1189 			u_int32_t	tmp = ntohl(oan->ls_id);
1190 			oan->ls_id = htonl(tmp - 1);
1191 			if (oan->ls_id == oan->r.prefix.s_addr) {
1192 				log_warnx("prefix %s/%d cannot be "
1193 				    "redistributed, no unique ls_id found.",
1194 				    inet_ntoa(rr->prefix), rr->prefixlen);
1195 				RB_REMOVE(asext_tree, &ast, an);
1196 				free(an);
1197 				return;
1198 			}
1199 		}
1200 		mask = prefixlen2mask(oan->r.prefixlen);
1201 		v = lsa_find(NULL, LSA_TYPE_EXTERNAL, oan->ls_id,
1202 		    rdeconf->rtr_id.s_addr);
1203 	}
1204 
1205 	v = lsa_find(NULL, LSA_TYPE_EXTERNAL, an->ls_id,
1206 	    rdeconf->rtr_id.s_addr);
1207 	lsa = orig_asext_lsa(rr, an->ls_id, DEFAULT_AGE);
1208 	lsa_merge(nbrself, lsa, v);
1209 
1210 	if (oan != an) {
1211 		v = lsa_find(NULL, LSA_TYPE_EXTERNAL, oan->ls_id,
1212 		    rdeconf->rtr_id.s_addr);
1213 		lsa = orig_asext_lsa(&oan->r, oan->ls_id, DEFAULT_AGE);
1214 		lsa_merge(nbrself, lsa, v);
1215 	}
1216 }
1217 
1218 void
1219 rde_asext_put(struct kroute *rr)
1220 {
1221 	struct asext_node	*an;
1222 	struct vertex		*v;
1223 	struct lsa		*lsa;
1224 
1225 	/*
1226 	 * Just try to remove the LSA.  If the prefix is announced as a
1227 	 * stub net LSA, asext_find() will fail and nothing will happen.
1228 	 */
1229 	an = asext_find(rr->prefix.s_addr, rr->prefixlen);
1230 	if (an == NULL) {
1231 		log_debug("rde_asext_put: NO SUCH LSA %s/%d",
1232 		    inet_ntoa(rr->prefix), rr->prefixlen);
1233 		return;
1234 	}
1235 
1236 	/* inherit metric and ext_tag from the current LSA;
1237 	 * some routers don't like to get withdraws that
1238 	 * differ from what they have in their table.
1239 	 */
1240 	v = lsa_find(NULL, LSA_TYPE_EXTERNAL, an->ls_id,
1241 	    rdeconf->rtr_id.s_addr);
1242 	if (v != NULL) {
1243 		rr->metric = ntohl(v->lsa->data.asext.metric);
1244 		rr->ext_tag = ntohl(v->lsa->data.asext.ext_tag);
1245 	}
1246 
1247 	/* remove by reflooding with MAX_AGE */
1248 	lsa = orig_asext_lsa(rr, an->ls_id, MAX_AGE);
1249 	lsa_merge(nbrself, lsa, v);
1250 
1251 	RB_REMOVE(asext_tree, &ast, an);
1252 	free(an);
1253 }
1254 
1255 void
1256 rde_asext_free(void)
1257 {
1258 	struct asext_node	*an, *nan;
1259 
1260 	for (an = RB_MIN(asext_tree, &ast); an != NULL; an = nan) {
1261 		nan = RB_NEXT(asext_tree, &ast, an);
1262 		RB_REMOVE(asext_tree, &ast, an);
1263 		free(an);
1264 	}
1265 }
1266 
1267 struct lsa *
1268 orig_asext_lsa(struct kroute *rr, u_int32_t ls_id, u_int16_t age)
1269 {
1270 	struct lsa	*lsa;
1271 	struct iface	*iface;
1272 	u_int16_t	 len;
1273 
1274 	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_asext);
1275 	if ((lsa = calloc(1, len)) == NULL)
1276 		fatal("orig_asext_lsa");
1277 
1278 	log_debug("orig_asext_lsa: %s/%d age %d",
1279 	    inet_ntoa(rr->prefix), rr->prefixlen, age);
1280 
1281 	/* LSA header */
1282 	lsa->hdr.age = htons(age);
1283 	lsa->hdr.opts = area_ospf_options(NULL);
1284 	lsa->hdr.type = LSA_TYPE_EXTERNAL;
1285 	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
1286 	/* update of seqnum is done by lsa_merge */
1287 	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
1288 	lsa->hdr.len = htons(len);
1289 
1290 	/* prefix and mask */
1291 	lsa->hdr.ls_id = ls_id;
1292 	lsa->data.asext.mask = prefixlen2mask(rr->prefixlen);
1293 
1294 	/*
1295 	 * nexthop -- on connected routes we are the nexthop;
1296 	 * in other cases we may announce the true nexthop if it is
1297 	 * reachable via an OSPF enabled interface, but only
1298 	 * broadcast & NBMA interfaces are considered in that case.
1299 	 * It does not make sense to announce the nexthop of a point-to-point
1300 	 * link since the traffic has to go through this box anyway.
1301 	 * Some implementations actually check that there are multiple
1302 	 * neighbors on the particular segment; we skip that check.
1303 	 */
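	/*
	 * A forwarding address of 0 means "forward to the advertising
	 * router itself" (RFC 2328, A.4.5), which is what we want for
	 * connected routes and for nexthops we cannot usefully announce.
	 */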
1304 	iface = rde_asext_lookup(rr->nexthop.s_addr, -1);
1305 	if (rr->flags & F_FORCED_NEXTHOP)
1306 		lsa->data.asext.fw_addr = rr->nexthop.s_addr;
1307 	else if (rr->flags & F_CONNECTED)
1308 		lsa->data.asext.fw_addr = 0;
1309 	else if (iface && (iface->type == IF_TYPE_BROADCAST ||
1310 	    iface->type == IF_TYPE_NBMA))
1311 		lsa->data.asext.fw_addr = rr->nexthop.s_addr;
1312 	else
1313 		lsa->data.asext.fw_addr = 0;
1314 
1315 	lsa->data.asext.metric = htonl(rr->metric);
1316 	lsa->data.asext.ext_tag = htonl(rr->ext_tag);
1317 
1318 	lsa->hdr.ls_chksum = 0;
1319 	lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));
1320 
1321 	return (lsa);
1322 }
1323 
1324 /*
1325  * summary LSA stuff
1326  */
1327 void
1328 rde_summary_update(struct rt_node *rte, struct area *area)
1329 {
1330 	struct rt_nexthop	*rn;
1331 	struct rt_node		*nr;
1332 	struct vertex		*v = NULL;
1333 	struct lsa		*lsa;
1334 	u_int8_t		 type = 0;
1335 
1336 	/* first check if we actually need to announce this route */
1337 	if (!(rte->d_type == DT_NET || rte->flags & OSPF_RTR_E))
1338 		return;
1339 	/* never create summaries for as-ext LSA */
1340 	if (rte->p_type == PT_TYPE1_EXT || rte->p_type == PT_TYPE2_EXT)
1341 		return;
1342 	/* no need for summary LSA in the originating area */
1343 	if (rte->area.s_addr == area->id.s_addr)
1344 		return;
1345 	/* no need to originate inter-area routes to the backbone */
1346 	if (rte->p_type == PT_INTER_AREA && area->id.s_addr == INADDR_ANY)
1347 		return;
1348 	/* nexthop check, nexthop part of area -> no summary */
1349 	TAILQ_FOREACH(rn, &rte->nexthop, entry) {
1350 		nr = rt_lookup(DT_NET, rn->nexthop.s_addr);
1351 		if (nr && nr->area.s_addr == area->id.s_addr)
1352 			continue;
1353 		break;
1354 	}
1355 	if (rn == NULL)	/* all nexthops belong to this area */
1356 		return;
1357 
1358 	if (rte->cost >= LS_INFINITY)
1359 		return;
1360 	/* TODO AS border router specific checks */
1361 	/* TODO inter-area network route stuff */
1362 	/* TODO intra-area stuff -- condense LSA ??? */
1363 
1364 	if (rte->d_type == DT_NET) {
1365 		type = LSA_TYPE_SUM_NETWORK;
1366 	} else if (rte->d_type == DT_RTR) {
1367 		if (area->stub)
1368 			/* do not redistribute type 4 LSA into stub areas */
1369 			return;
1370 		type = LSA_TYPE_SUM_ROUTER;
1371 	} else
1372 		fatalx("rde_summary_update: unknown route type");
1373 
1374 	/* update lsa but only if it was changed */
1375 	v = lsa_find_area(area, type, rte->prefix.s_addr, rde_router_id());
1376 	lsa = orig_sum_lsa(rte, area, type, rte->invalid);
1377 	lsa_merge(rde_nbr_self(area), lsa, v);
1378 
1379 	if (v == NULL)
1380 		v = lsa_find_area(area, type, rte->prefix.s_addr,
1381 		    rde_router_id());
1382 
1383 	/* suppressed/deleted routes are not found in the second lsa_find */
1384 	if (v)
1385 		v->cost = rte->cost;
1386 }
1387 
1388 struct lsa *
1389 orig_sum_lsa(struct rt_node *rte, struct area *area, u_int8_t type, int invalid)
1390 {
1391 	struct lsa	*lsa;
1392 	u_int16_t	 len;
1393 
1394 	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_sum);
1395 	if ((lsa = calloc(1, len)) == NULL)
1396 		fatal("orig_sum_lsa");
1397 
1398 	/* LSA header */
1399 	lsa->hdr.age = htons(invalid ? MAX_AGE : DEFAULT_AGE);
1400 	lsa->hdr.opts = area_ospf_options(area);
1401 	lsa->hdr.type = type;
1402 	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
1403 	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
1404 	lsa->hdr.len = htons(len);
1405 
1406 	/* prefix and mask */
1407 	/*
1408 	 * TODO ls_id must be unique, for overlapping routes this may
1409 	 * not be true. In this case a hack needs to be done to
1410 	 * make the ls_id unique.
1411 	 */
1412 	lsa->hdr.ls_id = rte->prefix.s_addr;
1413 	if (type == LSA_TYPE_SUM_NETWORK)
1414 		lsa->data.sum.mask = prefixlen2mask(rte->prefixlen);
1415 	else
1416 		lsa->data.sum.mask = 0;	/* must be zero per RFC */
1417 
1418 	lsa->data.sum.metric = htonl(rte->cost & LSA_METRIC_MASK);
1419 
1420 	lsa->hdr.ls_chksum = 0;
1421 	lsa->hdr.ls_chksum =
1422 	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));
1423 
1424 	return (lsa);
1425 }
1426